Dataset schema (one record per source file; ⌀ marks nullable columns):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

---
hexsha: 38409af830d185e6db60580d4fce6581028fc750 | size: 1,247 | ext: py | lang: Python
max_stars: dowsing/tests/maturin.py in bgerrity/dowsing @ 87172e2a26bf3704b2fee256be4600e1925b257e, licenses ["MIT"], count 1, events 2022-03-21T22:41:58.000Z to 2022-03-21T22:41:58.000Z
max_issues: dowsing/tests/maturin.py in bgerrity/dowsing @ 87172e2a26bf3704b2fee256be4600e1925b257e, licenses ["MIT"], count 47, events 2020-10-02T20:33:58.000Z to 2022-03-31T19:10:18.000Z
max_forks: dowsing/tests/maturin.py in jreese/dowsing @ f2ad1becb10fab631dc5143ba9547eb478cbf90e, licenses ["MIT"], count 4, events 2020-05-25T04:50:51.000Z to 2022-02-04T15:20:28.000Z
content:
import unittest
from pathlib import Path

import volatile

from dowsing.maturin import MaturinReader


class MaturinReaderTest(unittest.TestCase):
    def test_orjson(self) -> None:
        # This is a simplified version of orjson 3.4.0
        with volatile.dir() as d:
            dp = Path(d)
            (dp / "pyproject.toml").write_text(
                """\
[project]
name = "orjson"
repository = "https://example.com/"
[build-system]
build-backend = "maturin"
requires = ["maturin>=0.8.1,<0.9"]
"""
            )
            (dp / "Cargo.toml").write_text(
                """\
[package]
name = "orjson"
version = "3.4.0"
authors = ["foo <foo@example.com>"]
description = "Summary here"
license = "Apache-2.0 OR MIT"
repository = "https://example.com/repo"
homepage = "https://example.com/home"
readme = "README.md"
keywords = ["foo", "bar", "baz"]
[package.metadata.maturin]
requires-python = ">=3.6"
classifier = [
    "License :: OSI Approved :: Apache Software License",
    "License :: OSI Approved :: MIT License",
]
"""
            )
            r = MaturinReader(dp)
            md = r.get_metadata()
            self.assertEqual("orjson", md.name)
            self.assertEqual("3.4.0", md.version)
            # TODO more tests
avg_line_length: 23.980769 | max_line_length: 57 | alphanum_fraction: 0.589415
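A minimal usage sketch for the reader exercised by this test, assuming the dowsing package is installed; the project path is hypothetical and must contain pyproject.toml and Cargo.toml laid out like the fixture above:

from pathlib import Path

from dowsing.maturin import MaturinReader

reader = MaturinReader(Path("path/to/project"))  # dir with pyproject.toml + Cargo.toml
md = reader.get_metadata()
print(md.name, md.version)  # for the fixture above: "orjson 3.4.0"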

---
hexsha: 7dbf62e153a2bf51fa48d5b7841557dfcd8dffc1 | size: 1,738 | ext: py | lang: Python
max_stars: Infra_2022_update/Userfee_per_FTE.py in ScilifelabDataCentre/Annual-report-2021 @ 6a5d9ca0320ee6cba39245e81e2bd61f1a6822e1, licenses ["MIT"], count null, events null
max_issues: Infra_2022_update/Userfee_per_FTE.py in ScilifelabDataCentre/Annual-report-2021 @ 6a5d9ca0320ee6cba39245e81e2bd61f1a6822e1, licenses ["MIT"], count null, events null
max_forks: Infra_2022_update/Userfee_per_FTE.py in ScilifelabDataCentre/Annual-report-2021 @ 6a5d9ca0320ee6cba39245e81e2bd61f1a6822e1, licenses ["MIT"], count null, events null
content:
# generates barplot with units on y-axis and user income per FTE on x-axis
import pandas as pd
import plotly.graph_objects as go
import os
from colour_science_2022 import (
SCILIFE_COLOURS,
)
# Add data
Userfee_FTE = pd.read_excel(
"data/Total User Fee per FTE 2021.xlsx",
sheet_name="Single Data",
header=0,
engine="openpyxl",
keep_default_na=False,
)
Userfee_FTE = Userfee_FTE.rename(columns={"User Fee/FTE (kSEK)": "Fee_per_FTE"})
Userfee_FTE["Mfunds"] = Userfee_FTE["Fee_per_FTE"] / 1000
# print(Userfee_FTE)
# Make stacked bar chart
fig = go.Figure(
data=[
go.Bar(
name="User Fee per FTE",
y=Userfee_FTE.Unit,
x=Userfee_FTE.Mfunds,
orientation="h",
marker=dict(color=SCILIFE_COLOURS[0], line=dict(color="#000000", width=1)),
),
]
)
# fig.update_layout(xaxis=go.layout.XAxis(tickangle=45))
fig.update_layout(
plot_bgcolor="white",
font=dict(size=40),
autosize=False,
margin=dict(r=0, t=0, b=0, l=0),
width=3000,
height=2200,
yaxis={"categoryorder": "total ascending"},
)
# modify y-axis
fig.update_yaxes(
title=" ",
showgrid=True,
linecolor="black",
)
Userfee_FTE_max = max(Userfee_FTE["Mfunds"])
# modify x-axis
fig.update_xaxes(
    title="<br>Total User Fee Income per FTE (MSEK)",  # leading <br> adds space between the axis title and the plot
    showgrid=True,
    gridcolor="lightgrey",
    linecolor="black",
    dtick=0.5,  # tick every 0.5 MSEK
    range=[0, int(Userfee_FTE_max * 1.1)],
)
if not os.path.isdir("Plots"):
os.mkdir("Plots")
# fig.show()
fig.write_image("Plots/Userfee_per_FTE_2021.png")
fig.write_image("Plots/Userfee_per_FTE_2021.svg")
avg_line_length: 24.478873 | max_line_length: 112 | alphanum_fraction: 0.663982
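A self-contained sketch of the same horizontal-bar pattern with made-up rows, since the spreadsheet read above is not part of this record (the unit names and fee values are hypothetical):

import pandas as pd
import plotly.graph_objects as go

df = pd.DataFrame({"Unit": ["Unit A", "Unit B"], "Fee_per_FTE": [1200.0, 450.0]})
df["Mfunds"] = df["Fee_per_FTE"] / 1000  # kSEK -> MSEK, as in the script

fig = go.Figure(go.Bar(y=df.Unit, x=df.Mfunds, orientation="h"))
fig.update_layout(yaxis={"categoryorder": "total ascending"})
fig.show()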

---
hexsha: e72fbdd54b2576424af6cd808db7226ca4bf2682 | size: 3,935 | ext: py | lang: Python
max_stars: huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/nova_show_server_request.py in huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78, licenses ["Apache-2.0"], count 64, events 2020-06-12T07:05:07.000Z to 2022-03-30T03:32:50.000Z
max_issues: huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/nova_show_server_request.py in huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78, licenses ["Apache-2.0"], count 11, events 2020-07-06T07:56:54.000Z to 2022-01-11T11:14:40.000Z
max_forks: huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/nova_show_server_request.py in huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78, licenses ["Apache-2.0"], count 24, events 2020-06-08T11:42:13.000Z to 2022-03-04T06:44:08.000Z
content:
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class NovaShowServerRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'server_id': 'str',
'open_stack_api_version': 'str'
}
attribute_map = {
'server_id': 'server_id',
'open_stack_api_version': 'OpenStack-API-Version'
}
def __init__(self, server_id=None, open_stack_api_version=None):
"""NovaShowServerRequest - a model defined in huaweicloud sdk"""
self._server_id = None
self._open_stack_api_version = None
self.discriminator = None
self.server_id = server_id
if open_stack_api_version is not None:
self.open_stack_api_version = open_stack_api_version
@property
def server_id(self):
"""Gets the server_id of this NovaShowServerRequest.
云服务器ID。
:return: The server_id of this NovaShowServerRequest.
:rtype: str
"""
return self._server_id
@server_id.setter
def server_id(self, server_id):
"""Sets the server_id of this NovaShowServerRequest.
云服务器ID。
:param server_id: The server_id of this NovaShowServerRequest.
:type: str
"""
self._server_id = server_id
@property
def open_stack_api_version(self):
"""Gets the open_stack_api_version of this NovaShowServerRequest.
微版本头
:return: The open_stack_api_version of this NovaShowServerRequest.
:rtype: str
"""
return self._open_stack_api_version
@open_stack_api_version.setter
def open_stack_api_version(self, open_stack_api_version):
"""Sets the open_stack_api_version of this NovaShowServerRequest.
微版本头
:param open_stack_api_version: The open_stack_api_version of this NovaShowServerRequest.
:type: str
"""
self._open_stack_api_version = open_stack_api_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NovaShowServerRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 27.907801 | max_line_length: 96 | alphanum_fraction: 0.587294
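A short sketch of constructing and serializing the model, assuming the huaweicloudsdkecs package above is importable; the server ID is hypothetical:

req = NovaShowServerRequest(
    server_id="d6b447a0-0000-0000-0000-000000000000",  # hypothetical ID
    open_stack_api_version="compute 2.26",
)
print(req.to_dict())
# {'server_id': 'd6b447a0-...', 'open_stack_api_version': 'compute 2.26'}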

---
hexsha: 0b1291d4045f4223f61fd07043b24ba34909e1bd | size: 11,331 | ext: py | lang: Python
max_stars: homeassistant/components/group/light.py in petewill/home-assistant @ 5859dba4344f05fb8774aa1207e47ac28f627a67, licenses ["Apache-2.0"], count 1, events 2019-09-28T07:06:51.000Z to 2019-09-28T07:06:51.000Z
max_issues: homeassistant/components/group/light.py in petewill/home-assistant @ 5859dba4344f05fb8774aa1207e47ac28f627a67, licenses ["Apache-2.0"], count 19, events 2020-01-29T23:21:07.000Z to 2021-07-23T23:26:51.000Z
max_forks: homeassistant/components/group/light.py in petewill/home-assistant @ 5859dba4344f05fb8774aa1207e47ac28f627a67, licenses ["Apache-2.0"], count 1, events 2020-06-19T02:47:19.000Z to 2020-06-19T02:47:19.000Z
content:
"""This platform allows several lights to be grouped into one light."""
import asyncio
from collections import Counter
import itertools
import logging
from typing import Any, Callable, Iterator, List, Optional, Tuple
import voluptuous as vol
from homeassistant.components import light
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_NAME,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import color as color_util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Light Group"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_ENTITIES): cv.entities_domain(light.DOMAIN),
}
)
SUPPORT_GROUP_LIGHT = (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_FLASH
| SUPPORT_COLOR
| SUPPORT_TRANSITION
| SUPPORT_WHITE_VALUE
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Initialize light.group platform."""
async_add_entities([LightGroup(config.get(CONF_NAME), config[CONF_ENTITIES])])
class LightGroup(light.Light):
"""Representation of a light group."""
def __init__(self, name: str, entity_ids: List[str]) -> None:
"""Initialize a light group."""
self._name = name
self._entity_ids = entity_ids
self._is_on = False
self._available = False
self._brightness: Optional[int] = None
self._hs_color: Optional[Tuple[float, float]] = None
self._color_temp: Optional[int] = None
self._min_mireds: Optional[int] = 154
self._max_mireds: Optional[int] = 500
self._white_value: Optional[int] = None
self._effect_list: Optional[List[str]] = None
self._effect: Optional[str] = None
self._supported_features: int = 0
self._async_unsub_state_changed = None
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def async_state_changed_listener(
entity_id: str, old_state: State, new_state: State
):
"""Handle child updates."""
self.async_schedule_update_ha_state(True)
self._async_unsub_state_changed = async_track_state_change(
self.hass, self._entity_ids, async_state_changed_listener
)
await self.async_update()
async def async_will_remove_from_hass(self):
"""Handle removal from HASS."""
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def is_on(self) -> bool:
"""Return the on/off state of the light group."""
return self._is_on
@property
def available(self) -> bool:
"""Return whether the light group is available."""
return self._available
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light group between 0..255."""
return self._brightness
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
"""Return the HS color value [float, float]."""
return self._hs_color
@property
def color_temp(self) -> Optional[int]:
"""Return the CT color value in mireds."""
return self._color_temp
@property
def min_mireds(self) -> Optional[int]:
"""Return the coldest color_temp that this light group supports."""
return self._min_mireds
@property
def max_mireds(self) -> Optional[int]:
"""Return the warmest color_temp that this light group supports."""
return self._max_mireds
@property
def white_value(self) -> Optional[int]:
"""Return the white value of this light group between 0..255."""
return self._white_value
@property
def effect_list(self) -> Optional[List[str]]:
"""Return the list of supported effects."""
return self._effect_list
@property
def effect(self) -> Optional[str]:
"""Return the current effect."""
return self._effect
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def should_poll(self) -> bool:
"""No polling needed for a light group."""
return False
async def async_turn_on(self, **kwargs):
"""Forward the turn_on command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
emulate_color_temp_entity_ids = []
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
if ATTR_HS_COLOR in kwargs:
data[ATTR_HS_COLOR] = kwargs[ATTR_HS_COLOR]
if ATTR_COLOR_TEMP in kwargs:
data[ATTR_COLOR_TEMP] = kwargs[ATTR_COLOR_TEMP]
# Create a new entity list to mutate
updated_entities = list(self._entity_ids)
# Walk through initial entity ids, split entity lists by support
for entity_id in self._entity_ids:
state = self.hass.states.get(entity_id)
if not state:
continue
support = state.attributes.get(ATTR_SUPPORTED_FEATURES)
# Only pass color temperature to supported entity_ids
if bool(support & SUPPORT_COLOR) and not bool(
support & SUPPORT_COLOR_TEMP
):
emulate_color_temp_entity_ids.append(entity_id)
updated_entities.remove(entity_id)
data[ATTR_ENTITY_ID] = updated_entities
if ATTR_WHITE_VALUE in kwargs:
data[ATTR_WHITE_VALUE] = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
data[ATTR_EFFECT] = kwargs[ATTR_EFFECT]
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
if ATTR_FLASH in kwargs:
data[ATTR_FLASH] = kwargs[ATTR_FLASH]
if not emulate_color_temp_entity_ids:
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_ON, data, blocking=True
)
return
emulate_color_temp_data = data.copy()
temp_k = color_util.color_temperature_mired_to_kelvin(
emulate_color_temp_data[ATTR_COLOR_TEMP]
)
hs_color = color_util.color_temperature_to_hs(temp_k)
emulate_color_temp_data[ATTR_HS_COLOR] = hs_color
del emulate_color_temp_data[ATTR_COLOR_TEMP]
emulate_color_temp_data[ATTR_ENTITY_ID] = emulate_color_temp_entity_ids
await asyncio.gather(
self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_ON, data, blocking=True
),
self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
emulate_color_temp_data,
blocking=True,
),
)
async def async_turn_off(self, **kwargs):
"""Forward the turn_off command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_OFF, data, blocking=True
)
async def async_update(self):
"""Query all members and determine the light group state."""
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states = list(filter(None, all_states))
on_states = [state for state in states if state.state == STATE_ON]
self._is_on = len(on_states) > 0
self._available = any(state.state != STATE_UNAVAILABLE for state in states)
self._brightness = _reduce_attribute(on_states, ATTR_BRIGHTNESS)
self._hs_color = _reduce_attribute(on_states, ATTR_HS_COLOR, reduce=_mean_tuple)
self._white_value = _reduce_attribute(on_states, ATTR_WHITE_VALUE)
self._color_temp = _reduce_attribute(on_states, ATTR_COLOR_TEMP)
self._min_mireds = _reduce_attribute(
states, ATTR_MIN_MIREDS, default=154, reduce=min
)
self._max_mireds = _reduce_attribute(
states, ATTR_MAX_MIREDS, default=500, reduce=max
)
self._effect_list = None
all_effect_lists = list(_find_state_attributes(states, ATTR_EFFECT_LIST))
if all_effect_lists:
# Merge all effects from all effect_lists with a union merge.
self._effect_list = list(set().union(*all_effect_lists))
self._effect = None
all_effects = list(_find_state_attributes(on_states, ATTR_EFFECT))
if all_effects:
# Report the most common effect.
effects_count = Counter(itertools.chain(all_effects))
self._effect = effects_count.most_common(1)[0][0]
self._supported_features = 0
for support in _find_state_attributes(states, ATTR_SUPPORTED_FEATURES):
# Merge supported features by emulating support for every feature
# we find.
self._supported_features |= support
# Bitwise-and the supported features with the light group's features
# so that we don't break in the future when a new feature is added.
self._supported_features &= SUPPORT_GROUP_LIGHT
def _find_state_attributes(states: List[State], key: str) -> Iterator[Any]:
"""Find attributes with matching key from states."""
for state in states:
value = state.attributes.get(key)
if value is not None:
yield value
def _mean_int(*args):
"""Return the mean of the supplied values."""
return int(sum(args) / len(args))
def _mean_tuple(*args):
"""Return the mean values along the columns of the supplied values."""
return tuple(sum(l) / len(l) for l in zip(*args))
def _reduce_attribute(
states: List[State],
key: str,
default: Optional[Any] = None,
reduce: Callable[..., Any] = _mean_int,
) -> Any:
"""Find the first attribute matching key from states.
If none are found, return default.
"""
attrs = list(_find_state_attributes(states, key))
if not attrs:
return default
if len(attrs) == 1:
return attrs[0]
return reduce(*attrs)
avg_line_length: 32.748555 | max_line_length: 88 | alphanum_fraction: 0.655988
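An illustration of the attribute-averaging helpers at the bottom of this file, assuming they are in scope; the sample values are made up:

print(_mean_int(100, 200, 255))                 # 185 (e.g. group brightness)
print(_mean_tuple((10.0, 50.0), (20.0, 70.0)))  # (15.0, 60.0) (e.g. group hs_color)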

---
hexsha: e4dafd9df3c6da4ef9f816a45574016007aae2b4 | size: 5,000 | ext: py | lang: Python
max_stars: pyinfra/api/connectors/chroot.py in yggdr/pyinfra @ f21315e8b19a59a033e214464a2ca22914856199, licenses ["MIT"], count 1,532, events 2015-06-13T19:48:52.000Z to 2022-03-26T15:32:45.000Z
max_issues: pyinfra/api/connectors/chroot.py in yggdr/pyinfra @ f21315e8b19a59a033e214464a2ca22914856199, licenses ["MIT"], count 729, events 2015-09-24T08:42:39.000Z to 2022-03-31T07:15:44.000Z
max_forks: pyinfra/api/connectors/chroot.py in yggdr/pyinfra @ f21315e8b19a59a033e214464a2ca22914856199, licenses ["MIT"], count 419, events 2015-12-16T21:00:34.000Z to 2022-03-05T21:05:07.000Z
content:
import os
from tempfile import mkstemp
import click
import six
from pyinfra import local, logger
from pyinfra.api import QuoteString, StringCommand
from pyinfra.api.exceptions import ConnectError, InventoryError, PyinfraError
from pyinfra.api.util import get_file_io, memoize
from pyinfra.progress import progress_spinner
from .local import run_shell_command as run_local_shell_command
from .util import get_sudo_password, make_unix_command
@memoize
def show_warning():
logger.warning('The @chroot connector is in beta!')
def make_names_data(directory=None):
if not directory:
raise InventoryError('No directory provided!')
show_warning()
yield '@chroot/{0}'.format(directory), {
'chroot_directory': '/{0}'.format(directory.lstrip('/')),
}, ['@chroot']
def connect(state, host):
chroot_directory = host.data.chroot_directory
try:
with progress_spinner({'chroot run'}):
local.shell(
'chroot {0} ls'.format(chroot_directory), splitlines=True,
)
except PyinfraError as e:
raise ConnectError(e.args[0])
host.host_data['chroot_directory'] = chroot_directory
return True
def run_shell_command(
state,
host,
command,
get_pty=False,
timeout=None,
stdin=None,
success_exit_codes=None,
print_output=False,
print_input=False,
return_combined_output=False,
use_sudo_password=False,
**command_kwargs
):
if use_sudo_password:
command_kwargs['use_sudo_password'] = get_sudo_password(
state,
host,
use_sudo_password,
run_shell_command=run_shell_command,
put_file=put_file,
)
chroot_directory = host.host_data['chroot_directory']
command = make_unix_command(command, state=state, **command_kwargs)
command = QuoteString(command)
logger.debug(
'--> Running chroot command on ({0}):{1}'.format(
chroot_directory, command,
),
)
chroot_command = StringCommand(
'chroot', chroot_directory,
'sh', '-c', command,
)
return run_local_shell_command(
state,
host,
chroot_command,
timeout=timeout,
stdin=stdin,
success_exit_codes=success_exit_codes,
print_output=print_output,
print_input=print_input,
return_combined_output=return_combined_output,
)
def put_file(
state,
host,
filename_or_io,
remote_filename,
print_output=False,
print_input=False,
**kwargs # ignored (sudo/etc)
):
_, temp_filename = mkstemp()
try:
# Load our file or IO object and write it to the temporary file
with get_file_io(filename_or_io) as file_io:
with open(temp_filename, 'wb') as temp_f:
data = file_io.read()
if isinstance(data, six.text_type):
data = data.encode()
temp_f.write(data)
chroot_directory = host.host_data['chroot_directory']
chroot_command = 'cp {0} {1}/{2}'.format(
temp_filename, chroot_directory, remote_filename,
)
status, _, stderr = run_local_shell_command(
state,
host,
chroot_command,
print_output=print_output,
print_input=print_input,
)
finally:
os.remove(temp_filename)
if not status:
raise IOError('\n'.join(stderr))
if print_output:
click.echo(
'{0}file uploaded to chroot: {1}'.format(
host.print_prefix, remote_filename,
),
err=True,
)
return status
def get_file(
state,
host,
remote_filename,
filename_or_io,
print_output=False,
print_input=False,
**kwargs # ignored (sudo/etc)
):
_, temp_filename = mkstemp()
try:
chroot_directory = host.host_data['chroot_directory']
chroot_command = 'cp {0}/{1} {2}'.format(
chroot_directory, remote_filename, temp_filename,
)
status, _, stderr = run_local_shell_command(
state,
host,
chroot_command,
print_output=print_output,
print_input=print_input,
)
# Load the temporary file and write it to our file or IO object
with open(temp_filename) as temp_f:
with get_file_io(filename_or_io, 'wb') as file_io:
data = temp_f.read()
if isinstance(data, six.text_type):
data = data.encode()
file_io.write(data)
finally:
os.remove(temp_filename)
if not status:
raise IOError('\n'.join(stderr))
if print_output:
click.echo(
'{0}file downloaded from chroot: {1}'.format(
host.print_prefix, remote_filename,
),
err=True,
)
return status
EXECUTION_CONNECTOR = True
avg_line_length: 24.038462 | max_line_length: 77 | alphanum_fraction: 0.611
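Every command this connector runs ultimately has the shape chroot <dir> sh -c '<command>'. A plain-string sketch of that composition, where shlex.quote stands in for pyinfra's QuoteString/StringCommand machinery (that equivalence is an assumption, not the library's API):

import shlex

def chroot_wrap(chroot_directory, command):
    # Same shape as the StringCommand built in run_shell_command above.
    return 'chroot {0} sh -c {1}'.format(chroot_directory, shlex.quote(command))

print(chroot_wrap('/srv/jail', 'echo hello'))  # chroot /srv/jail sh -c 'echo hello'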

---
hexsha: af58637a41a3cc1ac5b0b6d037689457b3054bd3 | size: 17,058 | ext: py | lang: Python
max_stars: rsnapsim/defunct/generalized_cpp/test_translation_ssa_generic.py in MunskyGroup/rSNAPsim @ af3e496d5252e1d2e1da061277123233a5d609b4, licenses ["MIT"], count 1, events 2022-01-28T18:17:37.000Z to 2022-01-28T18:17:37.000Z
max_issues: rsnapsim/defunct/generalized_cpp/test_translation_ssa_generic.py in MunskyGroup/rSNAPsim @ af3e496d5252e1d2e1da061277123233a5d609b4, licenses ["MIT"], count null, events null
max_forks: rsnapsim/defunct/generalized_cpp/test_translation_ssa_generic.py in MunskyGroup/rSNAPsim @ af3e496d5252e1d2e1da061277123233a5d609b4, licenses ["MIT"], count 1, events 2020-12-02T06:36:17.000Z to 2020-12-02T06:36:17.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 6 18:17:09 2020
@author: willi
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:07:30 2020
@author: willi
"""
#Test file for generalized SSA
#import rSNAPsim as rss
import numpy as np
import time
import matplotlib.pyplot as plt
import ssa_translation_generic
def generate_additional_ks(enters,pauses,jumps,stops,L):
def frame_check_1(L,arr):
return (L- arr[:,1]+1)*(arr[:,1]>0) + L*(arr[:,1]>1)
def frame_check_3(L,arr):
return (L- arr[:,3]+1)*(arr[:,3]>0) + L*(arr[:,3]>1)
def gen_ks_1_loc(L,arr):
arr[:,0] = arr[:,0]+frame_check_1(L,arr)
arr[:,1] = arr[:,2]
arr = arr[:,0:2]
max_arr = np.max( arr[:,0])
return arr,max_arr
def gen_ks_3_loc(L,arr):
arr[:,0] = arr[:,0]+ frame_check_1(L,arr)
arr[:,1] = arr[:,2]+ frame_check_3(L,arr)
arr[:,2] = arr[:,4]
arr = arr[:,0:3]
max_arr = max([np.max( arr[:,0]),np.max( arr[:,1])])
return arr,max_arr
max_enter = 0
max_pause = 0
max_stop = 0
max_jump = 0
k_jumps = np.copy(jumps)
k_pauses = np.copy(pauses)
k_stops = np.copy(stops)
k_enters = np.copy(enters)
if len(k_enters) != 0:
k_enters,max_enter = gen_ks_1_loc(L,k_enters)
if len(k_pauses) != 0:
k_pauses,max_pause = gen_ks_1_loc(L,k_pauses)
if len(k_stops) != 0:
k_stops,max_stop = gen_ks_1_loc(L,k_stops)
if len(k_jumps) != 0:
k_jumps,max_jump = gen_ks_3_loc(L,k_jumps)
max_loc = max(max_jump,max_stop,max_pause,max_enter)
if max_loc <=L:
frames_used = 0
if max_loc > L:
frames_used = 1
if max_loc > 2*L-1 :
frames_used = 2
return k_enters, k_pauses, k_stops, k_jumps, frames_used
#rsnap = rss.rSNAPsim()
#rsnap.open_seq_file('gene_files/H2B_withTags.txt')
#rsnap.run_default()
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[149]= 0
kelong[248] = 0
#k_fss = np.array([[200,0,200,1,.3]])
k_pause = np.array([[30,0,.01]])
k_enters = np.array([[10,0,.02],[10,2,.04]],dtype=np.float64)
k_stops = np.array([[50,0,10],[50,1,10],[50,2,10]],dtype=np.float64)
k_fss = np.array([[20,0,20,1,1]],dtype=np.float64)
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,[],k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
ribtimes = np.zeros((400),dtype=np.float64)
coltimes = np.zeros((400),dtype=np.int32)
ssa_translation_generic.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,2,0,3,1 )
all_results[i,:] = result
all_frapresults[i,:] = frapresult
all_coltimes[i,:] = coltimes
all_ribtimes[i,:] = ribtimes
all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 and +2,10 FSS: 0,20 to +1,20 Stops: 50 0,1,2' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
###################################################################
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 3
kelong[79] = 0
k_enters = np.array([[10,0,.04]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,0,10]],dtype=np.float64)
k_fss = []
k_pause = []
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
ribtimes = np.zeros((400),dtype=np.float64)
coltimes = np.zeros((400),dtype=np.int32)
ssa_translation_generic.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,len(k_enters),len(k_pauses),len(k_stops),len(k_jumps) )
all_results[i,:] = result
all_frapresults[i,:] = frapresult
all_coltimes[i,:] = coltimes
all_ribtimes[i,:] = ribtimes
all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 stops: 0,50 and 0,80' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
#####################################################
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[179] = 0
k_enters = np.array([[10,0,.04]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,1,10]],dtype=np.float64)
k_fss = np.array([[30,0,30,1,1]],dtype=np.float64)
k_pause = []
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
ribtimes = np.zeros((400),dtype=np.float64)
coltimes = np.zeros((400),dtype=np.int32)
ssa_translation_generic.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,1,0,2,1 )
all_results[i,:] = result
all_frapresults[i,:] = frapresult
all_coltimes[i,:] = coltimes
all_ribtimes[i,:] = ribtimes
all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 stops: 0,50 and 1,80 FSS: 0,30 to 1,30' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
######################
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[278] = 0
k_enters = np.array([[10,0,.04],[10,2,.02]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,2,10]],dtype=np.float64)
k_fss = []
k_pause = []
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
ribtimes = np.zeros((400),dtype=np.float64)
coltimes = np.zeros((400),dtype=np.int32)
ssa_translation_generic.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,2,0,2,0 )
all_results[i,:] = result
all_frapresults[i,:] = frapresult
all_coltimes[i,:] = coltimes
all_ribtimes[i,:] = ribtimes
all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 2,20 stops: 0,50 and 2,80' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
###########
k = np.ones((1,300)).flatten()
kelong = k[1:-1]
kelong[49] = 0
kelong[39] = 0.1
kelong[278] = 0
k_enters = np.array([[10,0,.04],[10,2,.02]],dtype=np.float64)
k_stops = np.array([[50,0,10],[80,2,10]],dtype=np.float64)
k_fss = []
k_pause = np.array([[40,0,100]],dtype=np.float64)
#k_pause = np.array([[30,2,100],[40,2,100]],dtype=np.float64)
k_enters,k_pauses,k_stops,k_jumps,frames_used = generate_additional_ks(k_enters,k_pause,k_fss,k_stops,100)
t_array = np.array([0,100,500],dtype=np.float64)
t0 = 15
t_array = np.linspace(0,400,400,dtype=np.float64)
N_rib = 200
result = np.zeros((len(t_array)*N_rib),dtype=np.int32 )
#kelong = np.array([3.1,3.2,3.3,3.4,3.5,3.1,3.2,3.3,3.4,3.5],dtype=np.float64)
n_trajectories = 1
start = time.time()
all_results = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
lenfrap = len(np.intersect1d(np.where(t_array>0)[0],np.where(t_array<20)[0]))
all_frapresults = np.zeros((n_trajectories,N_rib*len(t_array)),dtype=np.int32)
all_ribtimes = np.zeros((n_trajectories,400),dtype=np.float64)
all_coltimes = np.zeros((n_trajectories,400),dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
all_ribs = np.zeros((n_trajectories,1))
seeds = np.random.randint(0,0x7FFFFFF,n_trajectories)
k_add = np.hstack((k_enters.flatten(),k_pauses.flatten(),k_stops.flatten(),k_jumps.flatten() ))
t_array_copy = np.copy(t_array)
while t_array_copy.shape[0] != 200:
t_array_copy = np.vstack((t_array_copy,t_array))
for i in range(n_trajectories):
result = np.zeros((len(t_array)*N_rib),dtype=np.int32)
frapresult = np.zeros((len(t_array)*N_rib),dtype=np.int32)
ribtimes = np.zeros((400),dtype=np.float64)
coltimes = np.zeros((400),dtype=np.int32)
ssa_translation_generic.run_SSA_generic(result,ribtimes,coltimes, kelong,frapresult,t_array, np.array([0,0,0],dtype=np.float64), seeds[i],nribs, k_add.flatten() ,len(k_enters),len(k_pauses),len(k_stops),len(k_jumps))
all_results[i,:] = result
all_frapresults[i,:] = frapresult
all_coltimes[i,:] = coltimes
all_ribtimes[i,:] = ribtimes
all_ribs[i,:] = nribs[0]
traj = all_results[0,:].reshape((N_rib,len(t_array))).T
f,ax = plt.subplots(2,1)
ax[0].set_ylim([0,300])
ax[0].fill_between([0,400],[100,100],color='red',alpha=.2)
ax[0].fill_between([0,400],[200,200],color='green',alpha=.2)
ax[0].fill_between([0,400],[300,300],color='blue',alpha=.2)
ax[0].plot(traj,'.')
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Ribosome Location')
ax[0].set_title(' 100 codons, enters: 0,10 2,20 stops: 0,50 and 2,80' )
spatial_x = (traj + (traj > 100) + (traj > 199))%100
ax[1].set_ylim([0,100])
#ax[1].plot(t_array,spatial_x,'.')
ax[1].plot(t_array_copy.T[traj<=100],spatial_x[traj <= 100],'r.')
ax[1].plot(t_array_copy.T[traj>100],spatial_x[traj > 100],'g.')
ax[1].plot(t_array_copy.T[traj>199],spatial_x[traj > 199],'b.')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Ribosome Location')
ax[1].set_title(' spatial location ' )
ax[1].legend(['0','+1','+2'])
avg_line_length: 28.862944 | max_line_length: 222 | alphanum_fraction: 0.661391
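A worked example of the frame-offset mapping in generate_additional_ks, assuming that function from the script above is in scope: for L = 100, a frame-0 location is unchanged, while a frame-2 location x maps to x + (L - 2 + 1) + L.

import numpy as np

k_enters = np.array([[10, 0, .02], [10, 2, .04]], dtype=np.float64)
k_enters_out, _, _, _, frames_used = generate_additional_ks(k_enters, [], [], [], 100)
print(k_enters_out)  # [[ 10.    0.02] [209.    0.04]]  (10 + 99 + 100 = 209)
print(frames_used)   # 2, since 209 > 2*100 - 1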

---
hexsha: 1c0f41127792580391a5ca5c77468f1bd9bec693 | size: 989 | ext: py | lang: Python
max_stars: BubbleSort.py in vanigupta20024/Programming-Challenges @ 578dba33e9f6b04052a503bcb5de9b32f33494a5, licenses ["MIT"], count 14, events 2020-10-15T21:47:18.000Z to 2021-12-01T06:06:51.000Z
max_issues: BubbleSort.py in vanigupta20024/Programming-Challenges @ 578dba33e9f6b04052a503bcb5de9b32f33494a5, licenses ["MIT"], count null, events null
max_forks: BubbleSort.py in vanigupta20024/Programming-Challenges @ 578dba33e9f6b04052a503bcb5de9b32f33494a5, licenses ["MIT"], count 4, events 2020-06-15T14:40:45.000Z to 2021-06-15T06:22:03.000Z
content:
# GHC Codepath - Module SE101
# Sandbox 10
#!/bin/python3

import math
import os
import random
import re
import sys

# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY nums as parameter.
# Implement bubble sort to sort nums and return the
# number of swaps it took to sort nums.

def bubble_sort(nums):
    last_index = len(nums)
    swaps = 0
    while last_index > 0:
        for index in range(1, last_index):
            if nums[index] < nums[index - 1]:
                # Swap in place: a helper that merely reassigns its two
                # parameters (as the original swap() did) never touches
                # the list, so the sort would loop without sorting.
                nums[index], nums[index - 1] = nums[index - 1], nums[index]
                swaps += 1
        last_index -= 1
    return swaps

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nums_count = int(input().strip())
    nums = []
    for _ in range(nums_count):
        nums_item = int(input().strip())
        nums.append(nums_item)
    result = bubble_sort(nums)
    fptr.write(str(result) + '\n')
    fptr.close()
avg_line_length: 20.604167 | max_line_length: 55 | alphanum_fraction: 0.619818
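A quick check of the swap count, assuming bubble_sort above is in scope:

data = [3, 1, 2]
print(bubble_sort(data), data)  # 2 [1, 2, 3]: swaps 1<->3, then 2<->3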

---
hexsha: 2268eafdbd08cd0d6a175d19cedd79b7b984289b | size: 3,077 | ext: py | lang: Python
max_stars: python/paddle/fluid/tests/unittests/test_lrn_op.py in mozga-intel/Paddle @ e027eb40d7dcb970888a87209af2b3e7393cf25b, licenses ["Apache-2.0"], count null, events null
max_issues: python/paddle/fluid/tests/unittests/test_lrn_op.py in mozga-intel/Paddle @ e027eb40d7dcb970888a87209af2b3e7393cf25b, licenses ["Apache-2.0"], count null, events null
max_forks: python/paddle/fluid/tests/unittests/test_lrn_op.py in mozga-intel/Paddle @ e027eb40d7dcb970888a87209af2b3e7393cf25b, licenses ["Apache-2.0"], count null, events null
content:
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestLRNOp(OpTest):
def get_input(self):
''' TODO(gongweibao): why it's grad diff is so large?
x = np.ndarray(
shape=(self.N, self.C, self.H, self.W), dtype=float, order='C')
for m in range(0, self.N):
for i in range(0, self.C):
for h in range(0, self.H):
for w in range(0, self.W):
x[m][i][h][w] = m * self.C * self.H * self.W + \
i * self.H * self.W + \
h * self.W + w + 1
'''
x = np.random.rand(self.N, self.C, self.H, self.W).astype("float32")
return x + 1
def get_out(self):
start = -(self.n - 1) // 2  # integer division so range() gets ints on Python 3
end = start + self.n
mid = np.empty((self.N, self.C, self.H, self.W)).astype("float32")
mid.fill(self.k)
for m in range(0, self.N):
for i in range(0, self.C):
for c in range(start, end):
ch = i + c
if ch < 0 or ch >= self.C:
continue
s = mid[m][i][:][:]
r = self.x[m][ch][:][:]
s += np.square(r) * self.alpha
mid2 = np.power(mid, -self.beta)
return np.multiply(self.x, mid2), mid
def get_attrs(self):
attrs = {
'n': self.n,
'k': self.k,
'alpha': self.alpha,
'beta': self.beta
}
return attrs
def setUp(self):
self.op_type = "lrn"
self.N = 2
self.C = 3
self.H = 5
self.W = 5
self.n = 5
self.k = 2.0
self.alpha = 0.0001
self.beta = 0.75
self.x = self.get_input()
self.out, self.mid_out = self.get_out()
self.inputs = {'X': self.x}
self.outputs = {'Out': self.out, 'MidOut': self.mid_out}
self.attrs = self.get_attrs()
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', max_relative_error=0.01)
class TestLRNMKLDNNOp(TestLRNOp):
def get_attrs(self):
attrs = TestLRNOp.get_attrs(self)
attrs['use_mkldnn'] = True
return attrs
def test_check_output(self):
self.check_output(atol=0.002)
if __name__ == "__main__":
unittest.main()
avg_line_length: 30.166667 | max_line_length: 76 | alphanum_fraction: 0.529737
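The reference output above computes mid = k + alpha * (sum of squared inputs over a cross-channel window), then out = x * mid^(-beta). A standalone sketch of that formula for a single pixel column, with made-up values:

import numpy as np

x = np.array([1.0, 2.0, 3.0])          # C = 3 channels at one (n, h, w)
n, k, alpha, beta = 5, 2.0, 1e-4, 0.75
out = np.empty_like(x)
for i in range(len(x)):
    lo = max(0, i - (n - 1) // 2)
    hi = min(len(x), i + (n - 1) // 2 + 1)
    mid = k + alpha * np.sum(x[lo:hi] ** 2)  # cross-channel window
    out[i] = x[i] * mid ** -beta
print(out)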

---
hexsha: 6b201e0ebcf484cb1aaced0890e1473662effdb8 | size: 1,899 | ext: py | lang: Python
max_stars: OP_ui_list_chapter.py in Tilapiatsu/blender-lineup_maker @ b020a791645b08328287e932aa2e9287d222dc4c, licenses ["MIT"], count 1, events 2019-07-16T13:11:29.000Z to 2019-07-16T13:11:29.000Z
max_issues: OP_ui_list_chapter.py in Tilapiatsu/blender-lineup_maker @ b020a791645b08328287e932aa2e9287d222dc4c, licenses ["MIT"], count null, events null
max_forks: OP_ui_list_chapter.py in Tilapiatsu/blender-lineup_maker @ b020a791645b08328287e932aa2e9287d222dc4c, licenses ["MIT"], count null, events null
content:
import bpy, re
from . import logger as L


def add_keyword(context, naming_convention, keyword):
    if len(naming_convention):
        return '{}{}{}{}{}'.format(naming_convention, context.scene.lm_separator, '<', keyword, '>')
    else:
        return '{}{}{}{}'.format(naming_convention, '<', keyword, '>')


def slice_keyword(context, convention):
    keyword_pattern = re.compile(r'[{0}]?(<[a-zA-Z0-9^?^!]+>|[a-zA-Z0-9]+)[{0}]?'.format(context.scene.lm_separator), re.IGNORECASE)
    return keyword_pattern.findall(convention)


def remove_keyword(context, convention):
    scn = context.scene
    keyword = slice_keyword(context, convention)
    new_keyword = ''
    length = len(keyword)
    for i, k in enumerate(keyword):
        if i < length - 1:
            new_keyword = new_keyword + k
        if i < length - 2:
            new_keyword = new_keyword + scn.lm_separator
    return new_keyword


class LM_UI_AddChapterKeyword(bpy.types.Operator):
    bl_idname = "scene.lm_add_chapter_keyword"
    bl_label = "Add Keyword to the current chapter"
    bl_options = {'REGISTER', 'UNDO'}

    keyword: bpy.props.StringProperty(default='')

    def execute(self, context):
        # Fall back to the selected keyword when none is passed explicitly;
        # the original only assigned `keyword` in the empty case, raising a
        # NameError whenever self.keyword was set.
        if self.keyword == '':
            keyword = context.scene.lm_keywords[context.scene.lm_keyword_idx].name
        else:
            keyword = self.keyword
        context.scene.lm_chapter_naming_convention = add_keyword(context, context.scene.lm_chapter_naming_convention, keyword.upper())
        return {'FINISHED'}


class LM_UI_RemoveChapterKeyword(bpy.types.Operator):
    bl_idname = "scene.lm_remove_chapter_keyword"
    bl_label = "Clear Chapter keyword"
    bl_options = {'REGISTER', 'UNDO'}
    bl_description = "Remove last Chapter keyword."

    def execute(self, context):
        scn = context.scene
        scn.lm_chapter_naming_convention = remove_keyword(context, scn.lm_chapter_naming_convention)
        return {'FINISHED'}
avg_line_length: 33.315789 | max_line_length: 139 | alphanum_fraction: 0.674566
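A bpy-free restatement of add_keyword, assuming '_' as the scene separator (context.scene.lm_separator); the helper name is hypothetical:

separator = '_'

def add_keyword_plain(naming_convention, keyword):
    # Same string shape as add_keyword above, without the Blender context.
    if len(naming_convention):
        return '{}{}<{}>'.format(naming_convention, separator, keyword)
    return '<{}>'.format(keyword)

nc = add_keyword_plain('', 'CHAPTER')  # '<CHAPTER>'
nc = add_keyword_plain(nc, 'ASSET')    # '<CHAPTER>_<ASSET>'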

---
hexsha: e85cc9c585511309e4279c49ae21a57ef1d7fac2 | size: 717 | ext: py | lang: Python
max_stars: web/transiq/restapi/dynamo/config.py in manibhushan05/transiq @ 763fafb271ce07d13ac8ce575f2fee653cf39343, licenses ["Apache-2.0"], count null, events null
max_issues: web/transiq/restapi/dynamo/config.py in manibhushan05/transiq @ 763fafb271ce07d13ac8ce575f2fee653cf39343, licenses ["Apache-2.0"], count 14, events 2020-06-05T23:06:45.000Z to 2022-03-12T00:00:18.000Z
max_forks: web/transiq/restapi/dynamo/config.py in manibhushan05/transiq @ 763fafb271ce07d13ac8ce575f2fee653cf39343, licenses ["Apache-2.0"], count null, events null
content:
from django.conf import settings
from restapi.dynamo.models import DEV_GPS_LOCATION, STAGE_GPS_LOCATION, PROD_GPS_LOCATION, LOCAL_GPS_LOCATION, DEV_User, \
STAGE_User, PROD_User, LOCAL_User
class DynamoTablesEnvConfiguration:
    def __init__(self):
        if settings.ENV == 'dev':
            self.GPS_LOCATION = DEV_GPS_LOCATION
            self.User = DEV_User
        elif settings.ENV == 'stage':
            self.GPS_LOCATION = STAGE_GPS_LOCATION
            self.User = STAGE_User
        elif settings.ENV == 'prod':
            self.GPS_LOCATION = PROD_GPS_LOCATION
            self.User = PROD_User
        else:
            self.GPS_LOCATION = LOCAL_GPS_LOCATION
            self.User = LOCAL_User
avg_line_length: 35.85 | max_line_length: 122 | alphanum_fraction: 0.658298

---
hexsha: 928e349987bf4c2d71fc7819c3b51470956f99bf | size: 7,788 | ext: py | lang: Python
max_stars: src/iaqualink/device.py in frenck/iaqualink-py @ c75d693225556ec59e32088479be9c0fd84f6e9c, licenses ["BSD-3-Clause"], count null, events null
max_issues: src/iaqualink/device.py in frenck/iaqualink-py @ c75d693225556ec59e32088479be9c0fd84f6e9c, licenses ["BSD-3-Clause"], count null, events null
max_forks: src/iaqualink/device.py in frenck/iaqualink-py @ c75d693225556ec59e32088479be9c0fd84f6e9c, licenses ["BSD-3-Clause"], count null, events null
content:
from enum import Enum, auto, unique
import logging
from typing import Optional
from iaqualink.typing import DeviceData
from iaqualink.const import (
AQUALINK_TEMP_CELSIUS_LOW,
AQUALINK_TEMP_CELSIUS_HIGH,
AQUALINK_TEMP_FAHRENHEIT_LOW,
AQUALINK_TEMP_FAHRENHEIT_HIGH,
)
@unique
class AqualinkState(Enum):
OFF = "0"
ON = "1"
ENABLED = "3"
# XXX - I don't know the exact values per type. The enum is pretty much a
# placeholder. If you know what type of lights you have and have debugging
# on, please submit an issue to GitHub with the details so I can update the
# code.
@unique
class AqualinkLightType(Enum):
JANDY_LED_WATERCOLORS = auto()
JANDY_COLORS = auto()
HAYWARD_COLOR_LOGIC = auto()
PENTAIR_INTELLIBRITE = auto()
PENTAIR_SAM_SAL = auto()
# XXX - These values are probably LightType-specific but they're all I have
# at the moment. I can see this changing into a color profile system later.
class AqualinkLightEffect(Enum):
NONE = "0"
ALPINE_WHITE = "1"
SKY_BLUE = "2"
COBALT_BLUE = "3"
CARIBBEAN_BLUE = "4"
SPRING_GREEN = "5"
EMERALD_GREEN = "6"
EMERALD_ROSE = "7"
MAGENTA = "8"
VIOLENT = "9"
SLOW_COLOR_SPLASH = "10"
FAST_COLOR_SPLASH = "11"
USA = "12"
FAT_TUESDAY = "13"
DISCO_TECH = "14"
LOGGER = logging.getLogger("aqualink")
class AqualinkDevice(object):
def __init__(self, system: "AqualinkSystem", data: "DeviceData"):
self.system = system
self.data = data
def __repr__(self) -> str:
attrs = ["name", "data"]
attrs = ["%s=%r" % (i, getattr(self, i)) for i in attrs]
return f'{self.__class__.__name__}({" ".join(attrs)})'
@property
def label(self) -> str:
if "label" in self.data:
label = self.data["label"]
return " ".join([x.capitalize() for x in label.split()])
else:
label = self.data["name"]
return " ".join([x.capitalize() for x in label.split("_")])
@property
def state(self) -> str:
return self.data["state"]
@property
def name(self) -> str:
return self.data["name"]
@classmethod
def from_data(cls, system: "AqualinkSystem", data: DeviceData) -> "AqualinkDevice":
if data["name"].endswith("_heater"):
class_ = AqualinkHeater
elif data["name"].endswith("_set_point"):
class_ = AqualinkThermostat
elif data["name"].endswith("_pump"):
class_ = AqualinkPump
elif data["name"] == "freeze_protection":
class_ = AqualinkBinarySensor
elif data["name"].startswith("aux_"):
if data["type"] == "2":
class_ = AqualinkColorLight
elif data["type"] == "1":
class_ = AqualinkDimmableLight
elif "LIGHT" in data["label"]:
class_ = AqualinkLightToggle
else:
class_ = AqualinkAuxToggle
else:
class_ = AqualinkSensor
return class_(system, data)
class AqualinkSensor(AqualinkDevice):
pass
class AqualinkBinarySensor(AqualinkSensor):
"""These are non-actionable sensors, essentially read-only on/off."""
@property
def is_on(self) -> bool:
return (
AqualinkState(self.state) in [AqualinkState.ON, AqualinkState.ENABLED]
if self.state
else False
)
class AqualinkToggle(AqualinkDevice):
@property
def is_on(self) -> bool:
return (
AqualinkState(self.state) in [AqualinkState.ON, AqualinkState.ENABLED]
if self.state
else False
)
async def turn_on(self) -> None:
if not self.is_on:
await self.toggle()
async def turn_off(self) -> None:
if self.is_on:
await self.toggle()
async def toggle(self) -> None:
raise NotImplementedError()
class AqualinkPump(AqualinkToggle):
async def toggle(self) -> None:
await self.system.set_pump(f"set_{self.name}")
class AqualinkHeater(AqualinkToggle):
async def toggle(self) -> None:
await self.system.set_heater(f"set_{self.name}")
class AqualinkAuxToggle(AqualinkToggle):
async def toggle(self) -> None:
await self.system.set_aux(self.data["aux"])
# Using AqualinkLight as a Mixin so we can use isinstance(dev, AqualinkLight).
class AqualinkLight(object):
@property
def brightness(self) -> Optional[int]:
raise NotImplementedError()
@property
def effect(self) -> Optional[str]:
raise NotImplementedError()
@property
def is_dimmer(self) -> bool:
return self.brightness is not None
@property
def is_color(self) -> bool:
return self.effect is not None
class AqualinkLightToggle(AqualinkLight, AqualinkAuxToggle):
    @property
    def brightness(self) -> Optional[int]:
        return None

    @property
    def effect(self) -> Optional[str]:
        return None
class AqualinkDimmableLight(AqualinkLight, AqualinkDevice):
@property
def brightness(self) -> Optional[int]:
return int(self.data["subtype"])
@property
def effect(self) -> Optional[str]:
return None
@property
def is_on(self) -> bool:
return self.brightness != 0
async def set_brightness(self, brightness: int) -> None:
# Brightness only works in 25% increments.
if brightness not in [0, 25, 50, 75, 100]:
msg = f"{brightness}% isn't a valid percentage. Only use 25% increments."
raise Exception(msg)
data = {"aux": self.data["aux"], "light": f"{brightness}"}
await self.system.set_light(data)
async def turn_on(self, level: int = 100) -> None:
if self.brightness != level:
await self.set_brightness(level)
async def turn_off(self) -> None:
if self.is_on:
await self.set_brightness(0)
class AqualinkColorLight(AqualinkLight, AqualinkDevice):
@property
def brightness(self) -> Optional[int]:
# Assuming that color lights don't have adjustable brightness.
return None
@property
def effect(self) -> Optional[str]:
return self.data["state"]
@property
def is_on(self) -> bool:
return self.effect != "0"
async def set_effect(self, effect: str) -> None:
try:
AqualinkLightEffect(effect)
except Exception:
msg = f"{repr(effect)} isn't a valid effect."
raise Exception(msg)
data = {
"aux": self.data["aux"],
"light": effect,
"subtype": self.data["subtype"],
}
await self.system.set_light(data)
async def turn_off(self):
if self.is_on:
await self.set_effect("0")
async def turn_on(self):
if not self.is_on:
await self.set_effect("1")
class AqualinkThermostat(AqualinkDevice):
@property
def temp(self) -> str:
# Spa takes precedence for temp1 if present.
if self.name.startswith("pool") and self.system.has_spa:
return "temp2"
return "temp1"
async def set_temperature(self, temperature: int) -> None:
unit = self.system.temp_unit
if unit == "F":
low = AQUALINK_TEMP_FAHRENHEIT_LOW
high = AQUALINK_TEMP_FAHRENHEIT_HIGH
else:
low = AQUALINK_TEMP_CELSIUS_LOW
high = AQUALINK_TEMP_CELSIUS_HIGH
if temperature not in range(low, high + 1):
msg = f"{temperature}{unit} isn't a valid temperature"
msg += f" ({low}-{high}{unit})."
raise Exception(msg)
data = {self.temp: temperature}
await self.system.set_temps(data)
avg_line_length: 27.519435 | max_line_length: 87 | alphanum_fraction: 0.611325
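A sketch of the from_data dispatch, assuming the classes above are in scope; the system argument is only stored by __init__, so None suffices for inspection, and the device dict is hypothetical:

dev = AqualinkDevice.from_data(
    None,
    {"name": "aux_1", "type": "2", "label": "Pool Light",
     "state": "0", "aux": "1", "subtype": "5"},
)
print(type(dev).__name__)    # AqualinkColorLight (name starts with aux_, type "2")
print(dev.label, dev.is_on)  # Pool Light False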

---
hexsha: d35fa68092f3d5dcaaa8e981ed6c58219c7fca6d | size: 2,581 | ext: py | lang: Python
max_stars: tests/acceptance/helpers/todomvc.py in pupsikpic/selene @ 225796dbd58782c1d4baee85fd6e22eac3a9912f, licenses ["MIT"], count null, events null
max_issues: tests/acceptance/helpers/todomvc.py in pupsikpic/selene @ 225796dbd58782c1d4baee85fd6e22eac3a9912f, licenses ["MIT"], count null, events null
max_forks: tests/acceptance/helpers/todomvc.py in pupsikpic/selene @ 225796dbd58782c1d4baee85fd6e22eac3a9912f, licenses ["MIT"], count null, events null
content:
# MIT License
#
# Copyright (c) 2015-2021 Iakiv Kramarenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from selene import be, have
from selene.support.shared import browser
# TODOMVC_URL = 'file://' + os.path.abspath(os.path.dirname(__file__)) + '/../../resources/todomvcapp/home.html'
TODOMVC_URL = 'https://todomvc4tasj.herokuapp.com/'
OTHER_PAGE_URL = 'file://{}/../resources/orderapp/order.html'.format(
os.path.abspath(os.path.dirname(__file__))
)
is_TodoMVC_loaded = (
'return (Object.keys(require.s.contexts._.defined).length === 39)'
)
def open_todomvc():
# todo: refactor to use repo copy of todomvc
browser.open(TODOMVC_URL)
browser.wait_until(have.js_returned(True, is_TodoMVC_loaded))
def given_at_other_page():
if not browser.element("#order_details").matching(be.visible):
browser.open(OTHER_PAGE_URL)
def execute_js(js_string):
return browser.execute_script(js_string)
def given(*tasks):
if not browser.element("#new-todo").matching(be.visible):
open_todomvc()
import json
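    # Seed the app's model directly: this TodoMVC flavor reads its tasks from
    # the 'todos-troopjs' localStorage key, so the tasks are JSON-encoded and
    # quote-escaped for embedding in the single-quoted JS string below.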
script = 'localStorage.setItem("todos-troopjs", "{}")'.format(
str(json.dumps(tasks))
.replace('"', '\\"')
.replace('\\\\"', '\\\\\\"')
.replace("False", "false")
)
execute_js(script)
open_todomvc()
def given_empty_tasks():
given()
def task(task_text, is_completed=False):
return dict(title=task_text, completed=is_completed)
def given_active(*task_texts):
return given(*[task(text) for text in task_texts])
when_active = given_active
| 30.72619 | 112 | 0.722201 |
8ee849c873327d32f20185758efb728f272ef445 | 1,001 | py | Python | IPython/utils/tests/test_pycolorize.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | ["BSD-3-Clause-Clear"] | 1 | 2018-09-24T13:45:40.000Z | 2018-09-24T13:45:40.000Z | IPython/utils/tests/test_pycolorize.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | ["BSD-3-Clause-Clear"] | 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | IPython/utils/tests/test_pycolorize.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | ["BSD-3-Clause-Clear"] | 1 | 2015-05-17T14:14:26.000Z | 2015-05-17T14:14:26.000Z |
"""Test suite for our color utilities.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third party
import nose.tools as nt
# our own
from IPython.utils.PyColorize import Parser
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_unicode_colorize():
p = Parser()
f1 = p.format('1/0', 'str')
f2 = p.format(u'1/0', 'str')
nt.assert_equals(f1, f2)
| 28.6 | 78 | 0.356643 |
2638834f4f35f97a5a909b3d626eab099d81d416 | 59 | py | Python | atmPy/data_archives/noaa_gml/__init__.py | hagne/atm-py | 3d928a537948866fd78ff9d78b79469e6b7e1de1 | ["MIT"] | 5 | 2015-09-09T20:06:59.000Z | 2021-03-17T17:41:40.000Z | atmPy/data_archives/noaa_gml/__init__.py | hagne/atm-py | 3d928a537948866fd78ff9d78b79469e6b7e1de1 | ["MIT"] | 9 | 2016-02-22T18:15:21.000Z | 2020-01-09T15:56:30.000Z | atmPy/data_archives/noaa_gml/__init__.py | hagne/atm-py | 3d928a537948866fd78ff9d78b79469e6b7e1de1 | ["MIT"] | 3 | 2016-04-19T16:19:35.000Z | 2017-08-18T16:01:40.000Z |
# -*- coding: utf-8 -*-
from .gml_lab import get_all_sites
| 19.666667 | 34 | 0.677966 |
9348ec62986bb140b019905801724df663b7ee04 | 89 | py | Python | faceweb/faceapp/apps.py | Dheeraj2407/Face-web | e73696e0ff12fc19a8d88ccaf2a830db4fcbb102 | ["MIT"] | null | null | null | faceweb/faceapp/apps.py | Dheeraj2407/Face-web | e73696e0ff12fc19a8d88ccaf2a830db4fcbb102 | ["MIT"] | null | null | null | faceweb/faceapp/apps.py | Dheeraj2407/Face-web | e73696e0ff12fc19a8d88ccaf2a830db4fcbb102 | ["MIT"] | null | null | null |
from django.apps import AppConfig
class FaceappConfig(AppConfig):
name = 'faceapp'
| 14.833333 | 33 | 0.752809 |
f43166246af4b769a0a32b268ad523e766f29d89 | 31,121 | py | Python | kanaconv/converter.py | msikma/kanaconv | 194f142e616ab5dd6d13a687b96b9f8abd1b4ea8 | ["MIT"] | 7 | 2015-09-17T14:44:04.000Z | 2018-06-28T07:55:59.000Z | kanaconv/converter.py | msikma/kanaconv | 194f142e616ab5dd6d13a687b96b9f8abd1b4ea8 | ["MIT"] | null | null | null | kanaconv/converter.py | msikma/kanaconv | 194f142e616ab5dd6d13a687b96b9f8abd1b4ea8 | ["MIT"] | null | null | null |
# coding=utf8
#
# (C) 2015-2016, MIT License
'''
Finite state machine that converts hiragana and katakana strings to rōmaji
according to Modified Hepburn transliteration rules.
Only kana and rōmaji is usable; kanji can't be used as input.
Aside from the usual kana, the following are supported:
* The wi/we kana characters (ゐゑ・ヰヱ)
* Rare characters that are mostly for loanwords (ヺヸヷヴゔ)
* The repeater characters (ゝゞヽヾ)
* The yori and koto ligatures (ゟ・ヿ)
* Numerous punctuation and bracket characters (e.g. 【】「」・。, etc.)
Conversely, the following characters and features are not supported,
with no plans to support them in the future:
* Half width katakana (U+FF65 - U+FF9F)
* Enclosed katakana (U+32D0 - U+32FE)
* Katakana phonetic extensions for Ainu (U+31F0 - U+31FF)
* Historical kana supplement (𛀀; U+1B000, 𛀁; U+1B001)
* Enclosed signs (🈁; U+1F201, 🈂; U+1F202, 🈓; U+1F213)
* Rare typographical symbols
* Vertical-only symbols
The theoretical combinations yi, ye and wu don't exist, nor does the
repeater mark with handakuten.
'''
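# Illustrative usage (a sketch; the import path is an assumption based on this
# file living at kanaconv/converter.py):
#
#     from kanaconv.converter import KanaConv
#     conv = KanaConv()
#     conv.to_romaji('とうきょう')  # -> 'tōkyō' ('o' + 'u' merges into a long vowel)
#     conv.to_romaji('サッカー')    # -> 'sakkā' (geminate marker + long vowel marker)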
import sys
import re
from .utils import kana_romaji_lt, merge_dicts, fw_romaji_lt
from .exceptions import (
InvalidCharacterTypeError, UnexpectedCharacterError
)
from .charsets import (
romaji, katakana, hiragana, lvmarker, fw_romaji, punctuation,
punct_spacing, preprocess_chars, macron_vowels, circumflex_vowels
)
from .constants import (
CV, XVOWEL, VOWEL, END_CHAR, UNKNOWN_DISCARD, UNKNOWN_RAISE,
UNKNOWN_INCLUDE, MACRON_STYLE, CIRCUMFLEX_STYLE
)
# Lookup table for consonant-vowel (cv) kana and their rōmaji data.
cvs_romaji = romaji['set_cvs']
cvs_katakana = katakana['set_cvs']
cvs_hiragana = hiragana['set_cvs']
cv_lt = kana_romaji_lt(cvs_romaji, cvs_katakana, cvs_hiragana)
# Lookup table for vowel kana.
vowels_romaji = romaji['set_vowels']
vowels_katakana = katakana['set_vowels']
vowels_hiragana = hiragana['set_vowels']
vowel_lt = kana_romaji_lt(vowels_romaji, vowels_katakana, vowels_hiragana)
# Lookup table for small vowel kana.
xvowels_romaji = romaji['set_xvowels']
xvowels_katakana = katakana['set_xvowels']
xvowels_hiragana = hiragana['set_xvowels']
xvowel_lt = kana_romaji_lt(xvowels_romaji, xvowels_katakana, xvowels_hiragana)
# Global lookup table for all kana character types except the digraphs.
kana_lt = merge_dicts(cv_lt, vowel_lt, xvowel_lt)
# Lookup table for digraph kana.
di_a_romaji = romaji['set_digraphs_a']
di_b_romaji = romaji['set_digraphs_b']
di_a_katakana = katakana['set_digraphs_a']
di_b_katakana = katakana['set_digraphs_b']
di_a_hiragana = hiragana['set_digraphs_a']
di_b_hiragana = hiragana['set_digraphs_b']
di_a_lt = kana_romaji_lt(di_a_romaji, di_a_katakana, di_a_hiragana)
di_b_lt = kana_romaji_lt(di_b_romaji, di_b_katakana, di_b_hiragana)
# String replacement table, including the punctuation characters.
repl = merge_dicts(
katakana['replacements'], hiragana['replacements'], punctuation,
# Add the lookup table for fullwidth romaji.
fw_romaji_lt(list(fw_romaji['full']), list(fw_romaji['regular']))
)
# We use sets to be able to do quick lookups.
cvs = set(cv_lt)
vowels = set(vowel_lt)
xvowels = set(xvowel_lt)
di_a = set(di_a_lt)
di_b = set(di_b_lt)
geminates = {katakana['geminate'], hiragana['geminate']}
# Repeater characters (with and without dakuten).
rpts = {katakana['repeater'], hiragana['repeater']}
drpts = {katakana['repeater_dakuten'], hiragana['repeater_dakuten']}
# The lookup tables of characters that can have a (han)dakuten, and their sets.
dkt_lt = merge_dicts(katakana['dakutenize'], hiragana['dakutenize'])
dkt_cvs = set(dkt_lt)
hdkt_lt = merge_dicts(katakana['handakutenize'], hiragana['handakutenize'])
hdkt_cvs = set(hdkt_lt)
# The singular dakuten characters.
dkt = {hiragana['dakuten'], hiragana['spacing_dakuten']}
hdkt = {hiragana['handakuten'], hiragana['spacing_handakuten']}
# Character combinations that can become long vowels,
# notwithstanding the usage of the long vowel marker.
lv_combinations = {('a', 'a'), ('u', 'u'), ('e', 'e'), ('o', 'o'), ('o', 'u')}
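# E.g. おう ('o' followed by 'u') lengthens to 'ō', but うお stays 'uo': the order
# of the pair matters, which is why ('u', 'o') is absent from the set.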
# Characters that trigger an apostrophe after a lone 'n'.
n_apostrophe = {'a', 'i', 'u', 'e', 'o', 'y'}
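# E.g. しんや -> shin'ya (apostrophe before 'y'), but しんか -> shinka.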
# Whether we're on Python 2--used for some legacy compatibility code.
PYTHON_2 = sys.version_info < (3, 0)
# Translation table for macron to circumflex style long vowels.
if not PYTHON_2:
vowels_to_circumflexes = str.maketrans(macron_vowels, circumflex_vowels)
else:
macron_vowels_ord = [ord(char) for char in macron_vowels]
vowels_to_circumflexes = dict(zip(macron_vowels_ord, circumflex_vowels))
# The replacement character for impossible geminate marker combinations.
# E.g. っえ becomes -e. todo: implement
REPL_CHAR = romaji['repl_char']
# The character that follows the 'n' before vowels and 'y'.
APOSTROPHE_CHAR = romaji['apostrophe_char']
# The valid character types.
CHAR_TYPES = {CV, VOWEL, XVOWEL}
# Two special characters that change the machine's behavior.
WORD_BORDER = '|' # word boundary, e.g. 子馬 = こ|うま = kouma, not kōma.
PARTICLE_INDICATOR = '.' # indicates a particle, e.g. わたし.は = watashi wa.
class KanaConv(object):
'''
The main converter class. After initialization, use to_romaji()
to convert a kana string to rōmaji.
'''
def __init__(self):
'''
Initializes the variables we'll use in the state machine.
Also see the set_state() function.
'''
# What to do with unknown characters; either we discard them,
# include them in the output, or raise an exception.
self.unknown_strategy = UNKNOWN_INCLUDE
self.unknown_char = None
# Long vowel style, either with macron (ā) or with circumflex (â).
self.vowel_style = MACRON_STYLE
# The case of the final output.
self.uppercase = False
# The character stack, containing the characters of the rōmaji output.
self.stack = []
# Number of long vowel markers in the state.
self.lvmarker_count = 0
# The ウ flag: whether a long vowel marker was added due to the
# presence of a ウ. Needed in case of the 'w' exception.
self.has_u_lvm = False
# Number of geminate markers in the state.
self.geminate_count = 0
# The character that will directly follow the flushed character.
self.next_char_info = None
self.next_char_type = None
# The currently active vowel character.
self.active_vowel = None
self.active_vowel_info = None
self.active_vowel_ro = None
# The currently active small vowel character.
self.active_xvowel = None
self.active_xvowel_info = None
# The currently active character.
self.active_char = None
self.active_char_info = None
# The type of character; either a consonant-vowel pair or a vowel.
self.active_char_type = None
# Information on digraph character parts.
self.active_dgr_a_info = None
self.active_dgr_b_info = None
# Whether the state has a small vowel or digraph second part.
self.has_xvowel = False
self.has_digraph_b = False
# Make the machine ready to accept the first character.
self._empty_stack()
self._clear_char()
def _clear_char(self):
'''
Clears the current character and makes the machine ready
to accept the next character.
'''
self.lvmarker_count = 0
self.geminate_count = 0
self.next_char_info = None
self.next_char_type = None
self.active_vowel = None
self.active_vowel_info = None
self.active_vowel_ro = None
self.active_xvowel = None
self.active_xvowel_info = None
self.active_char = None
self.active_char_info = None
self.active_char_type = None
self.active_dgr_a_info = None
self.active_dgr_b_info = None
self.has_xvowel = False
self.has_digraph_b = False
self.has_u_lvm = False
self.unknown_char = None
def set_unknown_strategy(self, behavior):
'''
Sets the strategy for dealing with unknown characters.
'''
self.unknown_strategy = behavior
def set_vowel_style(self, style):
'''
Sets the vowel style to either use macrons or circumflexes.
'''
self.vowel_style = style
def set_uppercase(self, state=True):
'''
Sets the output to appear either as lowercase or as uppercase.
'''
self.uppercase = state
def _empty_stack(self):
'''
Empties the stack, making the converter ready for the next
transliteration job. Performed once after we finish one string of
input.
'''
self.stack = []
def _append_unknown_char(self):
'''
Appends the unknown character, in case one was encountered.
'''
if self.unknown_strategy == UNKNOWN_INCLUDE and \
self.unknown_char is not None:
self._append_to_stack(self.unknown_char)
self.unknown_char = None
def _flush_char(self):
'''
Appends the rōmaji characters that represent the current state
of the machine. For example, if the state includes the character
ト, plus a geminate marker and a long vowel marker, this causes
the characters "ttō" to be added to the output.
'''
# Ignore in case there's no active character, only at the
# first iteration of the conversion process.
if self.active_char is None:
if self.unknown_char is not None:
self._append_unknown_char()
return
char_info = self.active_char_info
char_type = self.active_char_type
char_ro = char_info[0]
xv = self.active_xvowel_info
di_b = self.active_dgr_b_info
gem = self.geminate_count
lvm = self.lvmarker_count
# Check for special combinations. This is exceptional behavior
# for very specific character combinations, too unique to
# build into the data model for every kana.
# If a special combination is found, we'll replace the
# rōmaji character we were planning on flushing.
if char_type == VOWEL and len(char_info) >= 3 and xv is not None:
try:
exc = char_info[2]['xv'][xv[0]]
# Found a special combination. Replace the rōmaji character.
char_ro = exc
except (IndexError, KeyError):
# IndexError: no 'xv' exceptions list for this vowel.
# KeyError: no exception for the current small vowel.
pass
# Check whether we're dealing with a valid char type.
if char_type not in CHAR_TYPES:
raise InvalidCharacterTypeError
# If no modifiers are active (geminate marker, small vowel marker,
# etc.) then just the currently active character is flushed.
# We'll also continue if the character is 'n', which has a special
# case attached to it that we'll tackle down below.
if xv is di_b is None and gem == lvm == 0 and char_ro != 'n':
self._append_to_stack(char_ro)
self._append_unknown_char()
self._clear_char()
return
# At this point, we're considering two main factors: the currently
# active character, and possibly a small vowel character if one is set.
# For example, if the active character is テ and a small vowel ィ
# is set, the result is 'ti'. If no small vowel is set, just
# plain 'te' comes out.
#
# Aside from this choice, we're also considering the number of active
# long vowel markers, which repeats the vowel part. If there's
# at least one long vowel marker, we also use a macron vowel
# rather than the regular one, e.g. 'ī' instead of 'i'.
if char_type == CV:
# Deconstruct the info object for clarity.
char_gem_cons = char_info[1] # the extra geminate consonant
char_cons = char_info[2] # the consonant part
char_lv = char_info[4] # the long vowel part
# If this flushed character is an 'n', and precedes a vowel or
# a 'y' consonant, it must be followed by an apostrophe.
char_apostrophe = ''
if char_ro == 'n' and self.next_char_info is not None:
first_char = None
if self.next_char_type == CV:
first_char = self._char_ro_cons(
self.next_char_info,
CV
)
if self.next_char_type == VOWEL or \
self.next_char_type == XVOWEL:
first_char = self._char_ro_vowel(
self.next_char_info,
VOWEL
)
# If the following character is in the set of characters
# that should trigger an apostrophe, add it to the output.
if first_char in n_apostrophe:
char_apostrophe = APOSTROPHE_CHAR
# Check to see if we've got a full digraph.
if self.active_dgr_a_info is not None and \
self.active_dgr_b_info is not None:
char_cons = self.active_dgr_a_info[0]
# Determine the geminate consonant part (which can be
# arbitrarily long).
gem_cons = char_gem_cons * gem
if xv is not None:
# Combine the consonant of the character with the small vowel.
# Use a macron vowel if there's a long vowel marker,
# else use the regular vowel.
vowel = xv[1] * lvm if lvm > 0 else xv[0]
elif di_b is not None:
# Put together the digraph. Here we produce the latter half
# of the digraph.
vowel = di_b[1] * lvm if lvm > 0 else di_b[0]
else:
# Neither a small vowel marker, nor a digraph.
vowel = ''
if vowel != '':
# If we've got a special vowel part, combine it with the
# main consonant.
char_main = char_cons + char_apostrophe + vowel
else:
# If not, process the main character and add the long vowels
# if applicable.
if lvm > 0:
char_main = char_cons + char_apostrophe + char_lv * lvm
else:
char_main = char_ro + char_apostrophe
self._append_to_stack(gem_cons + char_main)
if char_type == VOWEL or char_type == XVOWEL:
char_lv = char_info[1] # the long vowel part
if xv is not None:
xv_ro = xv[1] * lvm if lvm > 0 else xv[0]
self._append_to_stack(char_ro + xv_ro)
else:
vowel_ro = char_lv * lvm if lvm > 0 else char_ro
self._append_to_stack(vowel_ro)
# Append unknown the character as well.
self._append_unknown_char()
self._clear_char()
def _append_to_stack(self, string):
'''
Appends a string to the output stack.
'''
self.stack.append(string)
def _promote_solitary_xvowel(self):
'''
"Promotes" the current xvowel to a regular vowel, in case
it is not otherwise connected to a character.
Used to print small vowels that would otherwise get lost;
normally small vowels always form a pair, but in case one is
by itself it should basically act like a regular vowel.
'''
char_type = self.active_char_type
# Only promote if we actually have an xvowel, and if the currently
# active character is not a consonant-vowel pair or vowel.
if char_type == VOWEL or char_type == CV or self.active_xvowel is None:
return
self._set_char(self.active_xvowel, XVOWEL)
self.active_xvowel = None
self.active_xvowel_info = None
def _add_unknown_char(self, string):
'''
Adds an unknown character to the stack.
'''
if self.has_xvowel:
# Ensure an xvowel gets printed if we've got an active
# one right now.
self._promote_solitary_xvowel()
self.unknown_char = string
self._flush_char()
def _set_digraph_a(self, char):
'''
Sets the currently active character, in case it is (potentially)
the first part of a digraph.
'''
self._set_char(char, CV)
self.active_dgr_a_info = di_a_lt[char]
def _set_digraph_b(self, char):
'''
Sets the second part of a digraph.
'''
self.has_digraph_b = True
# Change the active vowel to the one provided by the second part
# of the digraph.
self.active_vowel_ro = di_b_lt[char][0]
self.active_dgr_b_info = di_b_lt[char]
def _char_lookup(self, char):
'''
Retrieves a character's info from the lookup table.
'''
return kana_lt[char]
def _char_ro_cons(self, char_info, type):
'''
Returns the consonant part of a character in rōmaji.
'''
if type == CV:
return char_info[1]
return None
def _char_ro_vowel(self, char_info, type):
'''
Returns the vowel part of a character in rōmaji.
'''
if type == CV:
return char_info[3]
if type == VOWEL or type == XVOWEL:
return char_info[0]
return None
def _set_char(self, char, type):
'''
Sets the currently active character, e.g. ト. We save some information
about the character as well. active_char_info contains the full
tuple of rōmaji info, and active_ro_vowel contains e.g. 'o' for ト.
We also set the character type: either a consonant-vowel pair
or a vowel. This affects the way the character is flushed later.
'''
self.next_char_info = self._char_lookup(char)
self.next_char_type = type
self._flush_char()
self.active_char = char
self.active_char_type = type
self.active_char_info = self._char_lookup(char)
self.active_vowel_ro = self._char_ro_vowel(self.active_char_info, type)
def _is_long_vowel(self, vowel_ro_a, vowel_ro_b):
'''
Checks whether two rōmaji vowels combine to become a long vowel.
True for a + a, u + u, e + e, o + o, and o + u. The order of
arguments matters for the o + u combination.
'''
return (vowel_ro_a, vowel_ro_b) in lv_combinations
def _set_vowel(self, vowel):
'''
Sets the currently active vowel, e.g. ア.
Vowels act slightly differently from other characters. If one
succeeds the same vowel (or consonant-vowel pair with the same vowel)
then it acts like a long vowel marker. E.g. おねえ becomes onē.
Hence, either we increment the long vowel marker count, or we
flush the current character and set the active character to this.
In some cases, the ウ becomes a consonant-vowel if it's
paired with a small vowel. We will not know this until we see
what comes after the ウ, so there's some backtracking
if that's the case.
'''
vowel_info = kana_lt[vowel]
vowel_ro = self.active_vowel_ro
if self._is_long_vowel(vowel_ro, vowel_info[0]):
# Check to see if the current vowel is ウ. If so,
# we might need to backtrack later on in case the 'u'
# turns into 'w' when ウ is coupled with a small vowel.
if vowel_ro == 'u':
self.has_u_lvm = True
self._inc_lvmarker()
else:
# Not the same, so flush the active character and continue.
self._set_char(vowel, VOWEL)
self.active_vowel_info = vowel_info
self.active_vowel = vowel
def _set_xvowel(self, xvowel):
'''
Sets the currently active small vowel, e.g. ァ.
If an active small vowel has already been set, the current character
must be flushed. (Double small vowels don't occur in dictionary
words.) After that, we'll set the current character to this small
vowel; in essence, it will act like a regular size vowel.
We'll check for digraphs too, just so e.g. しょ followed by ぉ acts
like a long vowel marker. This doesn't occur in dictionary words,
but it's the most sensible behavior for unusual input.
If the currently active character ends with the same vowel as this
small vowel, a long vowel marker is added instead.
E.g. テェ becomes 'tē'.
'''
xvowel_info = kana_lt[xvowel]
vowel_info = self.active_vowel_info
dgr_b_info = None
# Special case: if the currently active character is 'n', we must
# flush the character and set this small vowel as the active character.
# This is because small vowels cannot affect 'n' like regular
# consonant-vowel pairs.
curr_is_n = self.active_vowel_ro == 'n'
# Special case: if we've got an active vowel with special cases
# attached to it (only ウ), and the small vowel that follows it
# activates that special case, we may need to backtrack a bit.
# This is because ウ is normally 'u' but becomes 'w' if there's
# a small vowel right behind it (except the small 'u').
# The 'w' behaves totally different from a standard vowel.
if self.has_u_lvm and \
xvowel_info is not None and \
vowel_info is not None and \
len(vowel_info) > 2 and \
vowel_info[2].get('xv') is not None and \
vowel_info[2]['xv'].get(xvowel_info[0]) is not None:
# Decrement the long vowel marker, which was added on the
# assumption that the 'u' is a vowel.
self._dec_lvmarker()
# Save the current vowel. We'll flush the current character,
# without this vowel, and then set it again from a clean slate.
former_vowel = self.active_vowel
self.active_vowel_info = None
self._flush_char()
self._set_char(former_vowel, VOWEL)
if self.active_vowel_ro == xvowel_info[0]:
# We have an active character whose vowel is the same.
self._inc_lvmarker()
elif self.has_xvowel is True:
# We have an active small vowel already. Flush the current
# character and act as though the current small vowel
# is a regular vowel.
self._flush_char()
self._set_char(xvowel, XVOWEL)
return
elif self.has_digraph_b is True:
# We have an active digraph (two parts).
dgr_b_info = self.active_dgr_b_info
if curr_is_n:
self._set_char(xvowel, XVOWEL)
return
if dgr_b_info is not None:
if self._is_long_vowel(self.active_vowel_ro, dgr_b_info[0]) or \
self._is_long_vowel(self.active_dgr_b_info[0], dgr_b_info[0]):
# Same vowel as the one that's currently active.
self._inc_lvmarker()
else:
# Not the same, so flush the active character and continue.
self.active_vowel_ro = self.active_xvowel_info[0]
self._set_char(xvowel, XVOWEL)
else:
self.active_xvowel = xvowel
self.active_xvowel_info = xvowel_info
self.has_xvowel = True
def _inc_geminate(self):
'''
Increments the geminate marker count. Unless no active character
has been set, this causes the current character to be flushed.
'''
if self.active_char is not None:
self._flush_char()
self.geminate_count += 1
def _inc_lvmarker(self):
'''
Increments the long vowel marker count.
'''
# Ignore the long vowel marker in case it occurs before any
# characters that it can affect.
if self.active_char is None:
return
self.lvmarker_count += 1
def _dec_lvmarker(self):
'''
Decrements the long vowel marker count, unless it would become
a negative value.
'''
if self.lvmarker_count == 0:
return
self.lvmarker_count -= 1
def _postprocess_output(self, output):
'''
Performs the last modifications before the output is returned.
'''
# Replace long vowels with circumflex characters.
if self.vowel_style == CIRCUMFLEX_STYLE:
try:
output = output.translate(vowels_to_circumflexes)
except TypeError:
# Python 2 will error out here if there are no
# macron characters in the string to begin with.
pass
# Output the desired case.
if self.uppercase:
output = output.upper()
return output
def _flush_stack(self):
'''
Returns the final output and resets the machine's state.
'''
output = self._postprocess_output(''.join(self.stack))
self._clear_char()
self._empty_stack()
if not PYTHON_2:
return output
else:
return unicode(output)
def _preprocess_input(self, input):
'''
Preprocesses the input before it's split into a list.
'''
if not re.search(preprocess_chars, input):
# No characters that we need to preprocess, so continue without.
return input
input = self._add_punctuation_spacing(input)
return input
def _preprocess_chars(self, chars):
'''
Performs string preprocessing before the main conversion algorithm
is used. Simple string replacements (for example, fullwidth rōmaji
to regular rōmaji) are performed at this point.
'''
chars = self._normalize_dakuten(chars)
chars = self._process_repeaters(chars)
chars = self._perform_replacements(chars)
return chars
def _add_punctuation_spacing(self, input):
'''
Adds additional spacing to punctuation characters. For example,
this puts an extra space after a fullwidth full stop.
'''
for replacement in punct_spacing:
input = re.sub(replacement[0], replacement[1], input)
return input
def _perform_replacements(self, chars):
'''
Performs simple key/value string replacements that require no logic.
This is used to convert the fullwidth rōmaji, several ligatures,
and the punctuation characters.
'''
for n in range(len(chars)):
char = chars[n]
if char in repl:
chars[n] = repl[char]
# Some replacements might result in multi-character strings
# being inserted into the list. Ensure we still have a list
# of single characters for iteration.
return list(''.join(chars))
def _normalize_dakuten(self, chars):
'''
        Replaces the dakuten and handakuten modifier character combinations
        with single characters. For example, か\u3099か becomes がか,
        or は゜は becomes ぱは.
'''
prev = None
prev_n = None
        # Replace each standalone (han)dakuten mark with 0, applying it to the
        # preceding kana; the zeroes are filtered back out below.
for n in range(len(chars)):
char = chars[n]
if char in dkt:
chars[n] = 0
if prev in dkt_cvs:
chars[prev_n] = dkt_lt[prev]
if char in hdkt:
chars[n] = 0
if prev in hdkt_cvs:
chars[prev_n] = hdkt_lt[prev]
prev = char
prev_n = n
# Remove all 0 values. There should not be any other than the ones we
# just added. (This could use (0).__ne__, but that's Python 3 only.)
        return list(filter(lambda x: x != 0, chars))
def _process_repeaters(self, chars):
'''
Replace all repeater characters (e.g. turn サヾエ into サザエ).
'''
prev = None
for n in range(len(chars)):
char = chars[n]
if char in rpts:
# The character is a repeater.
chars[n] = prev
if char in drpts:
# The character is a repeater with dakuten.
# If the previous character can have a dakuten, add that
# to the stack; if not, just add whatever we had previously.
if prev in dkt_cvs:
chars[n] = dkt_lt[prev]
else:
chars[n] = prev
prev = char
return chars
def to_romaji(self, input):
'''
Converts kana input to rōmaji and returns the result.
'''
input = self._preprocess_input(input)
# Preprocess the input, making string replacements where needed.
chars = list(input)
chars = self._preprocess_chars(chars)
chars.append(END_CHAR)
for char in chars:
if char in di_a:
self._set_digraph_a(char)
continue
if char in di_b:
self._set_digraph_b(char)
continue
if char in cvs:
self._set_char(char, CV)
continue
if char in vowels:
self._set_vowel(char)
continue
if char in xvowels:
self._set_xvowel(char)
continue
if char in geminates:
self._inc_geminate()
continue
if char == lvmarker:
self._inc_lvmarker()
continue
if char == WORD_BORDER:
# When stumbling upon a word border, e.g. in ぬれ|えん,
# the current word has finished, meaning the character
# should be flushed.
self._flush_char()
continue
if char == END_CHAR:
self._promote_solitary_xvowel()
self._flush_char()
continue
# If we're still here, that means we've stumbled upon a character
# the machine can't deal with.
if self.unknown_strategy == UNKNOWN_DISCARD:
continue
if self.unknown_strategy == UNKNOWN_RAISE:
raise UnexpectedCharacterError
if self.unknown_strategy == UNKNOWN_INCLUDE:
# The default strategy.
self._add_unknown_char(char)
return self._flush_stack()
| 35.853687 | 79 | 0.613476 |
e560095578c4b4875967f12a2a1b9fbd717145e5 | 1,676 | py | Python | btcmarketsapi/tests/package-test.py | SigmaAdvancedAnalytics/python-btcmarkets | 7ae6bd79707d45a78ac4ebaba1fe6585aa8e7089 | ["MIT"] | null | null | null | btcmarketsapi/tests/package-test.py | SigmaAdvancedAnalytics/python-btcmarkets | 7ae6bd79707d45a78ac4ebaba1fe6585aa8e7089 | ["MIT"] | 4 | 2018-02-12T23:18:20.000Z | 2018-02-12T23:21:27.000Z | btcmarketsapi/tests/package-test.py | SigmaAdvancedAnalytics/btcmarketsapi | 7ae6bd79707d45a78ac4ebaba1fe6585aa8e7089 | ["MIT"] | null | null | null |
# Dependent packages
import sys
import pprint as pp
#Local packages for testing
sys.path.append("../")
import config
from api import Client
api_key = config.api_key
private_key = config.private_key
client = Client(api_key, private_key)
instrument = 'ETH'
currency = 'AUD'
limit = 200
since_order = 0
since_trade = 0
orders = [610067267,610067457]
#Account balance
print('=== Account ==='*5)
response = client.account_balance()
pp.pprint(response)
# Trading Fee
print('=== Trading Fee ==='*5)
response = client.account_trading_fee(instrument,currency)
pp.pprint(response)
# Market data
print('=== Market Tick ==='*5)
response = client.market_tick(instrument,currency)
pp.pprint(response)
response2 = client.market_all_ticks(currency)
pp.pprint(response2)
# Orderbook
print('=== Orderbook ==='*5)
response = client.market_orderbook(instrument,currency)
pp.pprint(response)
# Market Trades
print('=== Market Trades ==='*5)
response = client.market_trades(instrument,currency)
pp.pprint(response)
# Order History
print('=== Order History ==='*5)
response = client.order_history(instrument,currency,limit,since_order)
pp.pprint(response)
# Trade History
print('=== Trade History ==='*5)
response = client.trade_history(instrument,currency,limit,since_trade)
pp.pprint(response)
# Detailed Order History
print('=== Detailed Order History ==='*5)
response = client.order_detail(orders)
pp.pprint(response)
print('=== Open Orders ==='*5)
response = client.open_orders(instrument,currency,limit,since_trade)
pp.pprint(response)
"""
#client.order_open(instrument, CURRENCY, 200, 00000000)
"""
| 22.346667 | 71 | 0.713604 |
2bc709a8fb406b4661a0c30ff26e29cc90a877ec | 5,346 | py | Python | tests/core/contracts/test_implicit_contract.py | 99Kies/web3.py | b62f678f530dc5ea4aab4111f436ab479f63829c | ["MIT"] | 1 | 2021-03-25T11:14:57.000Z | 2021-03-25T11:14:57.000Z | tests/core/contracts/test_implicit_contract.py | 99Kies/web3.py | b62f678f530dc5ea4aab4111f436ab479f63829c | ["MIT"] | null | null | null | tests/core/contracts/test_implicit_contract.py | 99Kies/web3.py | b62f678f530dc5ea4aab4111f436ab479f63829c | ["MIT"] | null | null | null |
import pytest
from eth_utils import (
is_integer,
)
from web3.contract import (
ImplicitContract,
)
@pytest.fixture()
def math_contract(web3, MATH_ABI, MATH_CODE, MATH_RUNTIME, address_conversion_func):
# Deploy math contract
# NOTE Must use non-specialized contract factory or else deploy() doesn't work
MathContract = web3.eth.contract(
abi=MATH_ABI,
bytecode=MATH_CODE,
bytecode_runtime=MATH_RUNTIME,
)
tx_hash = MathContract.constructor().transact()
tx_receipt = web3.eth.waitForTransactionReceipt(tx_hash)
math_address = address_conversion_func(tx_receipt['contractAddress'])
# Return interactive contract instance at deployed address
# TODO Does parent class not implement 'deploy()' for a reason?
MathContract = web3.eth.contract(
abi=MATH_ABI,
bytecode=MATH_CODE,
bytecode_runtime=MATH_RUNTIME,
ContractFactoryClass=ImplicitContract,
)
with pytest.warns(DeprecationWarning, match='deprecated in favor of contract.caller'):
contract = MathContract(math_address)
assert contract.address == math_address
return contract
@pytest.fixture()
def get_transaction_count(web3):
def get_transaction_count(blocknum_or_label):
block = web3.eth.get_block(blocknum_or_label)
# Return the blocknum if we requested this via labels
# so we can directly query the block next time (using the same API call)
# Either way, return the number of transactions in the given block
if blocknum_or_label in ["pending", "latest", "earliest"]:
return block.number, len(block.transactions)
else:
return len(block.transactions)
return get_transaction_count
def test_implicitcontract_call_default(math_contract, get_transaction_count):
# When a function is called that defaults to call
blocknum, starting_txns = get_transaction_count("pending")
with pytest.warns(DeprecationWarning, match='deprecated in favor of classic contract syntax'):
start_count = math_contract.counter()
assert is_integer(start_count)
# Check that a call was made and not a transact
# (Auto-mining is enabled, so query by block number)
assert get_transaction_count(blocknum) == starting_txns
# Check that no blocks were mined
assert get_transaction_count("pending") == (blocknum, 0)
def test_implicitcontract_transact_default(web3, math_contract, get_transaction_count):
# Use to verify correct operation later on
with pytest.warns(DeprecationWarning, match='deprecated in favor of classic contract syntax'):
start_count = math_contract.counter()
assert is_integer(start_count) # Verify correct type
# When a function is called that defaults to transact
blocknum, starting_txns = get_transaction_count("pending")
with pytest.warns(DeprecationWarning,
match='deprecated in favor of classic contract syntax') as warnings:
math_contract.increment(transact={})
# Check that a transaction was made and not a call
assert math_contract.counter() - start_count == 1
# Check that the correct number of warnings are raised
assert len(warnings) == 2
# (Auto-mining is enabled, so query by block number)
assert get_transaction_count(blocknum) == starting_txns + 1
# Check that only one block was mined
assert get_transaction_count("pending") == (blocknum + 1, 0)
def test_implicitcontract_call_override(math_contract, get_transaction_count):
# When a function is called with transact override that defaults to call
blocknum, starting_txns = get_transaction_count("pending")
with pytest.warns(DeprecationWarning, match='deprecated in favor of classic contract syntax'):
math_contract.counter(transact={})
# Check that a transaction was made and not a call
# (Auto-mining is enabled, so query by block number)
assert get_transaction_count(blocknum) == starting_txns + 1
# Check that only one block was mined
assert get_transaction_count("pending") == (blocknum + 1, 0)
def test_implicitcontract_transact_override(math_contract, get_transaction_count):
# Use to verify correct operation later on
with pytest.warns(DeprecationWarning, match='deprecated in favor of classic contract syntax'):
start_count = math_contract.counter()
assert is_integer(start_count) # Verify correct type
# When a function is called with call override that defaults to transact
blocknum, starting_txns = get_transaction_count("pending")
with pytest.warns(DeprecationWarning,
match='deprecated in favor of classic contract syntax') as warnings:
math_contract.increment(call={})
# Check that a call was made and not a transact
assert math_contract.counter() - start_count == 0
assert len(warnings) == 2
# (Auto-mining is enabled, so query by block number)
assert get_transaction_count(blocknum) == starting_txns
# Check that no blocks were mined
assert get_transaction_count("pending") == (blocknum, 0)
def test_implicitcontract_deprecation_warning(math_contract):
with pytest.warns(DeprecationWarning, match='deprecated in favor of classic contract syntax'):
math_contract.counter(transact={})
| 44.55 | 98 | 0.728021 |
efdd3d05e7e2fb2da584cc82d5558dc5dbc8dbd6 | 889 | py | Python | dds_web/web/root.py | zishanmirza/dds_web | dbcb92176951cf7589a558833e2c870e9e47e9df | ["BSD-3-Clause"] | null | null | null | dds_web/web/root.py | zishanmirza/dds_web | dbcb92176951cf7589a558833e2c870e9e47e9df | ["BSD-3-Clause"] | 47 | 2020-02-04T15:20:12.000Z | 2020-06-01T06:25:21.000Z | dds_web/web/root.py | zishanmirza/dds_web | dbcb92176951cf7589a558833e2c870e9e47e9df | ["BSD-3-Clause"] | null | null | null |
"""Global application routes.
Most of the app routes are in `dds_web/web/user.py`.
Here we have the routes that are not specific to a user.
"""
from flask import Blueprint, render_template, jsonify
from flask import current_app as app
from dds_web import forms
pages = Blueprint("pages", __name__)
@pages.route("/", methods=["GET"])
def home():
"""Home page."""
form = forms.LoginForm()
return render_template("home.html", form=form)
@pages.route("/policy", methods=["GET"])
def open_policy():
"""Show privacy policy."""
return render_template("policy.html")
@pages.route("/status")
def get_status():
"""Return a simple status message to confirm that the system is ready."""
return jsonify({"status": "ready"})
@app.errorhandler(404)
def page_not_found(e):
# note that we set the 404 status explicitly
return render_template("404.html"), 404
| 24.027027 | 77 | 0.697413 |
68b8197e3267bb31fbfa6630d1f818518039f036 | 2,140 | py | Python | composer/models/gpt2/model.py | jbloxham/composer | 6dd0a0f297cafb404333d6280a5344bcb7f3bee6 | ["Apache-2.0"] | 2 | 2022-03-17T04:48:04.000Z | 2022-03-20T09:06:19.000Z | composer/models/gpt2/model.py | Averylamp/composer | 1afc56e9c207734aee75ff8c5b046fb55d928fb5 | ["Apache-2.0"] | null | null | null | composer/models/gpt2/model.py | Averylamp/composer | 1afc56e9c207734aee75ff8c5b046fb55d928fb5 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
from typing import TYPE_CHECKING, Mapping
import transformers
from torchmetrics.collections import MetricCollection
from composer.models.nlp_metrics import Perplexity
from composer.models.transformer_shared import MosaicTransformer
if TYPE_CHECKING:
from composer.core.types import Batch, Metrics, Tensors
class GPT2Model(MosaicTransformer):
"""Implements a GPT-2 wrapper around a MosaicTransformer.
See this `paper <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`_
    for details on the GPT-2 architecture.
Args:
module (transformers.GPT2Model): The model to wrap with this module.
config (transformers.GPT2Config): The config for the model.
        tokenizer_name (str): The name of the tokenizer used for this model,
necessary to assert required model inputs.
"""
def __init__(self, module: transformers.GPT2Model, config: transformers.GPT2Config, tokenizer_name: str) -> None:
super().__init__(
module=module, #type: ignore (thirdparty)
config=config,
tokenizer_name=tokenizer_name)
# If we ever have algorithms that modify the loss function, then this might be a bit inefficient
# because it'll compute the expensive softmax operation twice.
# Instead, we should consider figuring out how to leverage self.train_loss and return the e^self.train_loss.
# Of course, this also depends on the implementation details of algorithms.
self.train_perplexity = Perplexity()
self.val_perplexity = Perplexity()
def loss(self, outputs: Mapping, batch: Batch) -> Tensors:
if outputs.get('loss', None) is not None:
return outputs['loss']
else:
raise NotImplementedError('Calculating loss directly not supported yet.')
def metrics(self, train: bool = False) -> Metrics:
return MetricCollection([self.train_loss, self.train_perplexity]) if train else MetricCollection(
[self.val_loss, self.val_perplexity])
| 40.377358 | 117 | 0.71729 |
2dc757d1bf787a0def80127421f75a6593868d2a | 961 | py | Python | src/dictionary.py | potatoTVnet/Cryptography_lab | 703139540c4f7f4463fc0af641e1a7f0f477d6b3 | ["MIT"] | null | null | null | src/dictionary.py | potatoTVnet/Cryptography_lab | 703139540c4f7f4463fc0af641e1a7f0f477d6b3 | ["MIT"] | null | null | null | src/dictionary.py | potatoTVnet/Cryptography_lab | 703139540c4f7f4463fc0af641e1a7f0f477d6b3 | ["MIT"] | null | null | null |
from random import randint
# def read_file():
# f = open("files/words.txt", "r")
# return f.readlines()
#
#
# def random_word():
# words = read_file()
# random = randint(0, len(words))
# return words[random].replace("\n", "").lower()
# random_word()
class Dictionary:
wordarray = []
def __init__(self):
self.wordarray = self.read_file()
    def read_file(self):
        # Use context managers so both word files are closed after reading.
        with open("files/words.txt", "r") as f:
            array = f.readlines()
        with open("files/words2.txt") as f2:
            array2 = f2.readlines()
for i in range(0, len(array)):
array[i] = array[i].replace("\n", "").lower()
for i in range(0, len(array2)):
array2[i] = array2[i].replace("\n", "").lower()
return array + array2
    def random_word(self):
        # randint is inclusive on both ends, so cap at len - 1 to avoid an IndexError.
        random = randint(0, len(self.wordarray) - 1)
        return self.wordarray[random]
def contains_word(self, word: str):
return word.lower() in self.wordarray
| 23.439024 | 59 | 0.57128 |
32e8cfc9ff26d6cd8fd260a8960bf6abb716c1a2 | 7,040 | py | Python | tool/utils.py | allenwu5/pytorch-YOLOv4 | d0faa44ae563c835c59f0180707f6b946b4a97ac | ["Apache-2.0"] | null | null | null | tool/utils.py | allenwu5/pytorch-YOLOv4 | d0faa44ae563c835c59f0180707f6b946b4a97ac | ["Apache-2.0"] | null | null | null | tool/utils.py | allenwu5/pytorch-YOLOv4 | d0faa44ae563c835c59f0180707f6b946b4a97ac | ["Apache-2.0"] | null | null | null |
import sys
import os
import time
import math
import numpy as np
import itertools
import struct # get_image_size
import imghdr # get_image_size
def sigmoid(x):
return 1.0 / (np.exp(-x) + 1.)
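# Numerically stable softmax over axis 1: subtracting the row-wise max before
# exponentiating avoids overflow without changing the result.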
def softmax(x):
x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
x = x / np.expand_dims(x.sum(axis=1), axis=1)
return x
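# IoU of two boxes. With x1y1x2y2=True the boxes are corner format (x1, y1, x2, y2);
# otherwise they are (x, y, w, h). Below, uw/uh span the union extent and cw/ch the
# overlap extent, so carea is the intersection area and uarea the union area.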
def bbox_iou(box1, box2, x1y1x2y2=True):
# print('iou box1:', box1)
# print('iou box2:', box2)
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
mx = min(box1[0], box2[0])
Mx = max(box1[0] + w1, box2[0] + w2)
my = min(box1[1], box2[1])
My = max(box1[1] + h1, box2[1] + h2)
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea / uarea
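# Greedy non-maximum suppression on CPU: repeatedly keep the highest-confidence
# box and drop the remaining boxes whose overlap with it exceeds nms_thresh
# (IoU by default; intersection over the smaller area when min_mode is set).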
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
# print(boxes.shape)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = confs.argsort()[::-1]
keep = []
while order.size > 0:
idx_self = order[0]
idx_other = order[1:]
keep.append(idx_self)
xx1 = np.maximum(x1[idx_self], x1[idx_other])
yy1 = np.maximum(y1[idx_self], y1[idx_other])
xx2 = np.minimum(x2[idx_self], x2[idx_other])
yy2 = np.minimum(y2[idx_self], y2[idx_other])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
if min_mode:
over = inter / np.minimum(areas[order[0]], areas[order[1:]])
else:
over = inter / (areas[order[0]] + areas[order[1:]] - inter)
inds = np.where(over <= nms_thresh)[0]
order = order[inds + 1]
return np.array(keep)
def plot_boxes_cv2(img, boxes, savename, class_names=None, color=None):
import cv2
img = np.copy(img)
colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
width = img.shape[1]
height = img.shape[0]
detect_result_txt = f'{savename}.txt'
detect_result_img = f'{savename}.jpg'
with open(detect_result_txt, 'w') as f:
for i in range(len(boxes)):
box = boxes[i]
x1 = int(box[0] * width)
y1 = int(box[1] * height)
x2 = int(box[2] * width)
y2 = int(box[3] * height)
if color:
rgb = color
else:
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
if color is None:
rgb = (red, green, blue)
img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
# object_class_index, center_x, center_y, width, height
f.write(f' '.join([str(j) for j in [cls_id, (box[0]+box[2])/2, (box[1]+box[3])/2, box[2]-box[0], box[3]-box[1]]]))
f.write('\n')
img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
if detect_result_img:
print("save plot results to %s" % detect_result_img)
cv2.imwrite(detect_result_img, img)
return img
def read_truths(lab_path):
if not os.path.exists(lab_path):
return np.array([])
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
        truths = truths.reshape(truths.size // 5, 5)  # integer division (Python 3); avoids the single-truth shape problem
return truths
else:
return np.array([])
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip()
class_names.append(line)
return class_names
def post_processing(img, conf_thresh, nms_thresh, output, verbose=False):
# anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
# num_anchors = 9
# anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# strides = [8, 16, 32]
# anchor_step = len(anchors) // num_anchors
# [batch, num, 1, 4]
box_array = output[0]
# [batch, num, num_classes]
confs = output[1]
t1 = time.time()
    if not isinstance(box_array, np.ndarray):
        box_array = box_array.cpu().detach().numpy()
        confs = confs.cpu().detach().numpy()
num_classes = confs.shape[2]
# [batch, num, 4]
box_array = box_array[:, :, 0]
# [batch, num, num_classes] --> [batch, num]
max_conf = np.max(confs, axis=2)
max_id = np.argmax(confs, axis=2)
t2 = time.time()
bboxes_batch = []
for i in range(box_array.shape[0]):
argwhere = max_conf[i] > conf_thresh
l_box_array = box_array[i, argwhere, :]
l_max_conf = max_conf[i, argwhere]
l_max_id = max_id[i, argwhere]
bboxes = []
# nms for each class
for j in range(num_classes):
cls_argwhere = l_max_id == j
ll_box_array = l_box_array[cls_argwhere, :]
ll_max_conf = l_max_conf[cls_argwhere]
ll_max_id = l_max_id[cls_argwhere]
keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
if (keep.size > 0):
ll_box_array = ll_box_array[keep, :]
ll_max_conf = ll_max_conf[keep]
ll_max_id = ll_max_id[keep]
for k in range(ll_box_array.shape[0]):
bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3], ll_max_conf[k], ll_max_conf[k], ll_max_id[k]])
bboxes_batch.append(bboxes)
t3 = time.time()
if verbose:
print('-----------------------------------')
print(' max and argmax : %f' % (t2 - t1))
print(' nms : %f' % (t3 - t2))
print('Post processing total : %f' % (t3 - t1))
print('-----------------------------------')
return bboxes_batch
| 28.971193 | 161 | 0.520597 |
e765fc1d9a78feb9e5ca4f3504ca4933e01f31f0 | 4,661 | py | Python | train.py | vvvm23/denoising-diffusion-pytorch | 50755ca523ae1e9a5238e2de86bccb983c52378e | ["MIT"] | 102 | 2020-06-25T06:25:54.000Z | 2022-03-31T13:47:18.000Z | train.py | vvvm23/denoising-diffusion-pytorch | 50755ca523ae1e9a5238e2de86bccb983c52378e | ["MIT"] | 6 | 2020-08-09T18:48:55.000Z | 2022-03-12T01:36:06.000Z | train.py | vvvm23/denoising-diffusion-pytorch | 50755ca523ae1e9a5238e2de86bccb983c52378e | ["MIT"] | 17 | 2020-07-17T07:16:26.000Z | 2022-03-24T13:19:50.000Z |
import os
import torch
from torch import nn, optim
from torch.utils import data
from torchvision import transforms
from tensorfn import load_arg_config, load_wandb
from tensorfn import distributed as dist
from tensorfn.optim import lr_scheduler
from tqdm import tqdm
from model import UNet
from diffusion import GaussianDiffusion, make_beta_schedule
from dataset import MultiResolutionDataset
from config import DiffusionConfig
def sample_data(loader):
loader_iter = iter(loader)
epoch = 0
while True:
try:
yield epoch, next(loader_iter)
except StopIteration:
epoch += 1
loader_iter = iter(loader)
yield epoch, next(loader_iter)
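# Exponential moving average of parameters: par1 <- decay * par1 + (1 - decay) * par2.
# Used below to maintain the smoothed `ema` copy of the training weights.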
def accumulate(model1, model2, decay=0.9999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
def train(conf, loader, model, ema, diffusion, optimizer, scheduler, device, wandb):
loader = sample_data(loader)
pbar = range(conf.training.n_iter + 1)
if dist.is_primary():
pbar = tqdm(pbar, dynamic_ncols=True)
for i in pbar:
epoch, img = next(loader)
img = img.to(device)
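        # Sample one uniform diffusion timestep per image; the denoising loss is
        # estimated over random timesteps rather than the whole chain each step.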
time = torch.randint(
0,
conf.diffusion.beta_schedule["n_timestep"],
(img.shape[0],),
device=device,
)
loss = diffusion.p_loss(model, img, time)
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 1)
scheduler.step()
optimizer.step()
accumulate(
ema, model.module, 0 if i < conf.training.scheduler.warmup else 0.9999
)
if dist.is_primary():
lr = optimizer.param_groups[0]["lr"]
pbar.set_description(
f"epoch: {epoch}; loss: {loss.item():.4f}; lr: {lr:.5f}"
)
if wandb is not None and i % conf.evaluate.log_every == 0:
wandb.log({"epoch": epoch, "loss": loss.item(), "lr": lr}, step=i)
if i % conf.evaluate.save_every == 0:
if conf.distributed:
model_module = model.module
else:
model_module = model
torch.save(
{
"model": model_module.state_dict(),
"ema": ema.state_dict(),
"scheduler": scheduler.state_dict(),
"optimizer": optimizer.state_dict(),
"conf": conf,
},
f"checkpoint/diffusion_{str(i).zfill(6)}.pt",
)
def main(conf):
wandb = None
if dist.is_primary() and conf.evaluate.wandb:
wandb = load_wandb()
wandb.init(project="denoising diffusion")
device = "cuda"
beta_schedule = "linear"
conf.distributed = dist.get_world_size() > 1
transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
]
)
train_set = MultiResolutionDataset(
conf.dataset.path, transform, conf.dataset.resolution
)
train_sampler = dist.data_sampler(
train_set, shuffle=True, distributed=conf.distributed
)
train_loader = conf.training.dataloader.make(train_set, sampler=train_sampler)
model = conf.model.make()
model = model.to(device)
ema = conf.model.make()
ema = ema.to(device)
if conf.distributed:
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[dist.get_local_rank()],
output_device=dist.get_local_rank(),
)
optimizer = conf.training.optimizer.make(model.parameters())
scheduler = conf.training.scheduler.make(optimizer)
if conf.ckpt is not None:
ckpt = torch.load(conf.ckpt, map_location=lambda storage, loc: storage)
if conf.distributed:
model.module.load_state_dict(ckpt["model"])
else:
model.load_state_dict(ckpt["model"])
ema.load_state_dict(ckpt["ema"])
betas = conf.diffusion.beta_schedule.make()
diffusion = GaussianDiffusion(betas).to(device)
train(
conf, train_loader, model, ema, diffusion, optimizer, scheduler, device, wandb
)
if __name__ == "__main__":
conf = load_arg_config(DiffusionConfig)
dist.launch(
main, conf.n_gpu, conf.n_machine, conf.machine_rank, conf.dist_url, args=(conf,)
)
| 28.420732 | 88 | 0.596224 |
9dac6135000923d949d78dac072bbe3c8cbdba17 | 728 | py | Python | python/runScript/main.py | sourabharvikar3/samplePrograms | d78278a45fcd20d966d2e7da6db2888f3681e93b | ["MIT"] | null | null | null | python/runScript/main.py | sourabharvikar3/samplePrograms | d78278a45fcd20d966d2e7da6db2888f3681e93b | ["MIT"] | null | null | null | python/runScript/main.py | sourabharvikar3/samplePrograms | d78278a45fcd20d966d2e7da6db2888f3681e93b | ["MIT"] | null | null | null |
import os
import subprocess, sys
target_path = r"C:\Users\sarvikar\Documents\fileSet\script"
def RunScript(script_file):
    # Unblock-File is a PowerShell cmdlet, so it must run under powershell.exe
    # (not cmd.exe); wait for it to finish before launching the script itself.
    p = subprocess.Popen("powershell.exe Unblock-File " + script_file)
    p.communicate()
    p = subprocess.Popen("powershell.exe -ExecutionPolicy UnRestricted " + script_file, stdout=sys.stdout)
    p.communicate()
def EnumAndExecutePS():
try:
for file_name in os.listdir(target_path):
current_file_path = target_path + "\\" + file_name
if os.path.isfile(current_file_path) and file_name.endswith(".ps1"):
RunScript(current_file_path)
except Exception as e:
print(str(e))
def main():
    while True:
        EnumAndExecutePS()
if __name__ == "__main__":
main()
| 29.12 | 106 | 0.668956 |
bb3ad746fb7ac15a5518e094b81daf1d22f369fb | 1,606 | py | Python | purity_fb/purity_fb_1dot1/__init__.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | ["Apache-2.0"] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot1/__init__.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | ["Apache-2.0"] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot1/__init__.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | ["Apache-2.0"] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z |
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.1 Python SDK
Pure Storage FlashBlade REST 1.1 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.1
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.error_response import ErrorResponse
from .models.file_system import FileSystem
from .models.file_system_response import FileSystemResponse
from .models.file_system_snapshot import FileSystemSnapshot
from .models.file_system_snapshot_response import FileSystemSnapshotResponse
from .models.login_response import LoginResponse
from .models.nfs_rule import NfsRule
from .models.object_response import ObjectResponse
from .models.pagination_info import PaginationInfo
from .models.protocol_rule import ProtocolRule
from .models.pure_error import PureError
from .models.pure_object import PureObject
from .models.snapshot_suffix import SnapshotSuffix
from .models.space import Space
from .models.version_response import VersionResponse
# import apis into sdk package
from .apis.authentication_api import AuthenticationApi
from .apis.file_system_snapshots_api import FileSystemSnapshotsApi
from .apis.file_systems_api import FileSystemsApi
from .apis.version_api import VersionApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
| 35.688889 | 204 | 0.828767 |
2bfcb1ad3f7385fb96dedb0add23f1ca0b60ad54 | 9,308 | py | Python | lbry/wallet/bip32.py | nishp77/lbry-sdk | 7531401623a393a1491e3b65de0e2a65f8e45020 | ["MIT"] | 2 | 2021-12-24T18:29:49.000Z | 2021-12-26T02:04:57.000Z | lbry/wallet/bip32.py | nishp77/lbry-sdk | 7531401623a393a1491e3b65de0e2a65f8e45020 | ["MIT"] | null | null | null | lbry/wallet/bip32.py | nishp77/lbry-sdk | 7531401623a393a1491e3b65de0e2a65f8e45020 | ["MIT"] | 1 | 2022-03-10T20:37:31.000Z | 2022-03-10T20:37:31.000Z |
from coincurve import PublicKey, PrivateKey as _PrivateKey
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
from lbry.crypto.base58 import Base58
from .util import cachedproperty
class DerivationError(Exception):
""" Raised when an invalid derivation occurs. """
class _KeyBase:
""" A BIP32 Key, public or private. """
def __init__(self, ledger, chain_code, n, depth, parent):
if not isinstance(chain_code, (bytes, bytearray)):
raise TypeError('chain code must be raw bytes')
if len(chain_code) != 32:
raise ValueError('invalid chain code')
if not 0 <= n < 1 << 32:
raise ValueError('invalid child number')
if not 0 <= depth < 256:
raise ValueError('invalid depth')
if parent is not None:
if not isinstance(parent, type(self)):
raise TypeError('parent key has bad type')
self.ledger = ledger
self.chain_code = chain_code
self.n = n
self.depth = depth
self.parent = parent
def _hmac_sha512(self, msg):
""" Use SHA-512 to provide an HMAC, returned as a pair of 32-byte objects. """
hmac = hmac_sha512(self.chain_code, msg)
return hmac[:32], hmac[32:]
def _extended_key(self, ver_bytes, raw_serkey):
""" Return the 78-byte extended key given prefix version bytes and serialized key bytes. """
if not isinstance(ver_bytes, (bytes, bytearray)):
raise TypeError('ver_bytes must be raw bytes')
if len(ver_bytes) != 4:
raise ValueError('ver_bytes must have length 4')
if not isinstance(raw_serkey, (bytes, bytearray)):
raise TypeError('raw_serkey must be raw bytes')
if len(raw_serkey) != 33:
raise ValueError('raw_serkey must have length 33')
return (
ver_bytes + bytes((self.depth,))
+ self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+ self.chain_code + raw_serkey
)
def identifier(self):
raise NotImplementedError
def extended_key(self):
raise NotImplementedError
def fingerprint(self):
""" Return the key's fingerprint as 4 bytes. """
return self.identifier()[:4]
def parent_fingerprint(self):
""" Return the parent key's fingerprint as 4 bytes. """
return self.parent.fingerprint() if self.parent else bytes((0,)*4)
def extended_key_string(self):
""" Return an extended key as a base58 string. """
return Base58.encode_check(self.extended_key())
class PubKey(_KeyBase):
""" A BIP32 public key. """
def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(pubkey, PublicKey):
self.verifying_key = pubkey
else:
self.verifying_key = self._verifying_key_from_pubkey(pubkey)
@classmethod
def _verifying_key_from_pubkey(cls, pubkey):
""" Converts a 33-byte compressed pubkey into an PublicKey object. """
if not isinstance(pubkey, (bytes, bytearray)):
raise TypeError('pubkey must be raw bytes')
if len(pubkey) != 33:
raise ValueError('pubkey must be 33 bytes')
if pubkey[0] not in (2, 3):
raise ValueError('invalid pubkey prefix byte')
return PublicKey(pubkey)
@cachedproperty
def pubkey_bytes(self):
""" Return the compressed public key as 33 bytes. """
return self.verifying_key.format(True)
@cachedproperty
def address(self):
""" The public key as a P2PKH address. """
return self.ledger.public_key_to_address(self.pubkey_bytes)
def ec_point(self):
return self.verifying_key.point()
def child(self, n: int):
""" Return the derived child extended pubkey at index N. """
if not 0 <= n < (1 << 31):
raise ValueError('invalid BIP32 public key child number')
msg = self.pubkey_bytes + n.to_bytes(4, 'big')
L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name
derived_key = self.verifying_key.add(L_b)
return PubKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
def identifier(self):
""" Return the key's identifier as 20 bytes. """
return hash160(self.pubkey_bytes)
def extended_key(self):
""" Return a raw extended public key. """
return self._extended_key(
self.ledger.extended_public_key_prefix,
self.pubkey_bytes
)
class PrivateKey(_KeyBase):
"""A BIP32 private key."""
HARDENED = 1 << 31
def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(privkey, _PrivateKey):
self.signing_key = privkey
else:
self.signing_key = self._signing_key_from_privkey(privkey)
@classmethod
def _signing_key_from_privkey(cls, private_key):
""" Converts a 32-byte private key into an coincurve.PrivateKey object. """
return _PrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
@classmethod
def _private_key_secret_exponent(cls, private_key):
""" Return the private key as a secret exponent if it is a valid private key. """
if not isinstance(private_key, (bytes, bytearray)):
raise TypeError('private key must be raw bytes')
if len(private_key) != 32:
raise ValueError('private key must be 32 bytes')
return int.from_bytes(private_key, 'big')
@classmethod
def from_seed(cls, ledger, seed):
# This hard-coded message string seems to be coin-independent...
hmac = hmac_sha512(b'Bitcoin seed', seed)
privkey, chain_code = hmac[:32], hmac[32:]
return cls(ledger, privkey, chain_code, 0, 0)
@cachedproperty
def private_key_bytes(self):
""" Return the serialized private key (no leading zero byte). """
return self.signing_key.secret
@cachedproperty
def public_key(self):
""" Return the corresponding extended public key. """
verifying_key = self.signing_key.public_key
parent_pubkey = self.parent.public_key if self.parent else None
return PubKey(self.ledger, verifying_key, self.chain_code, self.n, self.depth,
parent_pubkey)
def ec_point(self):
return self.public_key.ec_point()
def secret_exponent(self):
""" Return the private key as a secret exponent. """
return self.signing_key.to_int()
def wif(self):
""" Return the private key encoded in Wallet Import Format. """
return self.ledger.private_key_to_wif(self.private_key_bytes)
def address(self):
""" The public key as a P2PKH address. """
return self.public_key.address
def child(self, n):
""" Return the derived child extended private key at index N."""
if not 0 <= n < (1 << 32):
raise ValueError('invalid BIP32 private key child number')
if n >= self.HARDENED:
serkey = b'\0' + self.private_key_bytes
else:
serkey = self.public_key.pubkey_bytes
msg = serkey + n.to_bytes(4, 'big')
L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name
derived_key = self.signing_key.add(L_b)
return PrivateKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
def sign(self, data):
""" Produce a signature for piece of data by double hashing it and signing the hash. """
return self.signing_key.sign(data, hasher=double_sha256)
def identifier(self):
"""Return the key's identifier as 20 bytes."""
return self.public_key.identifier()
def extended_key(self):
"""Return a raw extended private key."""
return self._extended_key(
self.ledger.extended_private_key_prefix,
b'\0' + self.private_key_bytes
)
def _from_extended_key(ledger, ekey):
"""Return a PubKey or PrivateKey from an extended key raw bytes."""
if not isinstance(ekey, (bytes, bytearray)):
raise TypeError('extended key must be raw bytes')
if len(ekey) != 78:
raise ValueError('extended key must have length 78')
depth = ekey[4]
n = int.from_bytes(ekey[9:13], 'big')
chain_code = ekey[13:45]
if ekey[:4] == ledger.extended_public_key_prefix:
pubkey = ekey[45:]
key = PubKey(ledger, pubkey, chain_code, n, depth)
elif ekey[:4] == ledger.extended_private_key_prefix:
if ekey[45] != 0:
raise ValueError('invalid extended private key prefix byte')
privkey = ekey[46:]
key = PrivateKey(ledger, privkey, chain_code, n, depth)
else:
raise ValueError('version bytes unrecognised')
return key
def from_extended_key_string(ledger, ekey_str):
"""Given an extended key string, such as
xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL
return a PubKey or PrivateKey.
"""
return _from_extended_key(ledger, Base58.decode_check(ekey_str))
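# --- Usage sketch (illustrative, not part of the original module): derive a
# short BIP32 chain from a fixed seed. The ledger argument is only stored on
# the key objects along the paths exercised here, so a bare placeholder
# object suffices for this demonstration.
if __name__ == '__main__':
    class _DemoLedger:
        pass
    _seed = bytes(range(32))  # deterministic 32-byte seed, demo only
    _master = PrivateKey.from_seed(_DemoLedger(), _seed)
    _child = _master.child(0)                        # non-hardened m/0
    _hardened = _master.child(PrivateKey.HARDENED)   # hardened m/0'
    print(_child.public_key.pubkey_bytes.hex())
    print(_hardened.depth, _hardened.n)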
| 36.359375
| 100
| 0.639772
|
a03e9e0cafda730a96001181d8ede841ed12e78d
| 4,246
|
py
|
Python
|
plcapi.py
|
dreibh/planetlab-lxc-nodemanager
|
e3b9608c2e4184851f1fd2be7e449e62153789cf
|
[
"BSD-3-Clause"
] | null | null | null |
plcapi.py
|
dreibh/planetlab-lxc-nodemanager
|
e3b9608c2e4184851f1fd2be7e449e62153789cf
|
[
"BSD-3-Clause"
] | null | null | null |
plcapi.py
|
dreibh/planetlab-lxc-nodemanager
|
e3b9608c2e4184851f1fd2be7e449e62153789cf
|
[
"BSD-3-Clause"
] | null | null | null |
import safexmlrpc
import hmac
try:
from hashlib import sha1 as sha
except ImportError:
import sha
import logger
class PLCAPI:
"""
Wrapper around safexmlrpc.ServerProxy to automagically add an Auth
struct as the first argument to every XML-RPC call. Initialize
auth with either:
(node_id, key) => BootAuth
or
session => SessionAuth
To authenticate using the Boot Manager authentication method, or
the new session-based method, respectively.
"""
def __init__(self, uri, cacert, auth, timeout = 90, **kwds):
self.uri = uri
self.cacert = cacert
self.timeout = timeout
if isinstance(auth, (tuple, list)):
(self.node_id, self.key) = auth
self.session = None
elif isinstance(auth, str):
self.node_id = self.key = None
self.session = auth
else:
self.node_id = self.key = self.session = None
self.server = safexmlrpc.ServerProxy(self.uri, self.cacert, self.timeout, allow_none = 1, **kwds)
def update_session(self, f="/usr/boot/plnode.txt"):
# try to authenticate and populate /etc/planetlab/session
def plnode(key):
try:
return [i[:-1].split('=') for i in open(f).readlines() if i.startswith(key)][0][1].strip('"')
except:
return None
auth = (int(plnode("NODE_ID")), plnode("NODE_KEY"))
plc = PLCAPI(self.uri, self.cacert, auth, self.timeout)
open("/etc/planetlab/session", 'w').write(plc.GetSession().strip())
self.session = open("/etc/planetlab/session").read().strip()
def check_authentication(self):
authstatus = False
if self.key or self.session:
try:
authstatus = self.AuthCheck()
except:
logger.log_exc("plcapi: failed in plcapi.check_authentication")
return authstatus
def add_auth(self, function):
"""
Returns a wrapper which adds an Auth struct as the first
argument when the function is called.
"""
def canonicalize(args):
"""
BootAuth canonicalization method. Parameter values are
collected, sorted, converted to strings, then hashed with
the node key.
"""
values = []
for arg in args:
if isinstance(arg, list) or isinstance(arg, tuple):
# The old implementation did not recursively handle
# lists of lists. But neither did the old API itself.
values += canonicalize(arg)
elif isinstance(arg, dict):
# Yes, the comments in the old implementation are
# misleading. Keys of dicts are not included in the
# hash.
values += canonicalize(list(arg.values()))
else:
# On Python 3, str() already yields unicode text (the old code used unicode()).
values.append(str(arg))
return values
def wrapper(*params):
"""
Adds an Auth struct as the first argument when the
function is called.
"""
if self.session is not None:
# Use session authentication
auth = {'AuthMethod': "session",
'session': self.session}
else:
# Yes, this is the "canonicalization" method used.
args = canonicalize(params)
args.sort()
msg = "[" + "".join(args) + "]"
# We encode in UTF-8 before calculating the HMAC, which is
# an 8-bit algorithm.
digest = hmac.new(self.key, msg.encode('utf-8'), sha).hexdigest()
auth = {'AuthMethod': "hmac",
'node_id': self.node_id,
'value': digest}
# Automagically add auth struct to every call
params = (auth,) + params
return function(*params)
return wrapper
def __getattr__(self, methodname):
function = getattr(self.server, methodname)
return self.add_auth(function)
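# --- Illustration (standalone; the key and parameters are made-up values):
# how the BootAuth digest assembled in add_auth() above is computed.
if __name__ == '__main__':
    _key = b'example-node-key'
    _args = sorted(['42', 'some-param'])   # canonicalized, sorted params
    _msg = '[' + ''.join(_args) + ']'
    print(hmac.new(_key, _msg.encode('utf-8'), sha).hexdigest())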
| 32.914729
| 109
| 0.545219
|
faf3a3b3fdc10a23c0c0612e6804ba0f7690b7a0
| 16,468
|
py
|
Python
|
lxml/tests/test_threading.py
|
2502302255/al789
|
20828306da7a301f4b287ec9c72eb2eae9273188
|
[
"MIT"
] | 28
|
2017-10-26T12:01:35.000Z
|
2021-01-01T09:32:46.000Z
|
lxml/tests/test_threading.py
|
2502302255/al789
|
20828306da7a301f4b287ec9c72eb2eae9273188
|
[
"MIT"
] | 1
|
2018-02-04T03:33:48.000Z
|
2018-05-08T22:30:01.000Z
|
lxml/tests/test_threading.py
|
2502302255/al789
|
20828306da7a301f4b287ec9c72eb2eae9273188
|
[
"MIT"
] | 1
|
2018-12-02T07:47:34.000Z
|
2018-12-02T07:47:34.000Z
|
# -*- coding: utf-8 -*-
"""
Tests for thread usage in lxml.etree.
"""
import re
import sys
import os.path
import unittest
import threading
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, BytesIO, _bytes
try:
from Queue import Queue
except ImportError:
from queue import Queue # Py3
class ThreadingTestCase(HelperTestCase):
"""Threading tests"""
etree = etree
def _run_thread(self, func):
thread = threading.Thread(target=func)
thread.start()
thread.join()
def _run_threads(self, count, func, main_func=None):
sync = threading.Event()
lock = threading.Lock()
counter = dict(started=0, finished=0, failed=0)
def sync_start(func):
with lock:
started = counter['started'] + 1
counter['started'] = started
if started < count + (main_func is not None):
sync.wait(4) # wait until the other threads have started up
assert sync.is_set()
sync.set() # all waiting => go!
try:
func()
except:
with lock:
counter['failed'] += 1
raise
else:
with lock:
counter['finished'] += 1
threads = [threading.Thread(target=sync_start, args=(func,)) for _ in range(count)]
for thread in threads:
thread.start()
if main_func is not None:
sync_start(main_func)
for thread in threads:
thread.join()
self.assertEqual(0, counter['failed'])
self.assertEqual(counter['finished'], counter['started'])
def test_subtree_copy_thread(self):
tostring = self.etree.tostring
XML = self.etree.XML
xml = _bytes("<root><threadtag/></root>")
main_root = XML(_bytes("<root/>"))
def run_thread():
thread_root = XML(xml)
main_root.append(thread_root[0])
del thread_root
self._run_thread(run_thread)
self.assertEqual(xml, tostring(main_root))
def test_main_xslt_in_thread(self):
XML = self.etree.XML
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
result = []
def run_thread():
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
result.append( st(root) )
self._run_thread(run_thread)
self.assertEqual('''\
<?xml version="1.0"?>
<foo><a>B</a></foo>
''',
str(result[0]))
def test_thread_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
def run_thread():
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
root.append( st(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(_bytes('<a><b>B</b><c>C</c><foo><a>B</a></foo></a>'),
tostring(root))
def test_thread_xslt_attr_replace(self):
# this is the only case in XSLT where the result tree can be
# modified in-place
XML = self.etree.XML
tostring = self.etree.tostring
style = self.etree.XSLT(XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<root class="abc">
<xsl:copy-of select="@class" />
<xsl:attribute name="class">xyz</xsl:attribute>
</root>
</xsl:template>
</xsl:stylesheet>''')))
result = []
def run_thread():
root = XML(_bytes('<ROOT class="ABC" />'))
result.append( style(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(_bytes('<root class="xyz"/>'),
tostring(result[0]))
def test_thread_create_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
stylesheets = []
def run_thread():
style = XML(_bytes('''\
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="xml" />
<xsl:template match="/">
<div id="test">
<xsl:apply-templates/>
</div>
</xsl:template>
</xsl:stylesheet>'''))
stylesheets.append( etree.XSLT(style) )
self._run_thread(run_thread)
st = stylesheets[0]
result = tostring( st(root) )
self.assertEqual(_bytes('<div id="test">BC</div>'),
result)
def test_thread_error_log(self):
XML = self.etree.XML
ParseError = self.etree.ParseError
expected_error = [self.etree.ErrorTypes.ERR_TAG_NAME_MISMATCH]
children = "<a>test</a>" * 100
def parse_error_test(thread_no):
tag = "tag%d" % thread_no
xml = "<%s>%s</%s>" % (tag, children, tag.upper())
parser = self.etree.XMLParser()
for _ in range(10):
errors = None
try:
XML(xml, parser)
except self.etree.ParseError:
e = sys.exc_info()[1]
errors = e.error_log.filter_types(expected_error)
self.assertTrue(errors, "Expected error not found")
for error in errors:
self.assertTrue(
tag in error.message and tag.upper() in error.message,
"%s and %s not found in '%s'" % (
tag, tag.upper(), error.message))
self.etree.clear_error_log()
threads = []
for thread_no in range(1, 10):
t = threading.Thread(target=parse_error_test,
args=(thread_no,))
threads.append(t)
t.start()
parse_error_test(0)
for t in threads:
t.join()
def test_thread_mix(self):
XML = self.etree.XML
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
xml = _bytes('<a><b>B</b><c xmlns="test">C</c></a>')
root = XML(xml)
fragment = XML(_bytes("<other><tags/></other>"))
result = self.etree.Element("{myns}root", att = "someval")
def run_XML():
thread_root = XML(xml)
result.append(thread_root[0])
result.append(thread_root[-1])
def run_parse():
thread_root = self.etree.parse(BytesIO(xml)).getroot()
result.append(thread_root[0])
result.append(thread_root[-1])
def run_move_main():
result.append(fragment[0])
def run_build():
result.append(
Element("{myns}foo", attrib={'{test}attr':'val'}))
SubElement(result, "{otherns}tasty")
def run_xslt():
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<xsl:copy><foo><xsl:value-of select="/a/b/text()" /></foo></xsl:copy>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
result.append( st(root).getroot() )
for test in (run_XML, run_parse, run_move_main, run_xslt, run_build):
tostring(result)
self._run_thread(test)
self.assertEqual(
_bytes('<ns0:root xmlns:ns0="myns" att="someval"><b>B</b>'
'<c xmlns="test">C</c><b>B</b><c xmlns="test">C</c><tags/>'
'<a><foo>B</foo></a>'
'<ns0:foo xmlns:ns1="test" ns1:attr="val"/>'
'<ns1:tasty xmlns:ns1="otherns"/></ns0:root>'),
tostring(result))
def strip_first():
root = Element("newroot")
root.append(result[0])
while len(result):
self._run_thread(strip_first)
self.assertEqual(
_bytes('<ns0:root xmlns:ns0="myns" att="someval"/>'),
tostring(result))
def test_concurrent_attribute_names_in_dicts(self):
SubElement = self.etree.SubElement
names = list('abcdefghijklmnop')
runs_per_name = range(50)
result_matches = re.compile(
br'<thread_root>'
br'(?:<[a-p]{5} thread_attr_[a-p]="value" thread_attr2_[a-p]="value2"\s?/>)+'
br'</thread_root>').match
def testrun():
for _ in range(3):
root = self.etree.Element('thread_root')
for name in names:
tag_name = name * 5
new = []
for _ in runs_per_name:
el = SubElement(root, tag_name, {'thread_attr_' + name: 'value'})
new.append(el)
for el in new:
el.set('thread_attr2_' + name, 'value2')
s = etree.tostring(root)
self.assertTrue(result_matches(s))
# first, run only in sub-threads
self._run_threads(10, testrun)
# then, additionally include the main thread (and its parent dict)
self._run_threads(10, testrun, main_func=testrun)
def test_concurrent_proxies(self):
XML = self.etree.XML
root = XML(_bytes('<root><a>A</a><b xmlns="test">B</b><c/></root>'))
child_count = len(root)
def testrun():
for i in range(10000):
el = root[i%child_count]
del el
self._run_threads(10, testrun)
def test_concurrent_class_lookup(self):
XML = self.etree.XML
class TestElement(etree.ElementBase):
pass
class MyLookup(etree.CustomElementClassLookup):
repeat = range(100)
def lookup(self, t, d, ns, name):
count = 0
for i in self.repeat:
# allow other threads to run
count += 1
return TestElement
parser = self.etree.XMLParser()
parser.set_element_class_lookup(MyLookup())
root = XML(_bytes('<root><a>A</a><b xmlns="test">B</b><c/></root>'),
parser)
child_count = len(root)
def testrun():
for i in range(1000):
el = root[i%child_count]
del el
self._run_threads(10, testrun)
class ThreadPipelineTestCase(HelperTestCase):
"""Threading tests based on a thread worker pipeline.
"""
etree = etree
item_count = 40
class Worker(threading.Thread):
def __init__(self, in_queue, in_count, **kwargs):
threading.Thread.__init__(self)
self.in_queue = in_queue
self.in_count = in_count
self.out_queue = Queue(in_count)
self.__dict__.update(kwargs)
def run(self):
get, put = self.in_queue.get, self.out_queue.put
handle = self.handle
for _ in range(self.in_count):
put(handle(get()))
def handle(self, data):
raise NotImplementedError()
class ParseWorker(Worker):
def handle(self, xml, _fromstring=etree.fromstring):
return _fromstring(xml)
class RotateWorker(Worker):
def handle(self, element):
first = element[0]
element[:] = element[1:]
element.append(first)
return element
class ReverseWorker(Worker):
def handle(self, element):
element[:] = element[::-1]
return element
class ParseAndExtendWorker(Worker):
def handle(self, element, _fromstring=etree.fromstring):
element.extend(_fromstring(self.xml))
return element
class ParseAndInjectWorker(Worker):
def handle(self, element, _fromstring=etree.fromstring):
root = _fromstring(self.xml)
root.extend(element)
return root
class Validate(Worker):
def handle(self, element):
element.getroottree().docinfo.internalDTD.assertValid(element)
return element
class SerialiseWorker(Worker):
def handle(self, element):
return etree.tostring(element)
xml = (b'''\
<!DOCTYPE threadtest [
<!ELEMENT threadtest (thread-tag1,thread-tag2)+>
<!ATTLIST threadtest
version CDATA "1.0"
>
<!ELEMENT thread-tag1 EMPTY>
<!ELEMENT thread-tag2 (div)>
<!ELEMENT div (threaded)>
<!ATTLIST div
huhu CDATA #IMPLIED
>
<!ELEMENT threaded EMPTY>
<!ATTLIST threaded
host CDATA #REQUIRED
>
]>
<threadtest version="123">
''' + (b'''
<thread-tag1 />
<thread-tag2>
<div huhu="true">
<threaded host="here" />
</div>
</thread-tag2>
''') * 20 + b'''
</threadtest>''')
def _build_pipeline(self, item_count, *classes, **kwargs):
in_queue = Queue(item_count)
start = last = classes[0](in_queue, item_count, **kwargs)
start.setDaemon(True)
for worker_class in classes[1:]:
last = worker_class(last.out_queue, item_count, **kwargs)
last.setDaemon(True)
last.start()
return (in_queue, start, last)
def test_thread_pipeline_thread_parse(self):
item_count = self.item_count
xml = self.xml.replace(b'thread', b'THREAD') # use fresh tag names
# build and start the pipeline
in_queue, start, last = self._build_pipeline(
item_count,
self.ParseWorker,
self.RotateWorker,
self.ReverseWorker,
self.ParseAndExtendWorker,
self.Validate,
self.ParseAndInjectWorker,
self.SerialiseWorker,
xml=xml)
# fill the queue
put = start.in_queue.put
for _ in range(item_count):
put(xml)
# start the first thread and thus everything
start.start()
# make sure the last thread has terminated
last.join(60) # time out after 60 seconds
self.assertEqual(item_count, last.out_queue.qsize())
# read the results
get = last.out_queue.get
results = [get() for _ in range(item_count)]
comparison = results[0]
for i, result in enumerate(results[1:]):
self.assertEqual(comparison, result)
def test_thread_pipeline_global_parse(self):
item_count = self.item_count
xml = self.xml.replace(b'thread', b'GLOBAL') # use fresh tag names
XML = self.etree.XML
# build and start the pipeline
in_queue, start, last = self._build_pipeline(
item_count,
self.RotateWorker,
self.ReverseWorker,
self.ParseAndExtendWorker,
self.Validate,
self.SerialiseWorker,
xml=xml)
# fill the queue
put = start.in_queue.put
for _ in range(item_count):
put(XML(xml))
# start the first thread and thus everything
start.start()
# make sure the last thread has terminated
last.join(60) # time out after 60 seconds
self.assertEqual(item_count, last.out_queue.qsize())
# read the results
get = last.out_queue.get
results = [get() for _ in range(item_count)]
comparison = results[0]
for i, result in enumerate(results[1:]):
self.assertEqual(comparison, result)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ThreadingTestCase)])
suite.addTests([unittest.makeSuite(ThreadPipelineTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| 31.669231
| 91
| 0.549733
|
474642ad4496451cf9ae669774eaebc199db44c9
| 152
|
py
|
Python
|
hello_args.py
|
pdebuyl/cli01
|
6f8118806338aee6cfae6fc699614ee01b8f3436
|
[
"CC-BY-4.0"
] | null | null | null |
hello_args.py
|
pdebuyl/cli01
|
6f8118806338aee6cfae6fc699614ee01b8f3436
|
[
"CC-BY-4.0"
] | null | null | null |
hello_args.py
|
pdebuyl/cli01
|
6f8118806338aee6cfae6fc699614ee01b8f3436
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function, division
import sys
print("Number of arguments:", len(sys.argv)-1)
print(*sys.argv[1:])
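# Example invocation (shell):
#   $ python3 hello_args.py foo bar
#   Number of arguments: 2
#   foo bar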
| 19
| 47
| 0.743421
|
db4872eb67556477b793bc928b673070796989dc
| 1,409
|
py
|
Python
|
env/Lib/site-packages/OpenGL/GLES2/ARM/shader_framebuffer_fetch.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
env/Lib/site-packages/OpenGL/GLES2/ARM/shader_framebuffer_fetch.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
env/Lib/site-packages/OpenGL/GLES2/ARM/shader_framebuffer_fetch.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension ARM.shader_framebuffer_fetch
This module customises the behaviour of the
OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch to provide a more
Python-friendly API
Overview (from the spec)
This extension enables fragment shaders to read existing framebuffer
data as input. This permits use-cases such as programmable blending,
and other operations that may not be possible to implement with
fixed-function blending.
This extension also adds the ability to indicate that a shader should
be run once per sample instead of once per pixel.
Reading framebuffer data as input in combination with multiple render
targets (MRT) may not be supported by all implementations. This
extension allows applications to query for this capability.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARM/shader_framebuffer_fetch.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch import *
from OpenGL.raw.GLES2.ARM.shader_framebuffer_fetch import _EXTENSION_NAME
def glInitShaderFramebufferFetchARM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
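# --- Usage sketch (assumed pattern; requires a current OpenGL ES context):
# if glInitShaderFramebufferFetchARM():
#     # Extension present: fragment shaders may read back framebuffer data
#     # (e.g. via gl_LastFragColorARM, per the spec linked above).
#     pass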
| 38.081081
| 73
| 0.814762
|
811af6f0010c700be17a5009d0dc946325f52353
| 477
|
py
|
Python
|
Python Crash Course by Eric Matthes/testing_code.py
|
Debo-Republic/Books-Exercises
|
d8d5971437fce77b3bbf15d340f5933b1826ccf0
|
[
"MIT"
] | null | null | null |
Python Crash Course by Eric Matthes/testing_code.py
|
Debo-Republic/Books-Exercises
|
d8d5971437fce77b3bbf15d340f5933b1826ccf0
|
[
"MIT"
] | null | null | null |
Python Crash Course by Eric Matthes/testing_code.py
|
Debo-Republic/Books-Exercises
|
d8d5971437fce77b3bbf15d340f5933b1826ccf0
|
[
"MIT"
] | null | null | null |
# Testing a name_function.py
from name_function import get_formatted_name
import unittest
class NameTestCase(unittest.TestCase):
"""Test to check the functioning of name_function.py"""
def test_first_last_name(self):
"""Do names like Debopriyo Bhowmick work ?"""
formatted_name = get_formatted_name('debopriyo', 'bhowmick')
self.assertEqual(formatted_name, 'Debopriyo Bhowmick')
if __name__ == '__main__':
unittest.main()
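# --- For reference, a minimal name_function.py that would satisfy this test
# (assumed; the real module is not included here):
#
# def get_formatted_name(first, last):
#     """Return a neatly formatted full name."""
#     return f"{first} {last}".title()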
| 29.8125
| 69
| 0.706499
|
95e1c2c1cf16c43accf03026a8f7973a2a4583b2
| 4,862
|
py
|
Python
|
chrome/test/functional/webrtc_audio_call.py
|
GnorTech/chromium
|
e1c7731d5bd099ca5544fcf8eda3867d4ce5bab5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-03-10T13:08:49.000Z
|
2018-03-10T13:08:49.000Z
|
chrome/test/functional/webrtc_audio_call.py
|
GnorTech/chromium
|
e1c7731d5bd099ca5544fcf8eda3867d4ce5bab5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/test/functional/webrtc_audio_call.py
|
GnorTech/chromium
|
e1c7731d5bd099ca5544fcf8eda3867d4ce5bab5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2020-11-04T07:19:31.000Z
|
2020-11-04T07:19:31.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import time
import media.audio_tools as audio_tools
# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import webrtc_test_base
class WebrtcAudioCallTest(webrtc_test_base.WebrtcTestBase):
"""Test we can set up a WebRTC call and play audio through it."""
def setUp(self):
pyauto.PyUITest.setUp(self)
self.StartPeerConnectionServer()
def tearDown(self):
self.StopPeerConnectionServer()
pyauto.PyUITest.tearDown(self)
self.assertEquals('', self.CheckErrorsAndCrashes())
def testWebrtcAudioCallAndVerifyAudioIsPlaying(self):
"""Test that WebRTC is capable of transmitting at least some audio.
This test has some nontrivial prerequisites:
1. The target system must have an active microphone, it must be selected
as default input for the user that runs the test, and it must record a
certain minimum level of ambient noise (for instance server fans).
Verify that you are getting ambient noise in the microphone by either
recording it directly or checking your OS' microphone settings. Amplify
the microphone if the background noise is too low. The microphone should
capture noise consistently above 5% of its total range.
2. The target system must be configured to record its own input*.
* On Linux:
1. # sudo apt-get install pavucontrol
2. For the user who will run the test: # pavucontrol
3. In a separate terminal, # arecord dummy
4. In pavucontrol, go to the recording tab.
5. For the ALSA plug-in [aplay]: ALSA Capture from, change from <x> to
<Monitor of x>, where x is whatever your primary sound device is called.
6. Try launching chrome as the target user on the target machine, try
playing, say, a YouTube video, and record with # arecord -f dat mine.dat.
Verify the recording with aplay (should have recorded what you played
from chrome).
"""
self.assertTrue(self.IsLinux(), msg='Only supported on Linux.')
def CallWithAudio():
self._RunWebrtcCall(duration_seconds=5)
self._RecordAudioAndEnsureNotSilent(record_duration_seconds=10,
sound_producing_function=CallWithAudio)
def _RunWebrtcCall(self, duration_seconds):
self.LoadTestPageInTwoTabs()
# This sets up an audio-only call.
self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0,
request_video=False))
self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=1,
request_video=False))
self.Connect('user_1', tab_index=0)
self.Connect('user_2', tab_index=1)
self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
# Keep the call up while we detect audio.
time.sleep(duration_seconds)
# The hang-up will automatically propagate to the second tab.
self.HangUp(from_tab_with_index=0)
self.WaitUntilHangUpVerified(tab_index=1)
self.Disconnect(tab_index=0)
self.Disconnect(tab_index=1)
# Ensure we didn't miss any errors.
self.AssertNoFailures(tab_index=0)
self.AssertNoFailures(tab_index=1)
def _RecordAudioAndEnsureNotSilent(self, record_duration_seconds,
sound_producing_function):
_SIZE_OF_EMPTY_DAT_FILE_BYTES = 44
# The two temp files that will be potentially used in the test.
temp_file = None
file_no_silence = None
try:
temp_file = self._CreateTempFile()
record_thread = audio_tools.AudioRecorderThread(record_duration_seconds,
temp_file)
record_thread.start()
sound_producing_function()
record_thread.join()
if record_thread.error:
self.fail(record_thread.error)
file_no_silence = self._CreateTempFile()
audio_tools.RemoveSilence(temp_file, file_no_silence)
self.assertTrue(os.path.getsize(file_no_silence) >
_SIZE_OF_EMPTY_DAT_FILE_BYTES,
msg=('The test recorded only silence. Ensure your '
'machine is correctly configured for this test.'))
finally:
# Delete the temporary files used by the test.
if temp_file:
os.remove(temp_file)
if file_no_silence:
os.remove(file_no_silence)
def _CreateTempFile(self):
"""Returns an absolute path to an empty temp file."""
file_handle, path = tempfile.mkstemp(suffix='_webrtc.dat')
os.close(file_handle)
return path
if __name__ == '__main__':
pyauto_functional.Main()
| 37.984375
| 80
| 0.687783
|
8a79786df968187d21a7e3714fa454233a012122
| 748
|
py
|
Python
|
var/spack/repos/builtin/packages/r-teachingdemos/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/r-teachingdemos/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/r-teachingdemos/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTeachingdemos(RPackage):
"""Demonstrations for Teaching and Learning.
Demonstration functions that can be used in a classroom to demonstrate
statistical concepts, or on your own to better understand the concepts
or the programming."""
cran = "TeachingDemos"
version('2.12', sha256='3e75405ce1affa406d6df85e06f96381412bc7a2810b25d8c81bfe64c4698644')
version('2.10', sha256='2ef4c2e36ba13e32f66000e84281a3616584c86b255bca8643ff3fe4f78ed704')
depends_on('r@2.10:', type=('build', 'run'))
| 34
| 94
| 0.762032
|
698c87179b3fe6ae062f392938caacbde14c19eb
| 524
|
py
|
Python
|
Arrays/108. Convert Sorted Array to Binary Search Tree.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | 1
|
2021-06-30T17:51:56.000Z
|
2021-06-30T17:51:56.000Z
|
Arrays/108. Convert Sorted Array to Binary Search Tree.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
Arrays/108. Convert Sorted Array to Binary Search Tree.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
from typing import List
# Definition for a binary tree node, as provided by LeetCode.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
def helper(left, right):
if left > right:
return None
# always choose left middle node as a root
p = (left + right) // 2
# preorder traversal: node -> left -> right
root = TreeNode(nums[p])
root.left = helper(left, p - 1)
root.right = helper(p + 1, right)
return root
return helper(0, len(nums) - 1)
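# --- Usage sketch (assumed driver, not part of the LeetCode submission):
def _preorder(node):
    return [] if node is None else [node.val] + _preorder(node.left) + _preorder(node.right)

if __name__ == '__main__':
    root = Solution().sortedArrayToBST([-10, -3, 0, 5, 9])
    print(_preorder(root))  # -> [0, -10, -3, 5, 9]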
| 30.823529
| 68
| 0.492366
|
41f0d0fc6445bf07e42649ccab688b570767a215
| 17
|
py
|
Python
|
testsuite/crlf.py
|
Zac-HD/pycodestyle
|
aa3417b6a51f5912e32d9c8c879e1b9dd660d5f8
|
[
"MIT"
] | 3,594
|
2016-02-23T07:13:52.000Z
|
2022-03-31T21:15:06.000Z
|
testsuite/crlf.py
|
Zac-HD/pycodestyle
|
aa3417b6a51f5912e32d9c8c879e1b9dd660d5f8
|
[
"MIT"
] | 581
|
2016-02-23T15:19:11.000Z
|
2022-03-31T23:47:20.000Z
|
testsuite/crlf.py
|
Zac-HD/pycodestyle
|
aa3417b6a51f5912e32d9c8c879e1b9dd660d5f8
|
[
"MIT"
] | 476
|
2016-02-25T01:27:27.000Z
|
2022-03-26T23:58:31.000Z
|
'''\
test
'''
| 4.25
| 5
| 0.235294
|
1063267f8f890f0b4cc14252148ee1dc90504beb
| 2,055
|
py
|
Python
|
docs/source/conf.py
|
DIYCharles/nanoleafapi
|
c1a71e1710f50280d4e18035a1f7d80bf24b26c9
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
DIYCharles/nanoleafapi
|
c1a71e1710f50280d4e18035a1f7d80bf24b26c9
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
DIYCharles/nanoleafapi
|
c1a71e1710f50280d4e18035a1f7d80bf24b26c9
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../../nanoleafapi'))
# -- Project information -----------------------------------------------------
project = 'nanoleafapi'
copyright = '2019, MylesMor'
author = 'MylesMor'
# The full version, including alpha/beta/rc tags
release = '1.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
master_doc = 'index'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.688525
| 79
| 0.675426
|
ce78b179be82885a76eaa774694b07264522353b
| 92
|
py
|
Python
|
tests/test_nightwalker.py
|
chineseluo/nightwalker
|
c7e9fb40401d632241b501d9c90f2e33522db51b
|
[
"Apache-2.0"
] | 1
|
2020-10-13T02:55:26.000Z
|
2020-10-13T02:55:26.000Z
|
tests/test_nightwalker.py
|
chineseluo/nightwalker
|
c7e9fb40401d632241b501d9c90f2e33522db51b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_nightwalker.py
|
chineseluo/nightwalker
|
c7e9fb40401d632241b501d9c90f2e33522db51b
|
[
"Apache-2.0"
] | null | null | null |
from nightwalker import __version__
def test_version():
assert __version__ == '0.1.0'
| 15.333333
| 35
| 0.728261
|
3dcf72672915cef1c60c9e6c6eb7181dc7273578
| 2,514
|
py
|
Python
|
tests/test_functionality.py
|
amogorkon/fuzzy
|
07779ea36c979224dbb6ebd87d19b57fd8e3ed9b
|
[
"MIT"
] | 47
|
2019-11-06T13:01:22.000Z
|
2022-03-24T07:48:48.000Z
|
tests/test_functionality.py
|
amogorkon/fuzzy
|
07779ea36c979224dbb6ebd87d19b57fd8e3ed9b
|
[
"MIT"
] | 19
|
2019-11-26T17:45:55.000Z
|
2022-02-28T08:44:01.000Z
|
tests/test_functionality.py
|
amogorkon/fuzzy
|
07779ea36c979224dbb6ebd87d19b57fd8e3ed9b
|
[
"MIT"
] | 16
|
2020-02-13T23:33:49.000Z
|
2022-03-20T13:13:04.000Z
|
"""
Functional test of the fuzzylogic lib 'fuzzy'.
"""
import os
import sys
here = os.path.split(os.path.abspath(os.path.dirname(__file__)))
src = os.path.join(here[0], "src")
sys.path.insert(0, src)
print(sys.path)
import unittest
from fuzzylogic.classes import Domain, Set
from fuzzylogic.functions import R, S, bounded_linear
from fuzzylogic.rules import rescale, weighted_sum
from numpy import array_equal
from pytest import fixture, raises
@fixture
def temp():
d = Domain("temperature", -100, 100, res=0.1) # in Celsius
d.cold = S(0, 15) # sic
d.hot = Set(R(10, 30)) # sic
d.warm = ~d.cold & ~d.hot
return d
@fixture
def simple():
d = Domain("simple", 0, 10)
d.low = S(0, 1)
d.high = R(8, 10)
return d
def test_array(simple):
assert array_equal(simple.low.array(), [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert array_equal(simple.high.array(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 1.0])
assert (
len(simple.low.array()) == 11
) # unlike arrays and lists, upper boundary is INCLUDED
def test_value(temp):
assert temp(6) == {temp.cold: 0.6, temp.hot: 0, temp.warm: 0.4}
def test_rating():
"""Tom is surveying restaurants.
He doesn't need fancy logic but rather uses a simple approach
with weights.
He went into a small, dirty bar that served some
really good drinks, and food that wasn't nicely arranged but still
yummy. He rates the different factors on a scale from 1 to 10,
uses a bounded_linear function to normalize over [0,1] and
passes both the weights (how much each aspect should weigh in total)
and the domain as parameters into weighted_sum.
However, he can't just use Domain(value) because that would return
a dict of memberships, instead he uses Domain.min(value) which
returns the minimum of all memberships no matter how many sets
there are. He creates a dict of membership values corresponding to
the weights and passes that into the parametrized weighted_sum func
as argument to get the final rating for this restaurant.
"""
R = Domain("rating", 1, 10, res=0.1)
R.norm = bounded_linear(1, 10)
weights = {"beverage": 0.3, "atmosphere": 0.2, "looks": 0.2, "taste": 0.3}
w_func = weighted_sum(weights=weights, target_d=R)
ratings = {
"beverage": R.min(9),
"atmosphere": R.min(5),
"looks": R.min(4),
"taste": R.min(8),
}
assert w_func(ratings) == 6.9
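# --- Worked check of the 6.9 above (assuming bounded_linear(1, 10) maps x to
# (x - 1) / 9 and weighted_sum rescales the [0, 1] result back onto [1, 10]):
# 0.3*(8/9) + 0.2*(4/9) + 0.2*(3/9) + 0.3*(7/9) = 5.9/9
# rescaled: 1 + (5.9/9) * 9 = 6.9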
if __name__ == "__main__":
unittest.main()
| 29.928571
| 82
| 0.6607
|
8c960b29559773bfc7aaec1d020785b165c665dd
| 1,559
|
py
|
Python
|
Alignment/APEEstimation/python/TrackRefitter_38T_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
Alignment/APEEstimation/python/TrackRefitter_38T_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
Alignment/APEEstimation/python/TrackRefitter_38T_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
from Configuration.Geometry.GeometryRecoDB_cff import *
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
from Configuration.StandardSequences.MagneticField_cff import *
from RecoVertex.BeamSpotProducer.BeamSpot_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.StripCPEgeometric_cfi import *
TTRHBuilderGeometricAndTemplate = cms.ESProducer("TkTransientTrackingRecHitBuilderESProducer",
StripCPE = cms.string('StripCPEfromTrackAngle'), # cms.string('StripCPEgeometric'),
#StripCPE = cms.string('StripCPEgeometric'),
ComponentName = cms.string('WithGeometricAndTemplate'),
PixelCPE = cms.string('PixelCPEGeneric'),
#PixelCPE = cms.string('PixelCPETemplateReco'),
Matcher = cms.string('StandardMatcher'),
ComputeCoarseLocalPositionFromDisk = cms.bool(False)
)
from RecoTracker.TrackProducer.TrackRefitters_cff import *
TrackRefitterForApeEstimator = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(
src = "MuSkim",
TrajectoryInEvent = True,
TTRHBuilder = "WithGeometricAndTemplate",
NavigationSchool = ''
)
TrackRefitterHighPurityForApeEstimator = TrackRefitterForApeEstimator.clone(
src = 'HighPuritySelector'
)
## FILTER for high purity tracks
import Alignment.APEEstimation.AlignmentTrackSelector_cff
HighPuritySelector = Alignment.APEEstimation.AlignmentTrackSelector_cff.HighPuritySelector
HighPuritySelector.src = 'MuSkim'
## SEQUENCE
RefitterHighPuritySequence = cms.Sequence(
offlineBeamSpot*
HighPuritySelector*
TrackRefitterForApeEstimator
)
| 31.816327
| 95
| 0.831944
|
07eecc9bf26d33f704ede95b7d019cecd9dc295a
| 3,127
|
py
|
Python
|
tests/models/validators/v2_2_1/jsd_d49f82923bc5dfda63adfd224e1a22f.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
tests/models/validators/v2_2_1/jsd_d49f82923bc5dfda63adfd224e1a22f.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
tests/models/validators/v2_2_1/jsd_d49f82923bc5dfda63adfd224e1a22f.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center GetTemplateVersions data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD49F82923Bc5DfdA63ADfd224E1A22F(object):
"""GetTemplateVersions request schema definition."""
def __init__(self):
super(JSONSchemaValidatorD49F82923Bc5DfdA63ADfd224E1A22F, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"items": {
"properties": {
"composite": {
"type": "boolean"
},
"name": {
"type": "string"
},
"projectId": {
"type": "string"
},
"projectName": {
"type": "string"
},
"templateId": {
"type": "string"
},
"versionsInfo": {
"items": {
"properties": {
"description":
{
"type": "string"
},
"id": {
"type": "string"
},
"versionTime": {
"type": "number"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
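# --- Usage sketch (payload values are made up; a payload that violates the
# schema raises MalformedRequest):
# validator = JSONSchemaValidatorD49F82923Bc5DfdA63ADfd224E1A22F()
# validator.validate([{"composite": False, "name": "t1", "versionsInfo": []}])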
| 32.572917
| 82
| 0.554845
|
5d25524bc5f42fa46cc57013f72121ec3e73d780
| 731
|
py
|
Python
|
setup.py
|
akaihola/ipython_pytest
|
dcb0a78892b2dc94001c0abe5f176cdeada3b2f6
|
[
"BSD-3-Clause-Clear"
] | 35
|
2016-12-28T23:46:38.000Z
|
2022-02-09T22:48:37.000Z
|
setup.py
|
akaihola/ipython_pytest
|
dcb0a78892b2dc94001c0abe5f176cdeada3b2f6
|
[
"BSD-3-Clause-Clear"
] | 3
|
2017-01-30T10:01:05.000Z
|
2022-01-19T16:59:33.000Z
|
setup.py
|
akaihola/ipython_pytest
|
dcb0a78892b2dc94001c0abe5f176cdeada3b2f6
|
[
"BSD-3-Clause-Clear"
] | 8
|
2017-09-17T22:55:55.000Z
|
2021-01-02T15:24:36.000Z
|
from distutils.core import setup
with open('README.rst') as f:
long_description = f.read()
setup(
name='ipython_pytest',
version='0.0.1',
author='Antti Kaihola',
author_email='antti.kaihola@eniram.fi',
py_modules=['ipython_pytest'],
url='https://github.com/akaihola/ipython_pytest',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.5',
'Operating System :: OS Independent'],
license='README.rst',
description='IPython extension to run pytest for the current cell.',
long_description=long_description,
)
| 30.458333
| 72
| 0.629275
|
8bdbe0653c7f14df9229b1f75521bc1b11f1bfde
| 48,969
|
py
|
Python
|
azurelinuxagent/ga/exthandlers.py
|
matkin-msft/jit_walinuxagent
|
ccd52807ab2c43548a4ef13d86ababeb7e3f8c1f
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/ga/exthandlers.py
|
matkin-msft/jit_walinuxagent
|
ccd52807ab2c43548a4ef13d86ababeb7e3f8c1f
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/ga/exthandlers.py
|
matkin-msft/jit_walinuxagent
|
ccd52807ab2c43548a4ef13d86ababeb7e3f8c1f
|
[
"Apache-2.0"
] | null | null | null |
# Microsoft Azure Linux Agent
#
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import glob
import json
import operator
import os
import os.path
import random
import re
import shutil
import stat
import subprocess
import time
import traceback
import zipfile
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.version as version
from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_DELTA
from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds
from azurelinuxagent.common.exception import ExtensionError, ProtocolError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \
ExtensionStatus, \
ExtensionSubStatus, \
VMStatus, ExtHandler, \
get_properties, \
set_properties
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.utils.processutil import capture_from_process
from azurelinuxagent.common.protocol import get_protocol_util
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
# HandlerEnvironment.json schema version
HANDLER_ENVIRONMENT_VERSION = 1.0
EXTENSION_STATUS_ERROR = 'error'
VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning']
VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"]
HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)"
HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN+"$", re.IGNORECASE)
HANDLER_PKG_EXT = ".zip"
HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN + r"\.zip$", re.IGNORECASE)
def validate_has_key(obj, key, fullname):
if key not in obj:
raise ExtensionError("Missing: {0}".format(fullname))
def validate_in_range(val, valid_range, name):
if val not in valid_range:
raise ExtensionError("Invalid {0}: {1}".format(name, val))
def parse_formatted_message(formatted_message):
if formatted_message is None:
return None
validate_has_key(formatted_message, 'lang', 'formattedMessage/lang')
validate_has_key(formatted_message, 'message', 'formattedMessage/message')
return formatted_message.get('message')
def parse_ext_substatus(substatus):
# Check extension sub status format
validate_has_key(substatus, 'status', 'substatus/status')
validate_in_range(substatus['status'], VALID_EXTENSION_STATUS,
'substatus/status')
status = ExtensionSubStatus()
status.name = substatus.get('name')
status.status = substatus.get('status')
status.code = substatus.get('code', 0)
formatted_message = substatus.get('formattedMessage')
status.message = parse_formatted_message(formatted_message)
return status
def parse_ext_status(ext_status, data):
if data is None or len(data) == 0:
return
# Currently, only the first status will be reported
data = data[0]
# Check extension status format
validate_has_key(data, 'status', 'status')
status_data = data['status']
validate_has_key(status_data, 'status', 'status/status')
status = status_data['status']
if status not in VALID_EXTENSION_STATUS:
status = EXTENSION_STATUS_ERROR
applied_time = status_data.get('configurationAppliedTime')
ext_status.configurationAppliedTime = applied_time
ext_status.operation = status_data.get('operation')
ext_status.status = status
ext_status.code = status_data.get('code', 0)
formatted_message = status_data.get('formattedMessage')
ext_status.message = parse_formatted_message(formatted_message)
substatus_list = status_data.get('substatus')
if substatus_list is None:
return
for substatus in substatus_list:
if substatus is not None:
ext_status.substatusList.append(parse_ext_substatus(substatus))
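# --- Illustration: the minimal payload shape parse_ext_status() expects
# (values are made up; only the first list element is consulted):
# [{
#     "status": {
#         "status": "success",
#         "operation": "Enable",
#         "code": 0,
#         "formattedMessage": {"lang": "en-US", "message": "Enable succeeded"},
#         "substatus": []
#     }
# }]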
def migrate_handler_state():
"""
Migrate handler state and status (if they exist) from an agent-owned directory into the
handler-owned config directory
Notes:
- The v2.0.x branch wrote all handler-related state into the handler-owned config
directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config).
- The v2.1.x branch originally moved that state into an agent-owned handler
state directory (e.g., /var/lib/waagent/handler_state).
- This move can cause v2.1.x agents to invoke a handler's install command multiple times. It also makes
clean-up more difficult since the agent must remove the state as well as the handler directory.
"""
handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state")
if not os.path.isdir(handler_state_path):
return
for handler_path in glob.iglob(os.path.join(handler_state_path, "*")):
handler = os.path.basename(handler_path)
handler_config_path = os.path.join(conf.get_lib_dir(), handler, "config")
if os.path.isdir(handler_config_path):
for file in ("State", "Status"):
from_path = os.path.join(handler_state_path, handler, file.lower())
to_path = os.path.join(handler_config_path, "Handler" + file)
if os.path.isfile(from_path) and not os.path.isfile(to_path):
try:
shutil.move(from_path, to_path)
except Exception as e:
logger.warn(
"Exception occurred migrating {0} {1} file: {2}",
handler,
file,
str(e))
try:
shutil.rmtree(handler_state_path)
except Exception as e:
logger.warn("Exception occurred removing {0}: {1}", handler_state_path, str(e))
return
class ExtHandlerState(object):
NotInstalled = "NotInstalled"
Installed = "Installed"
Enabled = "Enabled"
def get_exthandlers_handler():
return ExtHandlersHandler()
class ExtHandlersHandler(object):
def __init__(self):
self.protocol_util = get_protocol_util()
self.protocol = None
self.ext_handlers = None
self.last_etag = None
self.last_upgrade_guids = {}
self.log_report = False
self.log_etag = True
self.log_process = False
self.report_status_error_state = ErrorState()
def run(self):
self.ext_handlers, etag = None, None
try:
self.protocol = self.protocol_util.get_protocol()
self.ext_handlers, etag = self.protocol.get_ext_handlers()
except Exception as e:
msg = u"Exception retrieving extension handlers: {0}".format(
ustr(e))
logger.warn(msg)
add_event(AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionProcessing,
is_success=False,
message=msg)
return
try:
msg = u"Handle extensions updates for incarnation {0}".format(etag)
logger.verbose(msg)
# Log status report success on new config
self.log_report = True
self.handle_ext_handlers(etag)
self.last_etag = etag
self.report_ext_handlers_status()
self.cleanup_outdated_handlers()
except Exception as e:
msg = u"Exception processing extension handlers: {0}".format(
ustr(e))
logger.warn(msg)
add_event(AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionProcessing,
is_success=False,
message=msg)
return
def run_status(self):
self.report_ext_handlers_status()
return
def get_upgrade_guid(self, name):
return self.last_upgrade_guids.get(name, (None, False))[0]
def get_log_upgrade_guid(self, ext_handler):
return self.last_upgrade_guids.get(ext_handler.name, (None, False))[1]
def set_log_upgrade_guid(self, ext_handler, log_val):
guid = self.get_upgrade_guid(ext_handler.name)
if guid is not None:
self.last_upgrade_guids[ext_handler.name] = (guid, log_val)
def is_new_guid(self, ext_handler):
last_guid = self.get_upgrade_guid(ext_handler.name)
if last_guid is None:
return True
return last_guid != ext_handler.properties.upgradeGuid
def cleanup_outdated_handlers(self):
handlers = []
pkgs = []
# Build a collection of uninstalled handlers and orphaned packages
# Note:
# -- An orphaned package is one without a corresponding handler
# directory
for item in os.listdir(conf.get_lib_dir()):
path = os.path.join(conf.get_lib_dir(), item)
if version.is_agent_package(path) or version.is_agent_path(path):
continue
if os.path.isdir(path):
if re.match(HANDLER_NAME_PATTERN, item) is None:
continue
try:
eh = ExtHandler()
separator = item.rfind('-')
eh.name = item[0:separator]
eh.properties.version = str(FlexibleVersion(item[separator+1:]))
handler = ExtHandlerInstance(eh, self.protocol)
except Exception:
continue
if handler.get_handler_state() != ExtHandlerState.NotInstalled:
continue
handlers.append(handler)
elif os.path.isfile(path) and \
not os.path.isdir(path[0:-len(HANDLER_PKG_EXT)]):
if not re.match(HANDLER_PKG_PATTERN, item):
continue
pkgs.append(path)
# Then, remove the orphaned packages
for pkg in pkgs:
try:
os.remove(pkg)
logger.verbose("Removed orphaned extension package {0}".format(pkg))
except OSError as e:
logger.warn("Failed to remove orphaned package {0}: {1}".format(pkg, e.strerror))
# Finally, remove the directories and packages of the
# uninstalled handlers
for handler in handlers:
handler.rm_ext_handler_dir()
pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT)
if os.path.isfile(pkg):
try:
os.remove(pkg)
logger.verbose("Removed extension package {0}".format(pkg))
except OSError as e:
logger.warn("Failed to remove extension package {0}: {1}".format(pkg, e.strerror))
def handle_ext_handlers(self, etag=None):
if self.ext_handlers.extHandlers is None or \
len(self.ext_handlers.extHandlers) == 0:
logger.verbose("No extension handler config found")
return
if conf.get_enable_overprovisioning():
artifacts_profile = self.protocol.get_artifacts_profile()
if artifacts_profile and artifacts_profile.is_on_hold():
logger.info("Extension handling is on hold")
return
self.ext_handlers.extHandlers.sort(key=operator.methodcaller('sort_key'))
for ext_handler in self.ext_handlers.extHandlers:
# TODO: handle install in sequence, enable in parallel
self.handle_ext_handler(ext_handler, etag)
def handle_ext_handler(self, ext_handler, etag):
ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol)
try:
state = ext_handler.properties.state
# The extension is to be enabled, there is an upgrade GUID
# and the GUID is NOT new
if state == u"enabled" and \
ext_handler.properties.upgradeGuid is not None and \
not self.is_new_guid(ext_handler):
ext_handler_i.ext_handler.properties.version = ext_handler_i.get_installed_version()
ext_handler_i.set_logger()
if self.last_etag != etag:
self.set_log_upgrade_guid(ext_handler, True)
msg = "New GUID is the same as the old GUID. Exiting without upgrading."
if self.get_log_upgrade_guid(ext_handler):
ext_handler_i.logger.info(msg)
self.set_log_upgrade_guid(ext_handler, False)
ext_handler_i.set_handler_state(ExtHandlerState.Enabled)
ext_handler_i.set_handler_status(status="Ready", message="No change")
ext_handler_i.set_operation(WALAEventOperation.SkipUpdate)
ext_handler_i.report_event(message=ustr(msg), is_success=True)
return
self.set_log_upgrade_guid(ext_handler, True)
ext_handler_i.decide_version(target_state=state)
if not ext_handler_i.is_upgrade and self.last_etag == etag:
if self.log_etag:
ext_handler_i.logger.verbose("Version {0} is current for etag {1}",
ext_handler_i.pkg.version,
etag)
self.log_etag = False
return
self.log_etag = True
ext_handler_i.logger.info("Target handler state: {0}", state)
if state == u"enabled":
self.handle_enable(ext_handler_i)
if ext_handler.properties.upgradeGuid is not None:
ext_handler_i.logger.info("New Upgrade GUID: {0}", ext_handler.properties.upgradeGuid)
self.last_upgrade_guids[ext_handler.name] = (ext_handler.properties.upgradeGuid, True)
elif state == u"disabled":
self.handle_disable(ext_handler_i)
# Remove the GUID from the dictionary so that it is upgraded upon re-enable
self.last_upgrade_guids.pop(ext_handler.name, None)
elif state == u"uninstall":
self.handle_uninstall(ext_handler_i)
# Remove the GUID from the dictionary so that it is upgraded upon re-install
self.last_upgrade_guids.pop(ext_handler.name, None)
else:
message = u"Unknown ext handler state:{0}".format(state)
raise ExtensionError(message)
except Exception as e:
ext_handler_i.set_handler_status(message=ustr(e), code=-1)
ext_handler_i.report_event(message=ustr(e), is_success=False)
def handle_enable(self, ext_handler_i):
self.log_process = True
old_ext_handler_i = ext_handler_i.get_installed_ext_handler()
if old_ext_handler_i is not None and \
old_ext_handler_i.version_gt(ext_handler_i):
msg = "Downgrade is not allowed. Skipping install and enable."
ext_handler_i.logger.error(msg)
ext_handler_i.set_operation(WALAEventOperation.Downgrade)
ext_handler_i.report_event(message=ustr(msg), is_success=True)
return
handler_state = ext_handler_i.get_handler_state()
ext_handler_i.logger.info("[Enable] current handler state is: {0}",
handler_state.lower())
if handler_state == ExtHandlerState.NotInstalled:
ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled)
ext_handler_i.download()
ext_handler_i.update_settings()
if old_ext_handler_i is None:
ext_handler_i.install()
elif ext_handler_i.version_gt(old_ext_handler_i):
old_ext_handler_i.disable()
ext_handler_i.copy_status_files(old_ext_handler_i)
ext_handler_i.update()
old_ext_handler_i.uninstall()
old_ext_handler_i.rm_ext_handler_dir()
ext_handler_i.update_with_install()
else:
ext_handler_i.update_settings()
ext_handler_i.enable()
def handle_disable(self, ext_handler_i):
self.log_process = True
handler_state = ext_handler_i.get_handler_state()
ext_handler_i.logger.info("[Disable] current handler state is: {0}",
handler_state.lower())
if handler_state == ExtHandlerState.Enabled:
ext_handler_i.disable()
def handle_uninstall(self, ext_handler_i):
self.log_process = True
handler_state = ext_handler_i.get_handler_state()
ext_handler_i.logger.info("[Uninstall] current handler state is: {0}",
handler_state.lower())
if handler_state != ExtHandlerState.NotInstalled:
if handler_state == ExtHandlerState.Enabled:
ext_handler_i.disable()
ext_handler_i.uninstall()
ext_handler_i.rm_ext_handler_dir()
def report_ext_handlers_status(self):
"""
Go through handler_state dir, collect and report status
"""
vm_status = VMStatus(status="Ready", message="Guest Agent is running")
if self.ext_handlers is not None:
for ext_handler in self.ext_handlers.extHandlers:
try:
self.report_ext_handler_status(vm_status, ext_handler)
except ExtensionError as e:
add_event(
AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionProcessing,
is_success=False,
message=ustr(e))
logger.verbose("Report vm agent status")
try:
self.protocol.report_vm_status(vm_status)
if self.log_report:
logger.verbose("Completed vm agent status report")
self.report_status_error_state.reset()
except ProtocolError as e:
self.report_status_error_state.incr()
message = "Failed to report vm agent status: {0}".format(e)
add_event(AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionProcessing,
is_success=False,
message=message)
if self.report_status_error_state.is_triggered():
message = "Failed to report vm agent status for more than {0}"\
.format(ERROR_STATE_DELTA)
add_event(AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionProcessing,
is_success=False,
message=message)
self.report_status_error_state.reset()
def report_ext_handler_status(self, vm_status, ext_handler):
ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol)
handler_status = ext_handler_i.get_handler_status()
if handler_status is None:
return
guid = self.get_upgrade_guid(ext_handler.name)
if guid is not None:
handler_status.upgradeGuid = guid
handler_state = ext_handler_i.get_handler_state()
if handler_state != ExtHandlerState.NotInstalled:
try:
active_exts = ext_handler_i.report_ext_status()
handler_status.extensions.extend(active_exts)
except ExtensionError as e:
ext_handler_i.set_handler_status(message=ustr(e), code=-1)
try:
heartbeat = ext_handler_i.collect_heartbeat()
if heartbeat is not None:
handler_status.status = heartbeat.get('status')
except ExtensionError as e:
ext_handler_i.set_handler_status(message=ustr(e), code=-1)
vm_status.vmAgent.extensionHandlers.append(handler_status)
class ExtHandlerInstance(object):
def __init__(self, ext_handler, protocol):
self.ext_handler = ext_handler
self.protocol = protocol
self.operation = None
self.pkg = None
self.pkg_file = None
self.is_upgrade = False
self.logger = None
self.set_logger()
try:
fileutil.mkdir(self.get_log_dir(), mode=0o755)
except IOError as e:
self.logger.error(u"Failed to create extension log dir: {0}", e)
log_file = os.path.join(self.get_log_dir(), "CommandExecution.log")
self.logger.add_appender(logger.AppenderType.FILE,
logger.LogLevel.INFO, log_file)
def decide_version(self, target_state=None):
self.logger.verbose("Decide which version to use")
try:
pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler)
except ProtocolError as e:
raise ExtensionError("Failed to get ext handler pkgs", e)
# Determine the desired and installed versions
requested_version = FlexibleVersion(str(self.ext_handler.properties.version))
installed_version_string = self.get_installed_version()
installed_version = requested_version \
if installed_version_string is None \
else FlexibleVersion(installed_version_string)
# Divide packages
# - Find the installed package (its version must exactly match)
# - Find the internal candidate (its version must exactly match)
# - Separate the public packages
internal_pkg = None
installed_pkg = None
public_pkgs = []
for pkg in pkg_list.versions:
pkg_version = FlexibleVersion(pkg.version)
if pkg_version == installed_version:
installed_pkg = pkg
if pkg.isinternal and pkg_version == requested_version:
internal_pkg = pkg
if not pkg.isinternal:
public_pkgs.append(pkg)
internal_version = FlexibleVersion(internal_pkg.version) \
if internal_pkg is not None \
else FlexibleVersion()
public_pkgs.sort(key=lambda p: FlexibleVersion(p.version), reverse=True)
# Determine the preferred version and type of upgrade occurring
preferred_version = max(requested_version, installed_version)
is_major_upgrade = preferred_version.major > installed_version.major
allow_minor_upgrade = self.ext_handler.properties.upgradePolicy == 'auto'
# Find the first public candidate which
# - Matches the preferred major version
# - Does not upgrade to a new, disallowed major version
# - And only increments the minor version if allowed
# Notes:
# - The patch / hotfix version is not considered
public_pkg = None
for pkg in public_pkgs:
pkg_version = FlexibleVersion(pkg.version)
if pkg_version.major == preferred_version.major \
and (not pkg.disallow_major_upgrade or not is_major_upgrade) \
and (allow_minor_upgrade or pkg_version.minor == preferred_version.minor):
public_pkg = pkg
break
# If there are no candidates, locate the highest public version whose
# major matches that installed
if internal_pkg is None and public_pkg is None:
for pkg in public_pkgs:
pkg_version = FlexibleVersion(pkg.version)
if pkg_version.major == installed_version.major:
public_pkg = pkg
break
public_version = FlexibleVersion(public_pkg.version) \
if public_pkg is not None \
else FlexibleVersion()
# Select the candidate
# - Use the public candidate if there is no internal candidate or
# the public is more recent (e.g., a hotfix patch)
# - Otherwise use the internal candidate
if internal_pkg is None or (public_pkg is not None and public_version > internal_version):
selected_pkg = public_pkg
else:
selected_pkg = internal_pkg
selected_version = FlexibleVersion(selected_pkg.version) \
if selected_pkg is not None \
else FlexibleVersion()
# Finally, update the version only if not downgrading
# Note:
# - A downgrade, which will be bound to the same major version,
# is allowed if the installed version is no longer available
if target_state == u"uninstall":
if installed_pkg is None:
msg = "Failed to find installed version of {0} " \
"to uninstall".format(self.ext_handler.name)
self.logger.warn(msg)
self.pkg = installed_pkg
self.ext_handler.properties.version = str(installed_version) \
if installed_version is not None else None
elif selected_pkg is None \
or (installed_pkg is not None and selected_version < installed_version):
self.pkg = installed_pkg
self.ext_handler.properties.version = str(installed_version) \
if installed_version is not None else None
else:
self.pkg = selected_pkg
self.ext_handler.properties.version = str(selected_pkg.version)
# Note if the selected package is greater than that installed
if installed_pkg is None \
or FlexibleVersion(self.pkg.version) > FlexibleVersion(installed_pkg.version):
self.is_upgrade = True
if self.pkg is None:
raise ExtensionError("Failed to find any valid extension package")
self.logger.verbose("Use version: {0}", self.pkg.version)
self.set_logger()
return
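    # Illustrative walk-through of the selection above (hypothetical version
    # numbers, not taken from any real extension): with installed version 1.2,
    # requested version 1.3 and public packages [1.1, 1.3, 2.0], the preferred
    # version is max(1.3, 1.2) = 1.3; 2.0 is rejected because its major
    # version differs from the preferred major, and 1.3 is selected since its
    # minor already matches the preferred minor (so it passes even when
    # upgradePolicy != 'auto').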
def set_logger(self):
prefix = "[{0}]".format(self.get_full_name())
self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix)
def version_gt(self, other):
self_version = self.ext_handler.properties.version
other_version = other.ext_handler.properties.version
return FlexibleVersion(self_version) > FlexibleVersion(other_version)
    def get_installed_ext_handler(self):
        latest_version = self.get_installed_version()
        if latest_version is None:
            return None
        installed_handler = ExtHandler()
        set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler))
        installed_handler.properties.version = latest_version
        return ExtHandlerInstance(installed_handler, self.protocol)
    def get_installed_version(self):
        latest_version = None
        for path in glob.iglob(os.path.join(conf.get_lib_dir(), self.ext_handler.name + "-*")):
            if not os.path.isdir(path):
                continue
            separator = path.rfind('-')
            version_from_path = FlexibleVersion(path[separator+1:])
            state_path = os.path.join(path, 'config', 'HandlerState')
            if not os.path.exists(state_path) or \
                    fileutil.read_file(state_path) == \
                    ExtHandlerState.NotInstalled:
                logger.verbose("Ignoring version of uninstalled extension: "
                               "{0}".format(path))
                continue
            if latest_version is None or latest_version < version_from_path:
                latest_version = version_from_path
        return str(latest_version) if latest_version is not None else None
def copy_status_files(self, old_ext_handler_i):
self.logger.info("Copy status files from old plugin to new")
old_ext_dir = old_ext_handler_i.get_base_dir()
new_ext_dir = self.get_base_dir()
old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq")
if os.path.isfile(old_ext_mrseq_file):
shutil.copy2(old_ext_mrseq_file, new_ext_dir)
old_ext_status_dir = old_ext_handler_i.get_status_dir()
new_ext_status_dir = self.get_status_dir()
if os.path.isdir(old_ext_status_dir):
for status_file in os.listdir(old_ext_status_dir):
status_file = os.path.join(old_ext_status_dir, status_file)
if os.path.isfile(status_file):
shutil.copy2(status_file, new_ext_status_dir)
def set_operation(self, op):
self.operation = op
def report_event(self, message="", is_success=True, duration=0):
ext_handler_version = self.ext_handler.properties.version
add_event(name=self.ext_handler.name, version=ext_handler_version, message=message,
op=self.operation, is_success=is_success, duration=duration)
def download(self):
begin_utc = datetime.datetime.utcnow()
self.logger.verbose("Download extension package")
self.set_operation(WALAEventOperation.Download)
if self.pkg is None:
raise ExtensionError("No package uri found")
package = None
chosen_uri = None
uris_shuffled = self.pkg.uris
random.shuffle(uris_shuffled)
for uri in uris_shuffled:
try:
package = self.protocol.download_ext_handler_pkg(uri.uri)
if package is not None:
chosen_uri = uri
break
except Exception as e:
logger.warn("Error while downloading extension: {0}", e)
if package is None or chosen_uri is None:
raise ExtensionError("Failed to download extension")
self.logger.verbose("Unpack extension package")
self.pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(chosen_uri.uri) + ".zip")
try:
fileutil.write_file(self.pkg_file, bytearray(package), asbin=True)
zipfile.ZipFile(self.pkg_file).extractall(self.get_base_dir())
os.remove(self.pkg_file)
except IOError as e:
fileutil.clean_ioerror(e,
paths=[self.get_base_dir(), self.pkg_file])
raise ExtensionError(u"Failed to write and unzip plugin", e)
# Add user execute permission to all files under the base dir
for file in fileutil.get_all_files(self.get_base_dir()):
fileutil.chmod(file, os.stat(file).st_mode | stat.S_IXUSR)
duration = elapsed_milliseconds(begin_utc)
self.report_event(message="Download succeeded", duration=duration)
self.logger.info("Initialize extension directory")
# Save HandlerManifest.json
man_file = fileutil.search_file(self.get_base_dir(),
'HandlerManifest.json')
if man_file is None:
raise ExtensionError("HandlerManifest.json not found")
try:
man = fileutil.read_file(man_file, remove_bom=True)
fileutil.write_file(self.get_manifest_file(), man)
except IOError as e:
fileutil.clean_ioerror(e,
paths=[self.get_base_dir(), self.pkg_file])
raise ExtensionError(u"Failed to save HandlerManifest.json", e)
# Create status and config dir
try:
status_dir = self.get_status_dir()
fileutil.mkdir(status_dir, mode=0o700)
seq_no, status_path = self.get_status_file_path()
if seq_no > -1:
now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
status = {
"version": 1.0,
"timestampUTC": now,
"status": {
"name": self.ext_handler.name,
"operation": "Enabling Handler",
"status": "transitioning",
"code": 0
}
}
fileutil.write_file(status_path, json.dumps(status))
conf_dir = self.get_conf_dir()
fileutil.mkdir(conf_dir, mode=0o700)
except IOError as e:
fileutil.clean_ioerror(e,
paths=[self.get_base_dir(), self.pkg_file])
raise ExtensionError(u"Failed to create status or config dir", e)
# Save HandlerEnvironment.json
self.create_handler_env()
def enable(self):
self.set_operation(WALAEventOperation.Enable)
man = self.load_manifest()
enable_cmd = man.get_enable_command()
self.logger.info("Enable extension [{0}]".format(enable_cmd))
self.launch_command(enable_cmd, timeout=300)
self.set_handler_state(ExtHandlerState.Enabled)
self.set_handler_status(status="Ready", message="Plugin enabled")
def disable(self):
self.set_operation(WALAEventOperation.Disable)
man = self.load_manifest()
disable_cmd = man.get_disable_command()
self.logger.info("Disable extension [{0}]".format(disable_cmd))
self.launch_command(disable_cmd, timeout=900)
self.set_handler_state(ExtHandlerState.Installed)
self.set_handler_status(status="NotReady", message="Plugin disabled")
def install(self):
man = self.load_manifest()
install_cmd = man.get_install_command()
self.logger.info("Install extension [{0}]".format(install_cmd))
self.set_operation(WALAEventOperation.Install)
self.launch_command(install_cmd, timeout=900)
self.set_handler_state(ExtHandlerState.Installed)
def uninstall(self):
try:
self.set_operation(WALAEventOperation.UnInstall)
man = self.load_manifest()
uninstall_cmd = man.get_uninstall_command()
self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd))
self.launch_command(uninstall_cmd)
except ExtensionError as e:
self.report_event(message=ustr(e), is_success=False)
def rm_ext_handler_dir(self):
try:
base_dir = self.get_base_dir()
if os.path.isdir(base_dir):
self.logger.info("Remove extension handler directory: {0}",
base_dir)
shutil.rmtree(base_dir)
except IOError as e:
message = "Failed to remove extension handler directory: {0}".format(e)
self.report_event(message=message, is_success=False)
self.logger.warn(message)
def update(self):
self.set_operation(WALAEventOperation.Update)
man = self.load_manifest()
update_cmd = man.get_update_command()
self.logger.info("Update extension [{0}]".format(update_cmd))
self.launch_command(update_cmd, timeout=900)
def update_with_install(self):
man = self.load_manifest()
if man.is_update_with_install():
self.install()
else:
self.logger.info("UpdateWithInstall not set. "
"Skip install during upgrade.")
self.set_handler_state(ExtHandlerState.Installed)
def get_largest_seq_no(self):
seq_no = -1
conf_dir = self.get_conf_dir()
for item in os.listdir(conf_dir):
item_path = os.path.join(conf_dir, item)
if os.path.isfile(item_path):
try:
separator = item.rfind(".")
if separator > 0 and item[separator + 1:] == 'settings':
curr_seq_no = int(item.split('.')[0])
if curr_seq_no > seq_no:
seq_no = curr_seq_no
except (ValueError, IndexError, TypeError):
self.logger.verbose("Failed to parse file name: {0}", item)
continue
return seq_no
def get_status_file_path(self):
seq_no = self.get_largest_seq_no()
path = None
if seq_no > -1:
path = os.path.join(
self.get_status_dir(),
"{0}.status".format(seq_no))
return seq_no, path
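    # Naming convention assumed by the two helpers above: the goal state for
    # sequence number N lives in <conf_dir>/N.settings and the matching
    # report is written to <status_dir>/N.status; e.g. a config dir holding
    # 0.settings and 1.settings yields seq_no 1 and a status path ending in
    # status/1.status.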
def collect_ext_status(self, ext):
# see github issue 1116
self.logger.verbose("Collect extension status")
seq_no, ext_status_file = self.get_status_file_path()
if seq_no == -1:
return None
ext_status = ExtensionStatus(seq_no=seq_no)
try:
data_str = fileutil.read_file(ext_status_file)
data = json.loads(data_str)
parse_ext_status(ext_status, data)
except IOError as e:
ext_status.message = u"Failed to get status file {0}".format(e)
ext_status.code = -1
ext_status.status = "error"
except (ExtensionError, ValueError) as e:
ext_status.message = u"Malformed status file {0}".format(e)
ext_status.code = -1
ext_status.status = "error"
return ext_status
def report_ext_status(self):
active_exts = []
# TODO Refactor or remove this common code pattern (for each extension subordinate to an ext_handler, do X).
for ext in self.ext_handler.properties.extensions:
ext_status = self.collect_ext_status(ext)
if ext_status is None:
continue
try:
self.protocol.report_ext_status(self.ext_handler.name, ext.name,
ext_status)
active_exts.append(ext.name)
except ProtocolError as e:
self.logger.error(u"Failed to report extension status: {0}", e)
return active_exts
def collect_heartbeat(self):
man = self.load_manifest()
if not man.is_report_heartbeat():
return
heartbeat_file = os.path.join(conf.get_lib_dir(),
self.get_heartbeat_file())
if not os.path.isfile(heartbeat_file):
raise ExtensionError("Failed to get heart beat file")
if not self.is_responsive(heartbeat_file):
return {
"status": "Unresponsive",
"code": -1,
"message": "Extension heartbeat is not responsive"
}
try:
heartbeat_json = fileutil.read_file(heartbeat_file)
heartbeat = json.loads(heartbeat_json)[0]['heartbeat']
except IOError as e:
raise ExtensionError("Failed to get heartbeat file:{0}".format(e))
except (ValueError, KeyError) as e:
raise ExtensionError("Malformed heartbeat file: {0}".format(e))
return heartbeat
@staticmethod
def is_responsive(heartbeat_file):
"""
Was heartbeat_file updated within the last ten (10) minutes?
:param heartbeat_file: str
:return: bool
"""
last_update = int(time.time() - os.stat(heartbeat_file).st_mtime)
return last_update <= 600
def launch_command(self, cmd, timeout=300):
begin_utc = datetime.datetime.utcnow()
self.logger.verbose("Launch command: [{0}]", cmd)
base_dir = self.get_base_dir()
try:
# This should be .run(), but due to the wide variety
# of Python versions we must support we must use .communicate().
process = subprocess.Popen(os.path.join(base_dir, cmd),
shell=True,
cwd=base_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
preexec_fn=os.setsid)
except OSError as e:
raise ExtensionError("Failed to launch '{0}': {1}".format(cmd, e.strerror))
msg = capture_from_process(process, cmd, timeout)
ret = process.poll()
if ret is None or ret != 0:
raise ExtensionError("Non-zero exit code: {0}, {1}\n{2}".format(ret, cmd, msg))
duration = elapsed_milliseconds(begin_utc)
self.report_event(message="{0}\n{1}".format(cmd, msg), duration=duration)
def load_manifest(self):
man_file = self.get_manifest_file()
try:
data = json.loads(fileutil.read_file(man_file))
except (IOError, OSError) as e:
raise ExtensionError('Failed to load manifest file ({0}): {1}'.format(man_file, e.strerror))
except ValueError:
raise ExtensionError('Malformed manifest file ({0}).'.format(man_file))
return HandlerManifest(data[0])
def update_settings_file(self, settings_file, settings):
settings_file = os.path.join(self.get_conf_dir(), settings_file)
try:
fileutil.write_file(settings_file, settings)
except IOError as e:
fileutil.clean_ioerror(e,
paths=[settings_file])
raise ExtensionError(u"Failed to update settings file", e)
def update_settings(self):
if self.ext_handler.properties.extensions is None or \
len(self.ext_handler.properties.extensions) == 0:
# This is the behavior of waagent 2.0.x
# The new agent has to be consistent with the old one.
self.logger.info("Extension has no settings, write empty 0.settings")
self.update_settings_file("0.settings", "")
return
for ext in self.ext_handler.properties.extensions:
settings = {
'publicSettings': ext.publicSettings,
'protectedSettings': ext.protectedSettings,
'protectedSettingsCertThumbprint': ext.certificateThumbprint
}
ext_settings = {
"runtimeSettings": [{
"handlerSettings": settings
}]
}
settings_file = "{0}.settings".format(ext.sequenceNumber)
self.logger.info("Update settings file: {0}", settings_file)
self.update_settings_file(settings_file, json.dumps(ext_settings))
def create_handler_env(self):
env = [{
"name": self.ext_handler.name,
"version": HANDLER_ENVIRONMENT_VERSION,
"handlerEnvironment": {
"logFolder": self.get_log_dir(),
"configFolder": self.get_conf_dir(),
"statusFolder": self.get_status_dir(),
"heartbeatFile": self.get_heartbeat_file()
}
}]
try:
fileutil.write_file(self.get_env_file(), json.dumps(env))
except IOError as e:
fileutil.clean_ioerror(e,
paths=[self.get_base_dir(), self.pkg_file])
raise ExtensionError(u"Failed to save handler environment", e)
def set_handler_state(self, handler_state):
state_dir = self.get_conf_dir()
state_file = os.path.join(state_dir, "HandlerState")
try:
if not os.path.exists(state_dir):
fileutil.mkdir(state_dir, mode=0o700)
fileutil.write_file(state_file, handler_state)
except IOError as e:
fileutil.clean_ioerror(e, paths=[state_file])
self.logger.error("Failed to set state: {0}", e)
def get_handler_state(self):
state_dir = self.get_conf_dir()
state_file = os.path.join(state_dir, "HandlerState")
if not os.path.isfile(state_file):
return ExtHandlerState.NotInstalled
try:
return fileutil.read_file(state_file)
except IOError as e:
self.logger.error("Failed to get state: {0}", e)
return ExtHandlerState.NotInstalled
def set_handler_status(self, status="NotReady", message="", code=0):
state_dir = self.get_conf_dir()
handler_status = ExtHandlerStatus()
handler_status.name = self.ext_handler.name
handler_status.version = str(self.ext_handler.properties.version)
handler_status.message = message
handler_status.code = code
handler_status.status = status
status_file = os.path.join(state_dir, "HandlerStatus")
try:
handler_status_json = json.dumps(get_properties(handler_status))
if handler_status_json is not None:
fileutil.write_file(status_file, handler_status_json)
else:
self.logger.error("Failed to create JSON document of handler status for {0} version {1}".format(
self.ext_handler.name,
self.ext_handler.properties.version))
except (IOError, ValueError, ProtocolError) as e:
fileutil.clean_ioerror(e,
paths=[status_file])
self.logger.error("Failed to save handler status: {0}", traceback.format_exc())
def get_handler_status(self):
state_dir = self.get_conf_dir()
status_file = os.path.join(state_dir, "HandlerStatus")
if not os.path.isfile(status_file):
return None
try:
data = json.loads(fileutil.read_file(status_file))
handler_status = ExtHandlerStatus()
set_properties("ExtHandlerStatus", handler_status, data)
return handler_status
except (IOError, ValueError) as e:
self.logger.error("Failed to get handler status: {0}", e)
def get_full_name(self):
return "{0}-{1}".format(self.ext_handler.name,
self.ext_handler.properties.version)
def get_base_dir(self):
return os.path.join(conf.get_lib_dir(), self.get_full_name())
def get_status_dir(self):
return os.path.join(self.get_base_dir(), "status")
def get_conf_dir(self):
return os.path.join(self.get_base_dir(), 'config')
def get_heartbeat_file(self):
return os.path.join(self.get_base_dir(), 'heartbeat.log')
def get_manifest_file(self):
return os.path.join(self.get_base_dir(), 'HandlerManifest.json')
def get_env_file(self):
return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json')
def get_log_dir(self):
return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name,
str(self.ext_handler.properties.version))
class HandlerEnvironment(object):
def __init__(self, data):
self.data = data
def get_version(self):
return self.data["version"]
def get_log_dir(self):
return self.data["handlerEnvironment"]["logFolder"]
def get_conf_dir(self):
return self.data["handlerEnvironment"]["configFolder"]
def get_status_dir(self):
return self.data["handlerEnvironment"]["statusFolder"]
def get_heartbeat_file(self):
return self.data["handlerEnvironment"]["heartbeatFile"]
class HandlerManifest(object):
def __init__(self, data):
if data is None or data['handlerManifest'] is None:
raise ExtensionError('Malformed manifest file.')
self.data = data
def get_name(self):
return self.data["name"]
def get_version(self):
return self.data["version"]
def get_install_command(self):
return self.data['handlerManifest']["installCommand"]
def get_uninstall_command(self):
return self.data['handlerManifest']["uninstallCommand"]
def get_update_command(self):
return self.data['handlerManifest']["updateCommand"]
def get_enable_command(self):
return self.data['handlerManifest']["enableCommand"]
def get_disable_command(self):
return self.data['handlerManifest']["disableCommand"]
def is_report_heartbeat(self):
return self.data['handlerManifest'].get('reportHeartbeat', False)
def is_update_with_install(self):
update_mode = self.data['handlerManifest'].get('updateMode')
if update_mode is None:
return True
return update_mode.lower() == "updatewithinstall"
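# For reference, a minimal HandlerManifest.json shaped the way the accessors
# above expect it (field names taken from the getters; values are purely
# illustrative; load_manifest() reads element [0] of the JSON array):
#
# [{
#     "name": "ExampleHandler",
#     "version": 1.0,
#     "handlerManifest": {
#         "installCommand": "bin/install.sh",
#         "uninstallCommand": "bin/uninstall.sh",
#         "updateCommand": "bin/update.sh",
#         "enableCommand": "bin/enable.sh",
#         "disableCommand": "bin/disable.sh",
#         "reportHeartbeat": false,
#         "updateMode": "UpdateWithInstall"
#     }
# }]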
| 40.909774
| 116
| 0.616124
|
67edb04a08649649f8922b353eff0fe098f88269
| 18,722
|
py
|
Python
|
dstn_int_att.py
|
awesome-archive/dstn
|
edeef7bccee68a1b3756bb895bcdb982253c96d3
|
[
"MIT"
] | 1
|
2019-07-03T01:29:14.000Z
|
2019-07-03T01:29:14.000Z
|
dstn_int_att.py
|
1oscar/dstn
|
edeef7bccee68a1b3756bb895bcdb982253c96d3
|
[
"MIT"
] | null | null | null |
dstn_int_att.py
|
1oscar/dstn
|
edeef7bccee68a1b3756bb895bcdb982253c96d3
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import datetime
import ctr_funcs as func
import config_dstn as cfg
import os
import shutil
# config
str_txt = cfg.output_file_name
base_path = './tmp'
model_saving_addr = base_path + '/dstn_i_' + str_txt + '/'
output_file_name = base_path + '/dstn_i_' + str_txt + '.txt'
num_csv_col = cfg.num_csv_col
train_file_name = cfg.train_file_name
val_file_name = cfg.val_file_name
test_file_name = cfg.test_file_name
batch_size = cfg.batch_size
n_ft = cfg.n_ft
k = cfg.k
eta = cfg.eta
n_epoch = cfg.n_epoch
max_num_lower_ct = cfg.max_num_lower_ct
record_step_size = cfg.record_step_size
layer_dim = cfg.layer_dim
opt_alg = cfg.opt_alg
n_one_hot_slot = cfg.n_one_hot_slot
n_mul_hot_slot = cfg.n_mul_hot_slot
num_aux_type = cfg.num_aux_type
n_one_hot_slot_aux = cfg.n_one_hot_slot_aux
n_mul_hot_slot_aux = cfg.n_mul_hot_slot_aux
max_len_per_slot_aux = cfg.max_len_per_slot_aux
num_aux_inst_in_data = cfg.num_aux_inst_in_data
max_num_aux_inst_used = cfg.max_num_aux_inst_used
max_len_per_slot = cfg.max_len_per_slot
att_hidden_dim = cfg.att_hidden_dim
label_col_idx = 0
record_defaults = [[0]]*num_csv_col
record_defaults[0] = [0.0]
total_num_ft_col = num_csv_col - 1
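# Note: record_defaults supplies one default per CSV column (presumably
# consumed by tf.decode_csv inside func.tf_input_pipeline), which also fixes
# each column's dtype: column 0 (the label) is float, all remaining feature
# columns are integer ids.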
# create dir
if not os.path.exists(base_path):
os.mkdir(base_path)
# remove dir
if os.path.isdir(model_saving_addr):
shutil.rmtree(model_saving_addr)
###########################################################
###########################################################
print('Loading data start!')
tf.set_random_seed(123)
# load training data
train_ft, train_label = func.tf_input_pipeline(train_file_name, batch_size, n_epoch, label_col_idx, record_defaults)
# load val data
n_val_inst = func.count_lines(val_file_name[0])
val_ft, val_label = func.tf_input_pipeline(val_file_name, n_val_inst, 1, label_col_idx, record_defaults)
n_val_batch = n_val_inst//batch_size
# load test data
test_ft, test_label = func.tf_input_pipeline_test(test_file_name, batch_size, 1, label_col_idx, record_defaults)
print('Loading data done!')
########################################################################
def partition_input(x_input):
# generate idx_list
len_list = []
len_list.append(n_one_hot_slot)
len_list.append(n_mul_hot_slot*max_len_per_slot)
for i in range(num_aux_type):
len_list.append(n_one_hot_slot_aux[i]*num_aux_inst_in_data[i])
len_list.append(n_mul_hot_slot_aux[i]*max_len_per_slot_aux[i]*num_aux_inst_in_data[i])
len_list = np.array(len_list)
idx_list = np.cumsum(len_list)
# shape=[None, n_one_hot_slot]
x_input_one_hot = x_input[:, 0:idx_list[0]]
x_input_mul_hot = x_input[:, idx_list[0]:idx_list[1]]
# shape=[None, n_mul_hot_slot, max_len_per_slot]
x_input_mul_hot = tf.reshape(x_input_mul_hot, (-1, n_mul_hot_slot, max_len_per_slot))
# aux
x_input_one_hot_aux = {}
x_input_mul_hot_aux = {}
for i in range(num_aux_type):
# take out
temp_1 = x_input[:, idx_list[2*i+1]:idx_list[2*i+2]]
# reshape
temp_1 = tf.reshape(temp_1, (-1, num_aux_inst_in_data[i], n_one_hot_slot_aux[i]))
# shape=[None, max_num_ctxt, n_one_hot_slot]
x_input_one_hot_aux[i] = temp_1[:, 0:max_num_aux_inst_used[i], :]
# take out
temp_2 = x_input[:, idx_list[2*i+2]:idx_list[2*i+3]]
temp_2 = tf.reshape(temp_2, (-1, num_aux_inst_in_data[i], n_mul_hot_slot_aux[i], \
max_len_per_slot_aux[i]))
# shape=[None, max_num_ctxt, n_mul_hot_slot, max_len_per_slot]
x_input_mul_hot_aux[i] = temp_2[:, 0:max_num_aux_inst_used[i], :, :]
return x_input_one_hot, x_input_mul_hot, x_input_one_hot_aux, x_input_mul_hot_aux
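# Layout assumed for each input row, matching the slicing above: first the
# target ad's one-hot ids, then its multi-hot ids (n_mul_hot_slot *
# max_len_per_slot values), followed, for each auxiliary type (ctxt, clk and
# non_clk ads, per the comments further below), by that type's one-hot block
# and then its multi-hot block for all num_aux_inst_in_data instances.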
# add mask
def get_masked_one_hot(x_input_one_hot):
data_mask = tf.cast(tf.greater(x_input_one_hot, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 2)
data_mask = tf.tile(data_mask, (1,1,k))
# output: (?, n_one_hot_slot, k)
data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot)
data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)
return data_embed_one_hot_masked
def get_masked_mul_hot(x_input_mul_hot):
data_mask = tf.cast(tf.greater(x_input_mul_hot, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 3)
data_mask = tf.tile(data_mask, (1,1,1,k))
# output: (?, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot)
data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)
return data_embed_mul_hot_masked
def get_masked_one_hot_aux(x_input_one_hot_ctxt):
data_mask = tf.cast(tf.greater(x_input_one_hot_ctxt, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 3)
data_mask = tf.tile(data_mask, (1,1,1,k))
# output: (?, max_num_ctxt, n_one_hot_slot, k)
data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot_ctxt)
data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)
return data_embed_one_hot_masked
def get_masked_mul_hot_aux(x_input_mul_hot_ctxt):
data_mask = tf.cast(tf.greater(x_input_mul_hot_ctxt, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 4)
data_mask = tf.tile(data_mask, (1,1,1,1,k))
# output: (?, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot_ctxt)
data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)
return data_embed_mul_hot_masked
def prepare_input_embed(x_input_one_hot, x_input_mul_hot):
# output: (?, n_one_hot_slot, k)
data_embed_one_hot = get_masked_one_hot(x_input_one_hot)
data_embed_one_hot = tf.reshape(data_embed_one_hot, [-1, n_one_hot_slot*k])
# output: (?, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot = get_masked_mul_hot(x_input_mul_hot)
data_embed_mul_hot_pooling = tf.reduce_sum(data_embed_mul_hot, 2)
data_embed_mul_hot_pooling = tf.reshape(data_embed_mul_hot_pooling, [-1, n_mul_hot_slot*k])
# concatenate (col-wise; keep num of rows unchanged)
data_embed_ori = tf.concat([data_embed_one_hot, data_embed_mul_hot_pooling], 1)
return data_embed_ori
##################################
# should keep max_num_ctxt dim
def prepare_input_embed_aux_interaction(x_input_one_hot_ctxt, x_input_mul_hot_ctxt, \
max_num_ctxt, cur_n_one_hot_slot, cur_n_mul_hot_slot):
# output: (?, max_num_ctxt, n_one_hot_slot, k)
data_embed_one_hot_ctxt = get_masked_one_hot_aux(x_input_one_hot_ctxt)
# output: (?, max_num_ctxt, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot_ctxt = get_masked_mul_hot_aux(x_input_mul_hot_ctxt)
# if max_num_ctxt = 1, then this dim will be automatically collapsed
data_embed_mul_hot_pooling_ctxt = tf.reduce_sum(data_embed_mul_hot_ctxt, 3)
data_embed_one_hot_ctxt = tf.reshape(data_embed_one_hot_ctxt, \
[-1, max_num_ctxt, cur_n_one_hot_slot*k])
data_embed_mul_hot_pooling_ctxt = tf.reshape(data_embed_mul_hot_pooling_ctxt, \
[-1, max_num_ctxt, cur_n_mul_hot_slot*k])
# output dim: none * max_num_ctxt * (n_one_hot_slot + n_mul_hot_slot)k
data_embed_ctxt = tf.concat([data_embed_one_hot_ctxt, data_embed_mul_hot_pooling_ctxt], 2)
return data_embed_ctxt
########################################
# data_embed_ori - none * total_embed_dim
# data_embed_ctxt - none* max_num_ctxt * total_embed_dim
def get_wgt_sum_embed_aux(data_embed_ori, data_embed_ctxt, W1_ctxt, b1_ctxt, W2_ctxt, b2_ctxt, \
max_num_ctxt, total_embed_dim_ctxt):
# dim: none * 1 * total_embed_dim
data_embed_ori_exp = tf.expand_dims(data_embed_ori, 1)
# tile, dim: none * max_num_ctxt * total_embed_dim
data_embed_ori_tile = tf.tile(data_embed_ori_exp, [1, max_num_ctxt, 1])
# concat, dim: none * max_num_ctxt * 2 total_embed_dim
data_concat = tf.concat([data_embed_ori_tile, data_embed_ctxt], 2)
data_concat = tf.reshape(data_concat, [-1, total_embed_dim_ctxt])
# dim: none * max_num_ctxt * att_hidden_dim
hidden = tf.matmul(data_concat, W1_ctxt) + b1_ctxt
hidden = tf.nn.relu(hidden)
hidden = tf.nn.dropout(hidden, keep_prob)
# dim: none * max_num_ctxt * 1 [must have 1 at the last dim]
wgt_ctxt = tf.exp(tf.matmul(hidden, W2_ctxt) + b2_ctxt)
wgt_ctxt = tf.reshape(wgt_ctxt, [-1, max_num_ctxt, 1])
# dim: none * max_num_ctxt * total_embed_dim
temp = wgt_ctxt * data_embed_ctxt
# sum over dim max_num_ctxt
# dim: none * total_embed_dim (same dim as data_embed_ori)
output = tf.reduce_sum(temp, 1)
return output
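# Note on the attention above: each auxiliary instance j gets weight
# w_j = exp(MLP([target_embed, aux_embed_j])) and the weighted embeddings are
# summed WITHOUT normalizing by sum_j w_j. The exponential keeps weights
# positive, but unlike a softmax the total contribution of an auxiliary type
# can grow with the number of relevant instances.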
###########################################################
# input for DNN (embedding ids)
x_input = tf.placeholder(tf.int32, shape=[None, total_num_ft_col])
x_input_one_hot, x_input_mul_hot, x_input_one_hot_aux, x_input_mul_hot_aux \
= partition_input(x_input)
# target vect
y_target = tf.placeholder(tf.float32, shape=[None, 1])
# dropout keep prob
keep_prob = tf.placeholder(tf.float32)
# emb_mat dim add 1 -> for padding (idx = 0)
with tf.device('/cpu:0'):
emb_mat = tf.Variable(tf.random_normal([n_ft + 1, k], stddev=0.01))
# attention weight
W1_list = {}; b1_list = {}; W2_list = {}; b2_list = {}
total_embed_dim = {}
for i in range(num_aux_type):
total_embed_dim[i] = k*(n_one_hot_slot + n_mul_hot_slot \
+ n_one_hot_slot_aux[i] + n_mul_hot_slot_aux[i])
std_a = np.sqrt(2.0/(total_embed_dim[i]+att_hidden_dim))
std_b = np.sqrt(2.0/att_hidden_dim)
W1_list[i] = tf.Variable(tf.random_normal([total_embed_dim[i], att_hidden_dim], \
stddev=std_a))
b1_list[i] = tf.Variable(tf.random_normal([att_hidden_dim], stddev=std_b))
W2_list[i] = tf.Variable(tf.random_normal([att_hidden_dim, 1], stddev=std_b))
b2_list[i] = tf.Variable(tf.random_normal([1], stddev=0.01))
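# Initialization note: std_a = sqrt(2/(fan_in + fan_out)) is a Glorot-style
# scale for W1, while std_b = sqrt(2/att_hidden_dim) is a He-style variant
# used for b1 and W2.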
####### DNN part: ori ########
data_embed_ori = prepare_input_embed(x_input_one_hot, x_input_mul_hot)
# ####### DNN part: ctxt, clk, non_clk ########
# ####### interaction (data_embed_ori, data_embed_aux) ########
data_embed_aux = {}
wgt_sum_embed_aux = {}
for i in range(num_aux_type):
data_embed_aux[i] = prepare_input_embed_aux_interaction(x_input_one_hot_aux[i], \
x_input_mul_hot_aux[i], max_num_aux_inst_used[i], \
n_one_hot_slot_aux[i], n_mul_hot_slot_aux[i])
wgt_sum_embed_aux[i] = get_wgt_sum_embed_aux(data_embed_ori, data_embed_aux[i], \
W1_list[i], b1_list[i], W2_list[i], b2_list[i], \
max_num_aux_inst_used[i], total_embed_dim[i])
# ################################
# big concatenation
data_embed = tf.concat([data_embed_ori, wgt_sum_embed_aux[0]], 1)
for i in range(1, len(data_embed_aux)):
data_embed = tf.concat([data_embed, wgt_sum_embed_aux[i]], 1)
################################
# include output layer
n_layer = len(layer_dim)
cur_layer = data_embed
data_embed_shape = data_embed.get_shape().as_list()
in_dim = data_embed_shape[1]
# loop to create DNN struct
for i in range(0, n_layer):
out_dim = layer_dim[i]
weight = tf.Variable(tf.random_normal(shape=[in_dim, out_dim], stddev=np.sqrt(2.0/(in_dim+out_dim))))
bias = tf.Variable(tf.constant(0.1, shape=[out_dim]))
# output layer, linear activation
if i == n_layer - 1:
cur_layer = tf.matmul(cur_layer, weight) + bias
else:
cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight) + bias)
cur_layer = tf.nn.dropout(cur_layer, keep_prob)
in_dim = layer_dim[i]
y_hat = cur_layer
# log loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=y_target))
pred_score = tf.sigmoid(y_hat)
if opt_alg == 'Adam':
optimizer = tf.train.AdamOptimizer(eta).minimize(loss)
else:
# default
optimizer = tf.train.AdagradOptimizer(eta).minimize(loss)
########################################
# Launch the graph.
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
func.print_time()
print('Load val data')
# load val data
val_ft_inst, val_label_inst = sess.run([val_ft, val_label])
print('Done loading val data')
# Add ops to save and restore all the variables
saver = tf.train.Saver()
train_loss_list = []
val_avg_auc_list = []
epoch_list = []
best_n_round = 0
best_val_avg_auc = 0
early_stop_flag = 0
lower_ct = 0
func.print_time()
print('Start train loop')
epoch = -1
try:
while not coord.should_stop():
epoch += 1
train_ft_inst, train_label_inst = sess.run([train_ft, train_label])
train_label_inst = np.transpose([train_label_inst])
sess.run(optimizer, feed_dict={x_input:train_ft_inst, \
y_target:train_label_inst, keep_prob:1.0})
# record loss and accuracy every step_size generations
if (epoch+1)%record_step_size == 0:
epoch_list.append(epoch)
train_loss_temp = sess.run(loss, feed_dict={ \
x_input:train_ft_inst, \
y_target:train_label_inst, keep_prob:1})
train_loss_list.append(train_loss_temp)
val_pred_score_all = []
val_label_all = []
for iii in range(n_val_batch):
# get batch
start_idx = iii*batch_size
end_idx = (iii+1)*batch_size
cur_val_ft = val_ft_inst[start_idx: end_idx]
cur_val_label = val_label_inst[start_idx: end_idx]
# pred score
cur_val_pred_score = sess.run(pred_score, feed_dict={ \
x_input:cur_val_ft, keep_prob:1})
val_pred_score_all.append(cur_val_pred_score.flatten())
val_label_all.append(cur_val_label)
# calculate auc
val_pred_score_re = func.list_flatten(val_pred_score_all)
val_label_re = func.list_flatten(val_label_all)
val_auc_temp, _, _ = func.cal_auc(val_pred_score_re, val_label_re)
# record all val results
val_avg_auc_list.append(val_auc_temp)
# record best and save models
if val_auc_temp > best_val_avg_auc:
best_val_avg_auc = val_auc_temp
best_n_round = epoch
# Save the variables to disk
save_path = saver.save(sess, model_saving_addr)
print("Model saved in file: %s" % save_path)
# count of consecutive lower
if val_auc_temp < best_val_avg_auc:
lower_ct += 1
# once higher or equal, set to 0
else:
lower_ct = 0
if lower_ct >= max_num_lower_ct:
early_stop_flag = 1
auc_and_loss = [epoch+1, train_loss_temp, val_auc_temp]
auc_and_loss = [np.round(xx,4) for xx in auc_and_loss]
func.print_time()
print('Generation # {}. Train Loss: {:.4f}. Val Avg AUC: {:.4f}.'\
.format(*auc_and_loss))
if early_stop_flag == 1:
break
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
# after training
saver.restore(sess, model_saving_addr)
print("Model restored.")
# load test data
test_pred_score_all = []
test_label_all = []
test_loss_all = []
try:
while True:
test_ft_inst, test_label_inst = sess.run([test_ft, test_label])
cur_test_pred_score = sess.run(pred_score, feed_dict={ \
x_input:test_ft_inst, keep_prob:1})
test_pred_score_all.append(cur_test_pred_score.flatten())
test_label_all.append(test_label_inst)
cur_test_loss = sess.run(loss, feed_dict={ \
x_input:test_ft_inst, \
y_target: np.transpose([test_label_inst]), keep_prob:1})
test_loss_all.append(cur_test_loss)
except tf.errors.OutOfRangeError:
print('Done loading testing data -- epoch limit reached')
finally:
coord.request_stop()
coord.join(threads)
# calculate auc
test_pred_score_re = func.list_flatten(test_pred_score_all)
test_label_re = func.list_flatten(test_label_all)
test_auc, _, _ = func.cal_auc(test_pred_score_re, test_label_re)
test_rmse = func.cal_rmse(test_pred_score_re, test_label_re)
test_loss = np.mean(test_loss_all)
# rounding
test_auc = np.round(test_auc, 4)
test_rmse = np.round(test_rmse, 4)
test_loss = np.round(test_loss, 5)
train_loss_list = [np.round(xx,4) for xx in train_loss_list]
val_avg_auc_list = [np.round(xx,4) for xx in val_avg_auc_list]
print('test_auc = ', test_auc)
print('test_rmse =', test_rmse)
print('test_loss =', test_loss)
print('train_loss_list =', train_loss_list)
print('val_avg_auc_list =', val_avg_auc_list)
# write output to file
with open(output_file_name, 'a') as f:
now = datetime.datetime.now()
time_str = now.strftime(cfg.time_style)
f.write(time_str + '\n')
f.write('train_file_name = ' + train_file_name[0] + '\n')
f.write('learning_rate = ' + str(eta) + ', n_epoch = ' + str(n_epoch) \
            + ', emb_size = ' + str(k) + '\n')
f.write('test_auc = ' + str(test_auc) + '\n')
f.write('test_rmse = ' + str(test_rmse) + '\n')
f.write('test_loss = ' + str(test_loss) + '\n')
f.write('train_loss_list =' + str(train_loss_list) + '\n')
f.write('val_avg_auc_list =' + str(val_avg_auc_list) + '\n')
f.write('-'*50 + '\n')
| 41.977578
| 116
| 0.647687
|
7a86f530de6e2d6277130c9f4af543da8b573863
| 1,382
|
py
|
Python
|
experta/deffacts.py
|
Kirito56/ExpertaMadman
|
e14ab93e6e86ef942be3ee5487425a6f483f0dad
|
[
"MIT"
] | null | null | null |
experta/deffacts.py
|
Kirito56/ExpertaMadman
|
e14ab93e6e86ef942be3ee5487425a6f483f0dad
|
[
"MIT"
] | null | null | null |
experta/deffacts.py
|
Kirito56/ExpertaMadman
|
e14ab93e6e86ef942be3ee5487425a6f483f0dad
|
[
"MIT"
] | null | null | null |
from functools import update_wrapper
import inspect
class DefFacts:
def __new__(cls, nonexpected=None, order=0):
obj = super(DefFacts, cls).__new__(cls)
if nonexpected is not None:
            raise SyntaxError("DefFacts must be instantiated to allow decoration")
obj.__wrapped = None
obj._wrapped_self = None
obj.order = order
return obj
@property
def _wrapped(self):
return self.__wrapped
@_wrapped.setter
def _wrapped(self, value):
if inspect.isgeneratorfunction(value):
self.__wrapped = value
return update_wrapper(self, self.__wrapped)
else:
            raise TypeError("DefFacts can only decorate generators.")
def __repr__(self): # pragma: no cover
return "DefFacts(%r)" % (self._wrapped)
def __call__(self, *args, **kwargs):
if self._wrapped is not None:
if self._wrapped_self is None:
gen = self._wrapped(*args, **kwargs)
else:
gen = self._wrapped(self._wrapped_self, *args, **kwargs)
return (x.copy() for x in gen)
elif not args:
raise RuntimeError("Usage error.")
else:
self._wrapped = args[0]
return self
def __get__(self, instance, owner):
self._wrapped_self = instance
return self
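# Minimal usage sketch, assuming the wider experta package exposes
# KnowledgeEngine and Fact (neither is defined in this file). Per __call__
# above, every yielded fact is copied on each invocation:
#
#     from experta import KnowledgeEngine, Fact
#
#     class Greetings(KnowledgeEngine):
#         @DefFacts()
#         def startup_facts(self):
#             yield Fact(action="greet")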
| 28.204082
| 79
| 0.59479
|
6b06a4b3ff264ec1e90d9e815afc8765ae80063d
| 6,280
|
py
|
Python
|
lib/tests/streamlit/caching_test.py
|
domoritz/streamlit
|
5e8e0ec1b46ac0b322dc48d27494be674ad238fa
|
[
"Apache-2.0"
] | 1
|
2020-04-01T19:53:28.000Z
|
2020-04-01T19:53:28.000Z
|
lib/tests/streamlit/caching_test.py
|
sudachen/streamlit
|
f5326d68eb914eb5bb49da01b7f406ba4f5845d0
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/streamlit/caching_test.py
|
sudachen/streamlit
|
f5326d68eb914eb5bb49da01b7f406ba4f5845d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""st.caching unit tests."""
import threading
import unittest
import pytest
from mock import patch
import streamlit as st
from streamlit import caching
from tests import testutil
class CacheTest(testutil.DeltaGeneratorTestCase):
def tearDown(self):
# Some of these tests reach directly into _cache_info and twiddle it.
# Reset default values on teardown.
st.caching._cache_info.within_cached_func = 0
st.caching._cache_info.suppress_st_function_warning = 0
def test_simple(self):
@st.cache
def foo():
return 42
self.assertEqual(foo(), 42)
self.assertEqual(foo(), 42)
def test_deprecated_kwarg(self):
with pytest.raises(Exception) as e:
@st.cache(ignore_hash=True)
def foo():
return 42
assert (
"The `ignore_hash` argument has been renamed to `allow_output_mutation`."
in str(e.value)
)
@patch.object(st, "warning")
def test_args(self, warning):
called = [False]
@st.cache
def f(x):
called[0] = True
return x
self.assertFalse(called[0])
f(0)
self.assertTrue(called[0])
called = [False] # Reset called
f(0)
self.assertFalse(called[0])
f(1)
self.assertTrue(called[0])
warning.assert_not_called()
@patch.object(st, "warning")
def test_mutate_return(self, warning):
@st.cache
def f():
return [0, 1]
r = f()
r[0] = 1
warning.assert_not_called()
r2 = f()
warning.assert_called()
self.assertEqual(r, r2)
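    # The second call warns because st.cache hashes the cached return value
    # and detects that the stored object no longer matches its recorded hash,
    # i.e. the caller mutated cached output in place.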
@patch.object(st, "warning")
def test_mutate_args(self, warning):
@st.cache
def foo(d):
d["answer"] += 1
return d["answer"]
d = {"answer": 0}
self.assertNotEqual(foo(d), foo(d))
warning.assert_not_called()
@patch("streamlit.caching._show_cached_st_function_warning")
def test_cached_st_function_warning(self, warning):
st.text("foo")
warning.assert_not_called()
@st.cache
def cached_func():
st.text("Inside cached func")
cached_func()
warning.assert_called_once()
warning.reset_mock()
# Make sure everything got reset properly
st.text("foo")
warning.assert_not_called()
# Test warning suppression
@st.cache(suppress_st_warning=True)
def suppressed_cached_func():
st.text("No warnings here!")
suppressed_cached_func()
warning.assert_not_called()
# Test nested st.cache functions
@st.cache
def outer():
@st.cache
def inner():
st.text("Inside nested cached func")
return inner()
outer()
warning.assert_called_once()
warning.reset_mock()
# Test st.cache functions that raise errors
with self.assertRaises(RuntimeError):
@st.cache
def cached_raise_error():
st.text("About to throw")
raise RuntimeError("avast!")
cached_raise_error()
warning.assert_called_once()
warning.reset_mock()
# Make sure everything got reset properly
st.text("foo")
warning.assert_not_called()
# Test st.cache functions with widgets
@st.cache
def cached_widget():
st.button("Press me!")
cached_widget()
warning.assert_called_once()
warning.reset_mock()
# Make sure everything got reset properly
st.text("foo")
warning.assert_not_called()
def test_caching_counter(self):
"""Test that _within_cached_function_counter behaves properly in
multiple threads."""
def get_counter():
return caching._cache_info.within_cached_func
def set_counter(val):
caching._cache_info.within_cached_func = val
self.assertEqual(0, get_counter())
set_counter(1)
self.assertEqual(1, get_counter())
values_in_thread = []
def thread_test():
values_in_thread.append(get_counter())
set_counter(55)
values_in_thread.append(get_counter())
thread = threading.Thread(target=thread_test)
thread.start()
thread.join()
self.assertEqual([0, 55], values_in_thread)
# The other thread should not have modified the main thread
self.assertEqual(1, get_counter())
# Temporarily turn off these tests since there's no Cache object in __init__
# right now.
class CachingObjectTest(unittest.TestCase):
def off_test_simple(self):
val = 42
for _ in range(2):
c = st.Cache()
if c:
c.value = val
self.assertEqual(c.value, val)
def off_test_allow_output_mutation(self):
val = 42
for _ in range(2):
c = st.Cache(allow_output_mutation=True)
if c:
c.value = val
self.assertEqual(c.value, val)
def off_test_has_changes(self):
val = 42
for _ in range(2):
c = st.Cache()
if c.has_changes():
c.value = val
self.assertEqual(c.value, val)
@patch.object(st, "warning")
def off_test_mutate(self, warning):
for _ in range(2):
c = st.Cache()
if c:
c.value = [0, 1]
c.value[0] = 1
warning.assert_called()
| 24.53125
| 85
| 0.587739
|
e63a84e3a6de7c5441de25474a0a797c5ae70898
| 17,069
|
py
|
Python
|
sure_tosca-flask-server/sure_tosca/test/test_default_controller.py
|
QCDIS/CONF
|
6ddb37b691754bbba97c85228d266ac050c4baa4
|
[
"Apache-2.0"
] | null | null | null |
sure_tosca-flask-server/sure_tosca/test/test_default_controller.py
|
QCDIS/CONF
|
6ddb37b691754bbba97c85228d266ac050c4baa4
|
[
"Apache-2.0"
] | null | null | null |
sure_tosca-flask-server/sure_tosca/test/test_default_controller.py
|
QCDIS/CONF
|
6ddb37b691754bbba97c85228d266ac050c4baa4
|
[
"Apache-2.0"
] | 1
|
2021-04-05T09:49:03.000Z
|
2021-04-05T09:49:03.000Z
|
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
import os
from sure_tosca.models.node_template import NodeTemplateModel # noqa: E501
from sure_tosca.models.topology_template import TopologyTemplateModel # noqa: E501
from sure_tosca.models.tosca_template import ToscaTemplateModel # noqa: E501
from sure_tosca.test import BaseTestCase
class TestDefaultController(BaseTestCase):
"""DefaultController integration test stubs"""
def test_get_all_ancestor_properties(self):
"""Test case for get_all_ancestor_properties
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/ancestors_properties'.format(
id=id_example, node_name='compute'),
method='GET')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.is_json)
self.assertIsInstance(response.json, list)
def test_get_all_ancestor_types(self):
"""Test case for get_all_ancestor_types
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/ancestors_types'.format(
id=id_example, node_name='compute'),
method='GET')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.is_json)
self.assertIsInstance(response.json, list)
def test_get_ancestors_requirements(self):
"""Test case for get_ancestors_requirements
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/ancestors_requirements'.format(
id=id_example, node_name='kubernetes'),
method='GET')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.is_json)
self.assertIsInstance(response.json, list)
def test_get_dsl_definitions(self):
"""Test case for get_dsl_definitions
"""
# query_string = [('anchors', 'anchors_example'), ('derived_from', 'derived_from_example')]
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/dsl_definitions'.format(id=id_example),
method='GET')
self.assertTrue(response.is_json)
def test_get_imports(self):
"""Test case for get_imports
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/imports'.format(id=id_example),
method='GET')
self.assertTrue(response.is_json)
def test_get_node_outputs(self):
"""Test case for get_node_outputs
"""
id_example = self.upload_file('application_example_outputs.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/outputs'.format(
id=id_example, node_name='compute'),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, dict)
def test_get_node_properties(self):
"""Test case for get_node_properties
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/properties'.format(
id=id_example, node_name='compute'),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, dict)
def test_get_node_requirements(self):
"""Test case for get_node_requirements
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/requirements'.format(
id=id_example, node_name='kubernetes'),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
def test_get_node_templates(self):
"""Test case for get_node_templates
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
query_string = [('type_name', None),
('node_name', 'compute'),
('has_interfaces', True),
('has_properties', None),
('has_attributes', None),
('has_requirements', None),
('has_capabilities', None),
('has_artifacts', None)]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
query_string = [('type_name', None),
('node_name', None),
('has_interfaces', None),
('has_properties', None),
('has_attributes', None),
('has_requirements', None),
('has_capabilities', None),
('has_artifacts', None)]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
query_string = [('type_name', 'tosca.nodes.QC.Container.Application.Docker'),
('node_name', None),
('has_interfaces', None),
('has_properties', None),
('has_attributes', None),
('has_requirements', None),
('has_capabilities', None),
('has_artifacts', None)]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
query_string = [('type_name', 'tosca.nodes.QC.Application'),
('node_name', None),
('has_interfaces', True),
('has_properties', None),
('has_attributes', None),
('has_requirements', None),
('has_capabilities', None),
('has_artifacts', None)]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
def test_get_node_type_name(self):
"""Test case for get_node_type_name
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/type_name'.format(
id=id_example, node_name='compute'),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, str)
def test_get_parent_type_name(self):
"""Test case for get_parent_type_name
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/derived_from'.format(
id=id_example, node_name='kubernetes'),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, str)
def test_get_related_nodes(self):
"""Test case for get_related_nodes
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/related'.format(
id=id_example, node_name='ws-pema'),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
def test_get_relationship_templates(self):
"""Test case for get_relationship_templates
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
query_string = [('type_name', None),
('derived_from', None)]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/relationship_templates'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
def test_get_topology_template(self):
"""Test case for get_topology_template
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template'.format(id=id_example),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, dict)
def test_get_tosca_template(self):
"""Test case for get_tosca_template
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}'.format(id=id_example),
method='GET')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, dict)
def test_get_types(self):
"""Test case for get_types
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
query_string = [('kind_of_type', 'interface_types'),
('has_interfaces', None),
('type_name', 'tosca.interfaces.QC.CloudsStorm'),
('has_properties', None),
('has_attributes', None),
('has_requirements', None),
('has_capabilities', None),
('has_artifacts', None),
('derived_from', None)]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/types'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
query_string = [('kind_of_type', 'interface_types'),
('has_interfaces', None),
('type_name', None),
('has_properties', None),
('has_attributes', None),
('has_requirements', None),
('has_capabilities', None),
('has_artifacts', None),
('derived_from', 'tosca.interfaces.node.lifecycle.Standard')]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/types'.format(id=id_example),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
def test_set_node_properties(self):
"""Test case for set_node_properties
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
properties = {'properties': {'cpu_frequency': '2 GHz'}}
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates/{node_name}/properties'.format(
id=id_example, node_name='compute'),
method='PUT',
data=json.dumps(properties),
content_type='application/json')
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
def test_get_node_templates2(self):
"""Test case for get_node_templates
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/topology_template/node_templates'.format(id=id_example),
method='GET',
query_string=None)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, list)
self.assertTrue(response.json)
def test_get_default_interface(self):
"""Test case for get_default_interface
"""
id_example = self.upload_file('application_example_2_topologies.yaml')
self.assertTrue(id_example.strip().isdigit())
query_string = [('instance_name', 'instance_name_example'),
('operation_name', 'provision')]
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template/{id}/interface/{interface_type}/default'.format(id=id_example,
interface_type='tosca.interfaces.QC.CloudsStorm'),
method='GET',
query_string=query_string)
self.assertTrue(response.is_json)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.json, dict)
def upload_file(self, file_name):
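        # POST the TOSCA file and return the id the service assigns to it, so
        # follow-up requests can address the stored template.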
tosca_path = "../../../TOSCA/"
input_tosca_file_path = tosca_path + '/' + file_name
if not os.path.exists(input_tosca_file_path):
tosca_path = "../TOSCA/"
input_tosca_file_path = tosca_path + '/' + file_name
dir_path = os.path.dirname(os.path.realpath(__file__))
self.assertEqual(True, os.path.exists(input_tosca_file_path),
'Starting from: ' + dir_path + ' Input TOSCA file: ' + input_tosca_file_path + ' not found')
with open(input_tosca_file_path, 'r') as file:
contents = file.read()
byte_contents = bytes(contents, 'utf8')
data = dict(file=(BytesIO(byte_contents), input_tosca_file_path))
response = self.client.open(
'/tosca-sure/1.0.0/tosca_template',
method='POST',
data=data,
content_type='multipart/form-data')
return response.data.decode('utf-8').replace('\n', '')
if __name__ == '__main__':
import unittest
unittest.main()
| 41.329298
| 144
| 0.615443
|
73ac822f1bfb70eeee2c3e3bff157291549a14bc
| 102
|
py
|
Python
|
__init__.py
|
singh-yashwant/mayplotlib
|
373b45f410d66ed771a35b4d2b241122454b5afe
|
[
"MIT"
] | null | null | null |
__init__.py
|
singh-yashwant/mayplotlib
|
373b45f410d66ed771a35b4d2b241122454b5afe
|
[
"MIT"
] | null | null | null |
__init__.py
|
singh-yashwant/mayplotlib
|
373b45f410d66ed771a35b4d2b241122454b5afe
|
[
"MIT"
] | null | null | null |
def print1():
print("code for first program")
def print2():
print("code for second program")
| 17
| 36
| 0.656863
|
34d7af3a42ef55a1e98319f0432807be1c179db5
| 1,328
|
py
|
Python
|
contacts/migrations/0001_initial.py
|
liviamendes/agenda-django-project
|
d602bb5e762ea477c3c97b5a475ad79036c0c93d
|
[
"MIT"
] | null | null | null |
contacts/migrations/0001_initial.py
|
liviamendes/agenda-django-project
|
d602bb5e762ea477c3c97b5a475ad79036c0c93d
|
[
"MIT"
] | null | null | null |
contacts/migrations/0001_initial.py
|
liviamendes/agenda-django-project
|
d602bb5e762ea477c3c97b5a475ad79036c0c93d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-06-30 14:35
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('last_name', models.CharField(blank=True, max_length=255)),
('phone', models.CharField(max_length=255)),
('email', models.CharField(blank=True, max_length=255)),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('description', models.TextField(blank=True)),
('categoria', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contacts.categoria')),
],
),
]
| 35.891892
| 122
| 0.594127
|
079c71f90ac88efbed9f9c72b160b78ee55577e7
| 1,704
|
py
|
Python
|
game/tests/betabraintest.py
|
hyde777/betabrain
|
3d95ffcf49e33f0cce97bd0ad272ba9d500b77d4
|
[
"MIT"
] | 1
|
2018-09-08T07:13:16.000Z
|
2018-09-08T07:13:16.000Z
|
game/tests/betabraintest.py
|
hyde777/betabrain
|
3d95ffcf49e33f0cce97bd0ad272ba9d500b77d4
|
[
"MIT"
] | null | null | null |
game/tests/betabraintest.py
|
hyde777/betabrain
|
3d95ffcf49e33f0cce97bd0ad272ba9d500b77d4
|
[
"MIT"
] | null | null | null |
import unittest
import betabrain
class TestBetaBrainGame(unittest.TestCase):
def setUp(self):
betabrain.tries = list()
def test_generate_solution(self):
solution = betabrain.generateProblem()
self.assertEqual(len(solution), 5)
def test_problem_content(self):
solution = betabrain.generateProblem()
for p in solution:
self.assertEqual(p in betabrain.colors, True)
def test_correct_checker_output(self):
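        # check(problem, solution) returns a (well-placed, misplaced) peg count
        # tuple, which is what the assertions below exercise.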
problem = ['R', 'B', 'W', 'R', 'G']
solution = problem
self.assertEqual(betabrain.check(problem, solution), (5, 0))
def test_missplaced_pegs_checker_output(self):
problem = ['R', 'B', 'W', 'R', 'G']
solution = ['G', 'G', 'G', 'W', 'W']
self.assertEqual(betabrain.check(problem, solution), (0, 2))
def test_full_false_pegs(self):
problem = ['R', 'R', 'R', 'R', 'R']
solution = ['G', 'G', 'G', 'G', 'G']
self.assertEqual(betabrain.check(problem, solution), (0, 0))
def test_incorrect_checker_output(self):
problem = ['R', 'B', 'W', 'R', 'G']
solution = ['R', 'R', 'G', 'R', 'W']
self.assertEqual(betabrain.check(problem, solution), (2, 2))
def test_tries(self):
solution = ['R', 'P', 'G', 'R', 'G']
problems = (['R', 'B', 'W', 'R', 'G'], ['R', 'R', 'P', 'R', 'G'])
for problem in problems:
betabrain.check(problem, solution)
        result = [(['R', 'B', 'W', 'R', 'G'], (3, 0)), (['R', 'R', 'P', 'R', 'G'], (3, 2))]
self.assertEqual(len(betabrain.tries), 2)
self.assertEqual(betabrain.tries, result)
if __name__ == '__main__':
unittest.main()
| 29.894737
| 97
| 0.551056
|
71e1ce707e0766472d8aeed55173fe68d9059b84
| 11,795
|
py
|
Python
|
ethereum/tools/tester.py
|
BreezeRo/pyethereum
|
440af8f4ebf26d837d5c633ef09ef0e2338df43e
|
[
"MIT"
] | 1
|
2021-01-27T04:35:52.000Z
|
2021-01-27T04:35:52.000Z
|
ethereum/tools/tester.py
|
BreezeRo/pyethereum
|
440af8f4ebf26d837d5c633ef09ef0e2338df43e
|
[
"MIT"
] | null | null | null |
ethereum/tools/tester.py
|
BreezeRo/pyethereum
|
440af8f4ebf26d837d5c633ef09ef0e2338df43e
|
[
"MIT"
] | null | null | null |
from ethereum.utils import sha3, privtoaddr, int_to_addr, to_string, big_endian_to_int, checksum_encode, int_to_big_endian, encode_hex
from ethereum.genesis_helpers import mk_basic_state
from ethereum.pow import chain as pow_chain
from ethereum.transactions import Transaction
from ethereum.consensus_strategy import get_consensus_strategy
from ethereum.config import config_homestead, config_tangerine, config_spurious, config_metropolis, default_config, Env
from ethereum.pow.ethpow import Miner
from ethereum.messages import apply_transaction, apply_message
from ethereum.common import verify_execution_results, mk_block_from_prevstate, set_execution_results
from ethereum.meta import make_head_candidate
from ethereum.abi import ContractTranslator
import rlp
# Initialize accounts
accounts = []
keys = []
for account_number in range(10):
keys.append(sha3(to_string(account_number)))
accounts.append(privtoaddr(keys[-1]))
k0, k1, k2, k3, k4, k5, k6, k7, k8, k9 = keys[:10]
a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 = accounts[:10]
base_alloc = {}
minimal_alloc = {}
for a in accounts:
base_alloc[a] = {'balance': 10**24}
for i in range(1, 9):
base_alloc[int_to_addr(i)] = {'balance': 1}
minimal_alloc[int_to_addr(i)] = {'balance': 1}
minimal_alloc[accounts[0]] = {'balance': 10**18}
# Initialize languages
languages = {}
try:
import serpent
languages['serpent'] = serpent
except ImportError:
pass
from ethereum.tools._solidity import get_solidity
_solidity = get_solidity()
if _solidity:
languages['solidity'] = _solidity
try:
from viper import compiler
languages['viper'] = compiler
except ImportError:
pass
class TransactionFailed(Exception):
pass
import types
STARTGAS = 3141592
GASPRICE = 0
from ethereum.slogging import configure_logging
config_string = ':info'
# configure_logging(config_string=config_string)
class ABIContract(object): # pylint: disable=too-few-public-methods
def __init__(self, _tester, _abi, address):
self.address = address
if isinstance(_abi, ContractTranslator):
abi_translator = _abi
else:
abi_translator = ContractTranslator(_abi)
self.translator = abi_translator
for function_name in self.translator.function_data:
if self.translator.function_data[function_name]['is_constant']:
function = self.method_factory(_tester.call, function_name)
else:
function = self.method_factory(_tester.tx, function_name)
method = types.MethodType(function, self)
setattr(self, function_name, method)
@staticmethod
def method_factory(tx_or_call, function_name):
""" Return a proxy for calling a contract method with automatic encoding of
argument and decoding of results.
"""
def kall(self, *args, **kwargs):
key = kwargs.get('sender', k0)
result = tx_or_call( # pylint: disable=protected-access
sender=key,
to=self.address,
value=kwargs.get('value', 0),
data=self.translator.encode(function_name, args),
startgas=kwargs.get('startgas', STARTGAS),
gasprice=kwargs.get('gasprice', GASPRICE)
)
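            # Pass the raw result through when there is nothing to ABI-decode;
            # a single decoded value is unwrapped from its tuple below.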
if result is False:
return result
if result == b'':
return None
o = self.translator.decode(function_name, result)
return o[0] if len(o) == 1 else o
return kall
def get_env(env):
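    # Map fork names to chain configs; None selects the Spurious Dragon rules.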
d = {
None: config_spurious,
'mainnet': default_config,
'homestead': config_homestead,
'tangerine': config_tangerine,
'spurious': config_spurious,
'metropolis': config_metropolis,
}
return env if isinstance(env, Env) else Env(config=d[env])
class State(object):
def __init__(self, genesis):
self.state = genesis
def tx(self, sender=k0, to=b'\x00' * 20, value=0,
data=b'', startgas=STARTGAS, gasprice=GASPRICE):
sender_addr = privtoaddr(sender)
transaction = Transaction(self.state.get_nonce(sender_addr), gasprice, startgas,
to, value, data).sign(sender)
success, output = apply_transaction(self.state, transaction)
if not success:
raise TransactionFailed()
return output
def call(self, sender=k0, to=b'\x00' * 20, value=0,
data=b'', startgas=STARTGAS, gasprice=GASPRICE):
sender_addr = privtoaddr(sender)
result = apply_message(
self.state.ephemeral_clone(),
sender=sender_addr,
to=to,
value=value,
data=data,
gas=startgas)
if result is None:
raise TransactionFailed()
return result
class Chain(object):
def __init__(self, alloc=base_alloc, env=None, genesis=None):
if genesis:
self.chain = pow_chain.Chain(genesis, reset_genesis=True)
else:
self.chain = pow_chain.Chain(
mk_basic_state(
alloc,
None,
get_env(env)),
reset_genesis=True)
self.cs = get_consensus_strategy(self.chain.env.config)
self.block = mk_block_from_prevstate(
self.chain, timestamp=self.chain.state.timestamp + 1)
self.head_state = self.chain.state.ephemeral_clone()
self.cs.initialize(self.head_state, self.block)
self.last_sender = None
self.last_tx = None
def direct_tx(self, transaction):
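        # Apply the transaction to the head state and append it to the pending
        # block; drop the cached last_sender if it no longer matches.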
self.last_tx = transaction
if self.last_sender is not None and privtoaddr(
self.last_sender) != transaction.sender:
self.last_sender = None
success, output = apply_transaction(self.head_state, transaction)
self.block.transactions.append(transaction)
if not success:
raise TransactionFailed()
return output
def tx(self, sender=k0, to=b'\x00' * 20, value=0,
data=b'', startgas=STARTGAS, gasprice=GASPRICE):
sender_addr = privtoaddr(sender)
self.last_sender = sender
transaction = Transaction(self.head_state.get_nonce(sender_addr), gasprice, startgas,
to, value, data).sign(sender)
output = self.direct_tx(transaction)
return output
def call(self, sender=k0, to=b'\x00' * 20, value=0,
data=b'', startgas=STARTGAS, gasprice=GASPRICE):
sender_addr = privtoaddr(sender)
result = apply_message(
self.head_state.ephemeral_clone(),
sender=sender_addr,
to=to,
value=value,
data=data,
gas=startgas)
if result is None:
raise TransactionFailed()
return result
def last_gas_used(self, with_tx=False):
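        # Receipts store cumulative gas used, so a single transaction's cost is
        # the difference between the last two receipts; optionally exclude the
        # intrinsic (base) gas of the transaction itself.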
if len(self.head_state.receipts) == 1:
diff = self.head_state.receipts[-1].gas_used
else:
diff = self.head_state.receipts[-1].gas_used - \
self.head_state.receipts[-2].gas_used
return diff - (not with_tx) * self.last_tx.intrinsic_gas_used
def contract(self, sourcecode, args=[], sender=k0, value=0,
language=None, l=None, startgas=STARTGAS, gasprice=GASPRICE):
assert not (l and language)
language = l or language
if language == 'evm':
assert len(args) == 0
return self.tx(sender=sender, to=b'', value=value,
data=sourcecode, startgas=startgas, gasprice=gasprice)
else:
compiler = languages[language]
interface = compiler.mk_full_signature(sourcecode)
ct = ContractTranslator(interface)
code = compiler.compile(
sourcecode) + (ct.encode_constructor_arguments(args) if args else b'')
addr = self.tx(
sender=sender,
to=b'',
value=value,
data=code,
startgas=startgas,
gasprice=gasprice)
return ABIContract(self, ct, addr)
def mine(self, number_of_blocks=1, coinbase=a0):
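        # Finalize and proof-of-work mine the pending block, then mine any
        # additional empty blocks on top and move the head there.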
self.cs.finalize(self.head_state, self.block)
set_execution_results(self.head_state, self.block)
self.block = Miner(self.block).mine(rounds=100, start_nonce=0)
assert self.chain.add_block(self.block)
b = self.block
for i in range(1, number_of_blocks):
b, _ = make_head_candidate(
self.chain, parent=b, timestamp=self.chain.state.timestamp + 14, coinbase=coinbase)
b = Miner(b).mine(rounds=100, start_nonce=0)
assert self.chain.add_block(b)
self.change_head(b.header.hash, coinbase)
return b
def change_head(self, parent, coinbase=a0):
self.head_state = self.chain.mk_poststate_of_blockhash(
parent).ephemeral_clone()
self.block = mk_block_from_prevstate(
self.chain,
self.head_state,
timestamp=self.chain.state.timestamp,
coinbase=coinbase)
self.cs.initialize(self.head_state, self.block)
def snapshot(self):
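        # A snapshot is the committed state snapshot plus the pending
        # transaction count and block number, so revert() can also truncate
        # the in-flight block.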
self.head_state.commit()
return self.head_state.snapshot(), len(
self.block.transactions), self.block.number
def revert(self, snapshot):
state_snapshot, txcount, blknum = snapshot
assert blknum == self.block.number, "Cannot revert beyond block boundaries!"
self.block.transactions = self.block.transactions[:txcount]
self.head_state.revert(state_snapshot)
def int_to_0x_hex(v):
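    # State tests use minimal 0x-prefixed hex, so strip a leading zero nibble.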
o = encode_hex(int_to_big_endian(v))
if o and o[0] == '0':
return '0x' + o[1:]
else:
return '0x' + o
def mk_state_test_prefill(c):
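    # Capture the execution environment and the pre-state in the usual
    # GeneralStateTest JSON layout.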
env = {
"currentCoinbase": checksum_encode(c.head_state.block_coinbase),
"currentDifficulty": int_to_0x_hex(c.head_state.block_difficulty),
"currentGasLimit": int_to_0x_hex(c.head_state.gas_limit),
"currentNumber": int_to_0x_hex(c.head_state.block_number),
"currentTimestamp": int_to_0x_hex(c.head_state.timestamp),
"previousHash": "0x" + encode_hex(c.head_state.prev_headers[0].hash),
}
pre = c.head_state.to_dict()
return {"env": env, "pre": pre}
def mk_state_test_postfill(c, prefill, filler_mode=False):
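    # Combine the prefill with the last transaction and the post-state root,
    # keyed by the fork name matching the chain config.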
txdata = c.last_tx.to_dict()
modified_tx_data = {
"data": [txdata["data"]],
"gasLimit": [int_to_0x_hex(txdata["startgas"])],
"gasPrice": int_to_0x_hex(txdata["gasprice"]),
"nonce": int_to_0x_hex(txdata["nonce"]),
"secretKey": '0x' + encode_hex(c.last_sender),
"to": txdata["to"],
"value": [int_to_0x_hex(txdata["value"])],
}
c.head_state.commit()
postStateHash = '0x' + encode_hex(c.head_state.trie.root_hash)
if c.chain.config == config_homestead:
config = 'Homestead'
elif c.chain.config == config_tangerine:
config = 'EIP150'
elif c.chain.config == config_spurious:
config = 'EIP158'
elif c.chain.config == config_metropolis:
config = 'Metropolis'
else:
raise Exception("Cannot get config")
o = {
"env": prefill["env"],
"pre": prefill["pre"],
"transaction": modified_tx_data,
}
if not filler_mode:
o["post"] = {config: [{"hash": postStateHash,
"indexes": {"data": 0, "gas": 0, "value": 0}}]}
else:
o["expect"] = [{"indexes": {"data": 0, "gas": 0, "value": 0}, "network": [
"Metropolis"], "result": c.head_state.to_dict()}]
return o
| 35.42042
| 134
| 0.623315
|
4d4d2cc69b0eec34b626a84ee237c1c4c4c540a2
| 2,986
|
py
|
Python
|
paddlespeech/cli/base_commands.py
|
Honei/PaddleSpeech
|
83b941fc439696b1dd8fc0d044a5b29309574c3b
|
[
"Apache-2.0"
] | 1
|
2021-12-23T01:04:00.000Z
|
2021-12-23T01:04:00.000Z
|
paddlespeech/cli/base_commands.py
|
catcat0921/PaddleSpeech
|
775c4befbd4253eab9440c996f267683e7a2a4f0
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/cli/base_commands.py
|
catcat0921/PaddleSpeech
|
775c4befbd4253eab9440c996f267683e7a2a4f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from .entry import commands
from .utils import cli_register
from .utils import explicit_command_register
from .utils import get_command
__all__ = [
'BaseCommand',
'HelpCommand',
]
@cli_register(name='paddlespeech')
class BaseCommand:
def execute(self, argv: List[str]) -> bool:
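        # The bare `paddlespeech` command delegates straight to the help command.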
help = get_command('paddlespeech.help')
return help().execute(argv)
@cli_register(name='paddlespeech.help', description='Show help for commands.')
class HelpCommand:
def execute(self, argv: List[str]) -> bool:
msg = 'Usage:\n'
msg += ' paddlespeech <command> <options>\n\n'
msg += 'Commands:\n'
for command, detail in commands['paddlespeech'].items():
if command.startswith('_'):
continue
if '_description' not in detail:
continue
msg += ' {:<15} {}\n'.format(command,
detail['_description'])
print(msg)
return True
@cli_register(
name='paddlespeech.version',
description='Show version and commit id of current package.')
class VersionCommand:
def execute(self, argv: List[str]) -> bool:
try:
from .. import __version__
version = __version__
except ImportError:
version = 'Not an official release'
try:
from .. import __commit__
commit_id = __commit__
except ImportError:
commit_id = 'Not found'
msg = 'Package Version:\n'
msg += ' {}\n\n'.format(version)
msg += 'Commit ID:\n'
msg += ' {}\n\n'.format(commit_id)
print(msg)
return True
# Dynamically import each executor only when its specific command runs
_commands = {
'asr': ['Speech to text infer command.', 'ASRExecutor'],
'cls': ['Audio classification infer command.', 'CLSExecutor'],
'st': ['Speech translation infer command.', 'STExecutor'],
'text': ['Text command.', 'TextExecutor'],
'tts': ['Text to Speech infer command.', 'TTSExecutor'],
'vector': ['Speech to vector embedding infer command.', 'VectorExecutor'],
}
for com, info in _commands.items():
explicit_command_register(
name='paddlespeech.{}'.format(com),
description=info[0],
cls='paddlespeech.cli.{}.{}'.format(com, info[1]))
| 31.765957
| 78
| 0.626256
|
2508a1b9fb6ce788f0b7b5e240da62b33e0fee76
| 52,639
|
py
|
Python
|
mergify_engine/tests/functional/test_engine.py
|
eladb/mergify-engine
|
ca2eb9926be29d38feaf029caaa63cbb4c74659e
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/functional/test_engine.py
|
eladb/mergify-engine
|
ca2eb9926be29d38feaf029caaa63cbb4c74659e
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/functional/test_engine.py
|
eladb/mergify-engine
|
ca2eb9926be29d38feaf029caaa63cbb4c74659e
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2018 Mehdi Abaakouk <sileht@sileht.net>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os.path
import time
from unittest import mock
import yaml
from mergify_engine import check_api
from mergify_engine import config
from mergify_engine import context
from mergify_engine import debug
from mergify_engine import engine
from mergify_engine import tasks
from mergify_engine.clients import github
from mergify_engine.tests.functional import base
LOG = logging.getLogger(__name__)
def run_smart_strict_workflow_periodic_task():
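    # Kick off the smart-strict queue processing that celery beat would
    # normally schedule periodically in production.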
tasks.smart_strict_workflow_periodic_task.apply_async()
class TestEngineV2Scenario(base.FunctionalTestBase):
"""Mergify engine tests.
    These tests use real GitHub resources and are slow, so we must keep the
    number of scenarios as small as possible for now.
"""
def setUp(self):
with open(engine.mergify_rule_path, "r") as f:
engine.MERGIFY_RULE = yaml.safe_load(
f.read().replace("mergify[bot]", "mergify-test[bot]")
)
super(TestEngineV2Scenario, self).setUp()
def test_invalid_configuration(self):
rules = {
"pull_request_rules": [{"name": "foobar", "wrong key": 123,},],
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = ctxt.pull_engine_check_runs
assert len(checks) == 1
check = checks[0]
assert check["output"]["title"] == "The Mergify configuration is invalid"
assert check["output"]["summary"] == (
"* extra keys not allowed @ data['pull_request_rules'][0]['wrong key']\n"
"* required key not provided @ data['pull_request_rules'][0]['actions']\n"
"* required key not provided @ data['pull_request_rules'][0]['conditions']"
)
def test_invalid_yaml_configuration(self):
self.setup_repo("- this is totally invalid yaml\\n\n - *\n*")
p, _ = self.create_pr()
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = ctxt.pull_engine_check_runs
assert len(checks) == 1
check = checks[0]
assert check["output"]["title"] == "The Mergify configuration is invalid"
# Use startswith because the message has some weird \x00 char
assert check["output"]["summary"].startswith(
"""Invalid YAML at [line 3, column 2]
```
while scanning an alias
in "<byte string>", line 3, column 1:
*
^
expected alphabetic or numeric character, but found"""
)
check_id = check["id"]
annotations = list(
ctxt.client.items(
f"check-runs/{check_id}/annotations", api_version="antiope",
)
)
assert annotations == [
{
"path": ".mergify.yml",
"blob_href": mock.ANY,
"start_line": 3,
"start_column": 2,
"end_line": 3,
"end_column": 2,
"annotation_level": "failure",
"title": "Invalid YAML",
"message": mock.ANY,
"raw_details": None,
}
]
def test_invalid_new_configuration(self):
rules = {
"pull_request_rules": [
{
"name": "foobar",
"conditions": ["branch=master"],
"actions": {"comment": {"message": "hello"},},
},
],
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr(files={".mergify.yml": "not valid"})
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = ctxt.pull_engine_check_runs
assert len(checks) == 1
check = checks[0]
assert check["output"]["title"] == "The new Mergify configuration is invalid"
assert check["output"]["summary"] == "expected a dictionary"
def test_backport_cancelled(self):
stable_branch = self.get_full_branch_name("stable/3.1")
rules = {
"pull_request_rules": [
{
"name": "backport",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-3.1",
],
"actions": {"backport": {"branches": [stable_branch]}},
}
]
}
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
p, _ = self.create_pr()
self.add_label(p, "backport-3.1")
p.remove_from_labels("backport-3.1")
self.wait_for("pull_request", {"action": "unlabeled"})
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = list(
c
for c in ctxt.pull_engine_check_runs
if c["name"] == "Rule: backport (backport)"
)
self.assertEqual("neutral", checks[0]["conclusion"])
self.assertEqual(
"The rule doesn't match anymore, this action has been cancelled",
checks[0]["output"]["title"],
)
def test_request_reviews_users(self):
rules = {
"pull_request_rules": [
{
"name": "request_reviews",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"request_reviews": {"users": ["mergify-test1"]}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(1, len(pulls))
requests = pulls[0].get_review_requests()
self.assertEqual(
sorted(["mergify-test1"]), sorted([user.login for user in requests[0]])
)
def test_request_reviews_teams(self):
# Add a team to the repo with write permissions so it can review
team = list(self.o_admin.get_teams())[0]
team.set_repo_permission(self.r_o_admin, "push")
rules = {
"pull_request_rules": [
{
"name": "request_reviews",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"request_reviews": {"teams": [team.slug]}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(1, len(pulls))
requests = pulls[0].get_review_requests()
self.assertEqual(
sorted([team.slug]), sorted([team.slug for team in requests[1]])
)
def test_debugger(self):
rules = {
"pull_request_rules": [
{
"name": "comment",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"comment": {"message": "WTF?"}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
debug.report(p.html_url)
def test_backport_no_branch(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-#3.1",
],
"actions": {"merge": {"method": "merge", "rebase_fallback": None}},
},
{
"name": "Backport",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-#3.1",
],
"actions": {"backport": {"branches": ["crashme"]}},
},
]
}
self.setup_repo(yaml.dump(rules), test_branches=[])
p, commits = self.create_pr(two_commits=True)
self.add_label(p, "backport-#3.1")
self.wait_for("pull_request", {"action": "closed"})
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = list(
c
for c in ctxt.pull_engine_check_runs
if c["name"] == "Rule: Backport (backport)"
)
assert "failure" == checks[0]["conclusion"]
assert "No backport have been created" == checks[0]["output"]["title"]
assert (
"* Backport to branch `crashme` failed: Branch not found"
== checks[0]["output"]["summary"]
)
def _do_backport_conflicts(self, ignore_conflicts):
stable_branch = self.get_full_branch_name("stable/#3.1")
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-#3.1",
],
"actions": {"merge": {"method": "rebase"}},
},
{
"name": "Backport to stable/#3.1",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-#3.1",
],
"actions": {
"backport": {
"branches": [stable_branch],
"ignore_conflicts": ignore_conflicts,
}
},
},
]
}
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
# Commit something in stable
self.git("checkout", "--quiet", stable_branch)
        # Write to the file that create_pr will create in master
with open(os.path.join(self.git.tmp, "conflicts"), "wb") as f:
f.write(b"conflicts incoming")
self.git("add", "conflicts")
self.git("commit", "--no-edit", "-m", "add conflict")
self.git("push", "--quiet", "main", stable_branch)
p, commits = self.create_pr(files={"conflicts": "ohoh"})
self.add_label(p, "backport-#3.1")
self.wait_for("pull_request", {"action": "closed"})
ctxt = context.Context(self.cli_integration, p.raw_data, {})
return (
p,
list(
c
for c in ctxt.pull_engine_check_runs
if c["name"] == "Rule: Backport to stable/#3.1 (backport)"
),
)
def test_backport_conflicts(self):
stable_branch = self.get_full_branch_name("stable/#3.1")
p, checks = self._do_backport_conflicts(False)
        # Retrieve the new commit id that has been cherry-picked
self.git("fetch", "main")
commit_id = (
self.git("show-ref", "--hash", f"main/{self.master_branch_name}")
.decode("utf-8")
.strip()
)
assert "failure" == checks[0]["conclusion"]
assert "No backport have been created" == checks[0]["output"]["title"]
assert (
f"""* Backport to branch `{stable_branch}` failed
Cherry-pick of {commit_id} has failed:
```
On branch mergify/bp/{stable_branch}/pr-{p.number}
Your branch is up to date with 'origin/{stable_branch}'.
You are currently cherry-picking commit {commit_id[:7]}.
(fix conflicts and run "git cherry-pick --continue")
(use "git cherry-pick --skip" to skip this patch)
(use "git cherry-pick --abort" to cancel the cherry-pick operation)
Unmerged paths:
(use "git add <file>..." to mark resolution)
both added: conflicts
no changes added to commit (use "git add" and/or "git commit -a")
```
"""
== checks[0]["output"]["summary"]
)
def test_backport_ignore_conflicts(self):
stable_branch = self.get_full_branch_name("stable/#3.1")
p, checks = self._do_backport_conflicts(True)
pull = list(self.r_o_admin.get_pulls(base=stable_branch))[0]
assert "success" == checks[0]["conclusion"]
assert "Backports have been created" == checks[0]["output"]["title"]
assert (
f"* [#%d %s](%s) has been created for branch `{stable_branch}`"
% (pull.number, pull.title, pull.html_url,)
== checks[0]["output"]["summary"]
)
assert [label.name for label in pull.labels] == ["conflicts"]
def _do_test_backport(self, method, config=None):
stable_branch = self.get_full_branch_name("stable/#3.1")
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-#3.1",
],
"actions": {"merge": {"method": method, "rebase_fallback": None}},
},
{
"name": "Backport to stable/#3.1",
"conditions": [
f"base={self.master_branch_name}",
"label=backport-#3.1",
],
"actions": {"backport": config or {"branches": [stable_branch]}},
},
]
}
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
p, commits = self.create_pr(two_commits=True)
# Create another PR to be sure we don't mess things up
# see https://github.com/Mergifyio/mergify-engine/issues/849
self.create_pr(base=stable_branch)
self.add_label(p, "backport-#3.1")
self.wait_for("pull_request", {"action": "closed"})
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
self.assertEqual(1, len(pulls))
self.assertEqual(True, pulls[0].merged)
self.assertEqual("closed", pulls[0].state)
pulls = list(self.r_o_admin.get_pulls(state="all", base=stable_branch))
self.assertEqual(2, len(pulls))
self.assertEqual(False, pulls[0].merged)
self.assertEqual(False, pulls[1].merged)
bp_pull = pulls[0]
assert bp_pull.title == f"Pull request n1 from fork (bp #{p.number})"
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = list(
c
for c in ctxt.pull_engine_check_runs
if c["name"] == "Rule: Backport to stable/#3.1 (backport)"
)
assert "success" == checks[0]["conclusion"]
assert "Backports have been created" == checks[0]["output"]["title"]
assert (
f"* [#%d %s](%s) has been created for branch `{stable_branch}`"
% (bp_pull.number, bp_pull.title, bp_pull.html_url,)
== checks[0]["output"]["summary"]
)
self.assertEqual(
[f"mergify/bp/{stable_branch}/pr-{p.number}"],
[
b.name
for b in self.r_o_admin.get_branches()
if b.name.startswith("mergify/bp")
],
)
return pulls[0]
    def test_backport_merge_commit(self):
        p = self._do_test_backport("merge")
        self.assertEqual(2, p.commits)
    def test_backport_merge_commit_regexes(self):
        prefix = self.get_full_branch_name("stable")
        p = self._do_test_backport("merge", config={"regexes": [f"^{prefix}/.*$"]})
        self.assertEqual(2, p.commits)
    def test_backport_squash_and_merge(self):
        p = self._do_test_backport("squash")
        self.assertEqual(1, p.commits)
    def test_backport_rebase_and_merge(self):
        p = self._do_test_backport("rebase")
        self.assertEqual(2, p.commits)
def test_merge_squash(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [f"base={self.master_branch_name}", "label=squash"],
"actions": {"merge": {"method": "squash"}},
},
]
}
self.setup_repo(yaml.dump(rules))
p1, _ = self.create_pr(files={"foo": "bar"})
p2, _ = self.create_pr(two_commits=True)
p1.merge()
self.add_label(p2, "squash")
self.wait_for("pull_request", {"action": "closed"})
p2.update()
self.assertEqual(2, p2.commits)
self.assertEqual(True, p2.merged)
def test_merge_strict_squash(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [f"base={self.master_branch_name}", "label=squash"],
"actions": {"merge": {"strict": "smart", "method": "squash"}},
},
]
}
self.setup_repo(yaml.dump(rules))
p1, _ = self.create_pr(files={"foo": "bar"})
p2, _ = self.create_pr(two_commits=True)
p1.merge()
self.add_label(p2, "squash")
run_smart_strict_workflow_periodic_task()
self.wait_for("pull_request", {"action": "closed"})
p2.update()
self.assertEqual(3, p2.commits)
self.assertEqual(True, p2.merged)
def test_merge_strict_rebase(self):
rules = {
"pull_request_rules": [
{
"name": "smart strict merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
"#approved-reviews-by>=1",
],
"actions": {"merge": {"strict": True, "strict_method": "rebase"}},
}
]
}
stable_branch = self.get_full_branch_name("stable/3.1")
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
p, _ = self.create_pr()
p2, commits = self.create_pr()
p.merge()
self.wait_for("pull_request", {"action": "closed"}),
previous_master_sha = self.r_o_admin.get_commits()[0].sha
self.create_status(p2)
self.create_review(p2, commits[0])
self.wait_for("pull_request", {"action": "synchronize"})
p2 = self.r_o_admin.get_pull(p2.number)
commits2 = list(p2.get_commits())
        self.assertEqual(1, len(commits2))
self.assertNotEqual(commits[0].sha, commits2[0].sha)
self.assertEqual(commits[0].commit.message, commits2[0].commit.message)
# Retry to merge pr2
self.create_status(p2)
self.wait_for("pull_request", {"action": "closed"})
master_sha = self.r_o_admin.get_commits()[0].sha
self.assertNotEqual(previous_master_sha, master_sha)
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(0, len(pulls))
def test_merge_strict_default(self):
rules = {
"pull_request_rules": [
{
"name": "smart strict merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
"#approved-reviews-by>=1",
],
"actions": {"merge": {"strict": True}},
}
]
}
stable_branch = self.get_full_branch_name("stable/3.1")
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
p, _ = self.create_pr()
p2, commits = self.create_pr()
p.merge()
self.wait_for("pull_request", {"action": "closed"})
previous_master_sha = self.r_o_admin.get_commits()[0].sha
self.create_status(p2)
self.create_review(p2, commits[0])
self.wait_for("pull_request", {"action": "synchronize"})
p2 = self.r_o_admin.get_pull(p2.number)
commits2 = list(p2.get_commits())
        # Check master has been merged into the PR
self.assertIn(
f"Merge branch '{self.master_branch_name}' into {self.get_full_branch_name('fork/pr2')}",
commits2[-1].commit.message,
)
# Retry to merge pr2
self.create_status(p2)
self.wait_for("pull_request", {"action": "closed"})
master_sha = self.r_o_admin.get_commits()[0].sha
self.assertNotEqual(previous_master_sha, master_sha)
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(0, len(pulls))
def test_merge_smart_strict(self):
rules = {
"pull_request_rules": [
{
"name": "strict merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
"#approved-reviews-by>=1",
],
"actions": {"merge": {"strict": "smart"}},
}
]
}
stable_branch = self.get_full_branch_name("stable/3.1")
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
p, _ = self.create_pr()
p2, commits = self.create_pr()
p.merge()
self.wait_for("pull_request", {"action": "closed"})
previous_master_sha = self.r_o_admin.get_commits()[0].sha
self.create_status(p2)
self.create_review(p2, commits[0])
r = self.app.get(
"/queues/%s" % (config.INSTALLATION_ID),
headers={
"X-Hub-Signature": "sha1=whatever",
"Content-type": "application/json",
},
)
assert r.json() == {
"mergifyio-testing/%s" % self.name: {self.master_branch_name: [p2.number]}
}
        # We can't run celery beat inside tests, so run the task manually
run_smart_strict_workflow_periodic_task()
r = self.app.get(
"/queues/%s" % (config.INSTALLATION_ID),
headers={
"X-Hub-Signature": "sha1=whatever",
"Content-type": "application/json",
},
)
assert r.json() == {
"mergifyio-testing/%s" % self.name: {self.master_branch_name: [p2.number]}
}
self.wait_for("pull_request", {"action": "synchronize"})
p2 = self.r_o_admin.get_pull(p2.number)
commits2 = list(p2.get_commits())
        # Check master has been merged into the PR
self.assertIn(
f"Merge branch '{self.master_branch_name}' into {self.get_full_branch_name('fork/pr2')}",
commits2[-1].commit.message,
)
ctxt = context.Context(self.cli_integration, p2.raw_data, {})
for check in ctxt.pull_check_runs:
if check["name"] == "Rule: strict merge on master (merge)":
assert (
"will be merged soon.\n\n"
f"The following pull requests are queued: #{p2.number}\n\n"
"The required statuses are:\n\n"
f"- [X] `base={self.master_branch_name}`\n"
"- [ ] `status-success=continuous-integration/fake-ci`\n"
"- [X] `#approved-reviews-by>=1`"
) in check["output"]["summary"]
break
else:
assert False, "Merge check not found"
# Retry to merge pr2
self.create_status(p2)
self.wait_for("pull_request", {"action": "closed"})
master_sha = self.r_o_admin.get_commits()[0].sha
self.assertNotEqual(previous_master_sha, master_sha)
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(0, len(pulls))
def test_merge_failure_smart_strict(self):
rules = {
"pull_request_rules": [
{
"name": "strict merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"strict": "smart"}},
}
]
}
stable_branch = self.get_full_branch_name("stable/3.1")
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch])
p, _ = self.create_pr()
p2, commits = self.create_pr()
p3, commits = self.create_pr()
p.merge()
self.wait_for("pull_request", {"action": "closed"})
previous_master_sha = self.r_o_admin.get_commits()[0].sha
self.create_status(p2, "continuous-integration/fake-ci", "success")
        # We can't run celery beat inside tests, so run the task manually
run_smart_strict_workflow_periodic_task()
self.wait_for("pull_request", {"action": "synchronize"})
self.create_status(p3, "continuous-integration/fake-ci", "success")
p2 = self.r_o_admin.get_pull(p2.number)
commits2 = list(p2.get_commits())
self.assertIn(
f"Merge branch '{self.master_branch_name}' into {self.get_full_branch_name('fork/pr2')}",
commits2[-1].commit.message,
)
self.create_status(p2, "continuous-integration/fake-ci", "failure")
        # FIXME(sileht): The previous actions tracker posted a "Rule XXXX (merge)"
        # check with a neutral status saying the merge doesn't match anymore; the
        # new one doesn't. It's not a big deal as the rule no longer matches anyway.
self.wait_for("check_run", {"check_run": {"conclusion": "neutral"}})
        # Should move on to the next PR
run_smart_strict_workflow_periodic_task()
self.wait_for("pull_request", {"action": "synchronize"})
p3 = self.r_o_admin.get_pull(p3.number)
commits3 = list(p3.get_commits())
self.assertIn(
f"Merge branch '{self.master_branch_name}' into {self.get_full_branch_name('fork/pr')}",
commits3[-1].commit.message,
)
self.create_status(p3, "continuous-integration/fake-ci", "success")
self.wait_for("pull_request", {"action": "closed"})
master_sha = self.r_o_admin.get_commits()[0].sha
self.assertNotEqual(previous_master_sha, master_sha)
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(1, len(pulls))
def test_short_teams(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
"approved-reviews-by=@testing",
],
"actions": {"merge": {"method": "rebase"}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, commits = self.create_pr()
installation = {"id": config.INSTALLATION_ID}
client = github.get_client(p.base.user.login, p.base.repo.name, installation)
pull = context.Context(client, p.raw_data, {})
logins = pull.resolve_teams(
["user", "@testing", "@unknown/team", "@invalid/team/break-here"]
)
assert sorted(logins) == sorted(
[
"user",
"@unknown/team",
"@invalid/team/break-here",
"sileht",
"jd",
"mergify-test1",
]
)
def test_teams(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
"approved-reviews-by=@mergifyio-testing/testing",
],
"actions": {"merge": {"method": "rebase"}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, commits = self.create_pr()
installation = {"id": config.INSTALLATION_ID}
client = github.get_client(p.base.user.login, p.base.repo.name, installation)
pull = context.Context(client, p.raw_data, {})
logins = pull.resolve_teams(
[
"user",
"@mergifyio-testing/testing",
"@unknown/team",
"@invalid/team/break-here",
]
)
assert sorted(logins) == sorted(
[
"user",
"@unknown/team",
"@invalid/team/break-here",
"jd",
"sileht",
"mergify-test1",
]
)
def _test_merge_custom_msg(
self, header, method="squash", msg=None, commit_msg=None
):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"method": method}},
}
]
}
self.setup_repo(yaml.dump(rules))
if msg is None:
msg = "This is the title\n\nAnd this is the message"
p, _ = self.create_pr(message=f"It fixes it\n\n## {header}{msg}")
self.create_status(p)
self.wait_for("pull_request", {"action": "closed"})
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
self.assertEqual(1, len(pulls))
self.assertEqual(True, pulls[0].merged)
commit = self.r_o_admin.get_commits()[0].commit
if commit_msg is None:
commit_msg = msg
assert commit_msg == commit.message
def test_merge_custom_msg(self):
return self._test_merge_custom_msg("Commit Message:\n")
def test_merge_custom_msg_case(self):
return self._test_merge_custom_msg("Commit message\n")
def test_merge_custom_msg_rn(self):
return self._test_merge_custom_msg("Commit Message\r\n")
def test_merge_custom_msg_merge(self):
return self._test_merge_custom_msg("Commit Message:\n", "merge")
def test_merge_custom_msg_template(self):
return self._test_merge_custom_msg(
"Commit Message:\n",
"merge",
msg="{{title}}\n\nThanks to {{author}}",
commit_msg="Pull request n1 from fork\n\nThanks to mergify-test2",
)
def test_merge_invalid_custom_msg(self):
rules = {
"pull_request_rules": [
{
"name": "merge",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"method": "merge"}},
}
]
}
self.setup_repo(yaml.dump(rules))
msg = "This is the title\n\nAnd this is the message {{invalid}}"
p, _ = self.create_pr(message=f"It fixes it\n\n## Commit Message\n{msg}")
self.create_status(p)
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
assert 1 == len(pulls)
assert pulls[0].merged is False
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = list(
c for c in ctxt.pull_engine_check_runs if c["name"] == "Rule: merge (merge)"
)
assert "completed" == checks[0]["status"]
assert checks[0]["conclusion"] == "action_required"
assert (
"Unknown pull request attribute: invalid" == checks[0]["output"]["summary"]
)
assert "Invalid commit message" == checks[0]["output"]["title"]
        # Edit the body to fix the typo
p.edit(body="It fixes it\n\n## Commit Message\n\nHere it is valid now")
self.wait_for("pull_request", {"action": "closed"})
self.wait_for(
"check_run",
{"check_run": {"conclusion": "success", "status": "completed"}},
)
# delete check run cache
del ctxt.__dict__["pull_check_runs"]
checks = list(
c for c in ctxt.pull_engine_check_runs if c["name"] == "Rule: merge (merge)"
)
assert "completed" == checks[0]["status"]
assert checks[0]["conclusion"] == "success"
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
assert 1 == len(pulls)
assert pulls[0].merged
def test_merge_custom_msg_title_body(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
],
"actions": {
"merge": {"method": "merge", "commit_message": "title+body"}
},
}
]
}
self.setup_repo(yaml.dump(rules))
msg = "It fixes it"
p, _ = self.create_pr(message=msg)
self.create_status(p)
self.wait_for("pull_request", {"action": "closed"})
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
self.assertEqual(1, len(pulls))
self.assertEqual(True, pulls[0].merged)
commit = self.r_o_admin.get_commits()[0].commit
self.assertEqual(f"Pull request n1 from fork\n\n{msg}", commit.message)
def test_merge_and_closes_issues(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"method": "merge"}},
}
]
}
self.setup_repo(yaml.dump(rules))
i = self.r_o_admin.create_issue(
title="Such a bug", body="I can't explain, but don't work"
)
p, commits = self.create_pr(message="It fixes it\n\nCloses #%s" % i.number)
self.create_status(p)
self.wait_for("pull_request", {"action": "closed"})
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
self.assertEqual(1, len(pulls))
self.assertEqual(p.number, pulls[0].number)
self.assertEqual(True, pulls[0].merged)
self.assertEqual("closed", pulls[0].state)
issue = self.r_o_admin.get_issue(i.number)
self.assertEqual("closed", issue.state)
def test_rebase(self):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
"#approved-reviews-by>=1",
],
"actions": {"merge": {"method": "rebase"}},
}
]
}
self.setup_repo(yaml.dump(rules))
p2, commits = self.create_pr()
self.create_status(p2)
self.create_review(p2, commits[0])
self.wait_for("pull_request", {"action": "closed"})
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
self.assertEqual(1, len(pulls))
self.assertEqual(True, pulls[0].merged)
self.assertEqual("closed", pulls[0].state)
def test_merge_branch_protection_ci(self):
rules = {
"pull_request_rules": [
{
"name": "merge",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"merge": {}},
}
]
}
self.setup_repo(yaml.dump(rules))
        # Configure the branch protection policy under test
rule = {
"protection": {
"required_status_checks": {
"strict": False,
"contexts": ["continuous-integration/fake-ci"],
},
"required_pull_request_reviews": None,
"restrictions": None,
"enforce_admins": False,
}
}
self.branch_protection_protect(self.master_branch_name, rule)
p, _ = self.create_pr()
self.wait_for(
"check_run", {"check_run": {"conclusion": None, "status": "in_progress"}},
)
ctxt = context.Context(self.cli_integration, p.raw_data, {})
checks = list(
c for c in ctxt.pull_engine_check_runs if c["name"] == "Rule: merge (merge)"
)
self.assertEqual(None, checks[0]["conclusion"])
self.assertEqual("in_progress", checks[0]["status"])
self.assertIn(
"Waiting for the Branch Protection to be validated",
checks[0]["output"]["title"],
)
self.create_status(p)
self.wait_for("pull_request", {"action": "closed"})
pulls = list(
self.r_o_admin.get_pulls(state="all", base=self.master_branch_name)
)
self.assertEqual(1, len(pulls))
self.assertEqual(True, pulls[0].merged)
def test_merge_branch_protection_strict(self):
rules = {
"pull_request_rules": [
{
"name": "merge",
"conditions": [
f"base={self.master_branch_name}",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {}},
}
]
}
self.setup_repo(yaml.dump(rules))
        # Configure the branch protection policy under test
rule = {
"protection": {
"required_status_checks": {
"strict": True,
"contexts": ["continuous-integration/fake-ci"],
},
"required_pull_request_reviews": None,
"restrictions": None,
"enforce_admins": False,
}
}
p1, _ = self.create_pr()
p2, _ = self.create_pr()
p1.merge()
self.branch_protection_protect(self.master_branch_name, rule)
self.wait_for("pull_request", {"action": "closed"})
self.create_status(p2)
self.wait_for("check_run", {"check_run": {"conclusion": "failure"}})
ctxt = context.Context(self.cli_integration, p2.raw_data, {})
checks = list(
c for c in ctxt.pull_engine_check_runs if c["name"] == "Rule: merge (merge)"
)
self.assertEqual("failure", checks[0]["conclusion"])
self.assertIn(
"Branch protection setting 'strict' conflicts with "
"Mergify configuration",
checks[0]["output"]["title"],
)
def _init_test_refresh(self):
rules = {
"pull_request_rules": [
{
"name": "nothing",
"conditions": [f"base!={self.master_branch_name}"],
"actions": {"merge": {}},
}
]
}
self.setup_repo(yaml.dump(rules))
p1, commits1 = self.create_pr()
p2, commits2 = self.create_pr()
rules = {
"pull_request_rules": [
{
"name": "automerge",
"conditions": ["label!=wip"],
"actions": {"merge": {}},
}
]
}
self.git("checkout", self.master_branch_name)
with open(self.git.tmp + "/.mergify.yml", "w") as f:
f.write(yaml.dump(rules))
self.git("add", ".mergify.yml")
self.git("commit", "--no-edit", "-m", "automerge everything")
self.git("push", "--quiet", "main", self.master_branch_name)
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(2, len(pulls))
return p1, p2
def test_refresh_pull(self):
p1, p2 = self._init_test_refresh()
self.app.post(
"/refresh/%s/pull/%s" % (p1.base.repo.full_name, p1.number),
headers={"X-Hub-Signature": "sha1=" + base.FAKE_HMAC},
)
self.app.post(
"/refresh/%s/pull/%s" % (p2.base.repo.full_name, p2.number),
headers={"X-Hub-Signature": "sha1=" + base.FAKE_HMAC},
)
self.wait_for("pull_request", {"action": "closed"})
self.wait_for("pull_request", {"action": "closed"})
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(0, len(pulls))
def test_command_refresh(self):
rules = {
"pull_request_rules": [
{
"name": "nothing",
"conditions": [f"base!={self.master_branch_name}"],
"actions": {"merge": {}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, commits = self.create_pr()
ctxt = context.Context(self.cli_integration, p.raw_data, {})
check_api.set_check_run(
ctxt,
"Summary",
"completed",
"success",
output={"title": "whatever", "summary": "erased"},
)
assert len(ctxt.pull_check_runs) == 1
assert ctxt.pull_check_runs[0]["name"] == "Summary"
completed_at = ctxt.pull_check_runs[0]["completed_at"]
p.create_issue_comment("@mergifyio refresh")
self.wait_for("issue_comment", {"action": "created"})
del ctxt.__dict__["pull_check_runs"]
assert len(ctxt.pull_check_runs) == 1
assert ctxt.pull_check_runs[0]["name"] == "Summary"
assert completed_at != ctxt.pull_check_runs[0]["completed_at"]
p.update()
comments = list(p.get_issue_comments())
self.assertEqual("**Command `refresh`: success**", comments[-1].body)
def test_refresh_branch(self):
p1, p2 = self._init_test_refresh()
self.app.post(
"/refresh/%s/branch/master" % (p1.base.repo.full_name),
headers={"X-Hub-Signature": "sha1=" + base.FAKE_HMAC},
)
self.wait_for("pull_request", {"action": "closed"})
self.wait_for("pull_request", {"action": "closed"})
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(0, len(pulls))
def test_refresh_repo(self):
p1, p2 = self._init_test_refresh()
self.app.post(
"/refresh/%s" % (p1.base.repo.full_name),
headers={"X-Hub-Signature": "sha1=" + base.FAKE_HMAC},
)
self.wait_for("pull_request", {"action": "closed"})
self.wait_for("pull_request", {"action": "closed"})
pulls = list(self.r_o_admin.get_pulls(base=self.master_branch_name))
self.assertEqual(0, len(pulls))
def test_change_mergify_yml(self):
rules = {
"pull_request_rules": [
{
"name": "nothing",
"conditions": [f"base!={self.master_branch_name}"],
"actions": {"merge": {}},
}
]
}
self.setup_repo(yaml.dump(rules))
rules["pull_request_rules"].append(
{"name": "foobar", "conditions": ["label!=wip"], "actions": {"merge": {}}}
)
p1, commits1 = self.create_pr(files={".mergify.yml": yaml.dump(rules)})
ctxt = context.Context(self.cli_integration, p1.raw_data, {})
assert len(ctxt.pull_check_runs) == 1
assert ctxt.pull_check_runs[0]["name"] == "Summary"
def test_marketplace_event(self):
with mock.patch(
"mergify_engine.branch_updater.sub_utils.get_subscription"
) as get_sub:
get_sub.return_value = self.subscription
r = self.app.post(
"/marketplace",
headers={
"X-Hub-Signature": "sha1=whatever",
"Content-type": "application/json",
},
json={
"sender": {"login": "jd"},
"marketplace_purchase": {
"account": {
"login": "mergifyio-testing",
"type": "Organization",
}
},
},
)
assert r.content == b"Event queued"
assert r.status_code == 202
def test_refresh_on_conflict(self):
rules = {
"pull_request_rules": [
{
"name": "comment-on-conflict",
"conditions": ["conflict"],
"actions": {"comment": {"message": "It conflict!"}},
}
]
}
self.setup_repo(yaml.dump(rules), files={"TESTING": "foobar"})
p1, _ = self.create_pr(files={"TESTING": "p1"})
p2, _ = self.create_pr(files={"TESTING": "p2"})
p1.merge()
        # Since we use the celery eager mode for testing, countdown= values
        # are ignored. Wait a bit so GitHub has refreshed the mergeable_state
        # before running the engine.
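        # base.RECORD presumably distinguishes live (recording) runs from
        # replayed cassettes; the sleep is only needed against the real API.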
if base.RECORD:
time.sleep(10)
self.wait_for("pull_request", {"action": "closed"})
self.wait_for(
"issue_comment", {"action": "created", "comment": {"body": "It conflict!"}},
)
def test_command_update(self):
rules = {
"pull_request_rules": [
{
"name": "auto-rebase-on-conflict",
"conditions": ["conflict"],
"actions": {"comment": {"message": "nothing"}},
}
]
}
self.setup_repo(yaml.dump(rules), files={"TESTING": "foobar"})
p1, _ = self.create_pr(files={"TESTING2": "foobar"})
p2, _ = self.create_pr(files={"TESTING3": "foobar"})
p1.merge()
self.wait_for("pull_request", {"action": "closed"})
self.create_message(p2, "@mergifyio update")
oldsha = p2.head.sha
p2.update()
assert p2.commits == 2
assert oldsha != p2.head.sha
def test_command_rebase_ok(self):
rules = {
"pull_request_rules": [
{
"name": "auto-rebase-on-label",
"conditions": ["label=rebase"],
"actions": {"comment": {"message": "@mergifyio rebase it please"}},
}
]
}
self.setup_repo(yaml.dump(rules), files={"TESTING": "foobar\n"})
p1, _ = self.create_pr(files={"TESTING": "foobar\n\n\np1"})
p2, _ = self.create_pr(files={"TESTING": "p2\n\nfoobar\n"})
p1.merge()
self.add_label(p2, "rebase")
self.wait_for("pull_request", {"action": "synchronize"})
oldsha = p2.head.sha
p2.merge()
p2.update()
assert oldsha != p2.head.sha
f = p2.base.repo.get_contents("TESTING")
assert f.decoded_content == b"p2\n\nfoobar\n\n\np1"
def test_requested_reviews(self):
team = list(self.o_admin.get_teams())[0]
team.set_repo_permission(self.r_o_admin, "push")
rules = {
"pull_request_rules": [
{
"name": "user",
"conditions": [
f"base={self.master_branch_name}",
"review-requested=sileht",
],
"actions": {"comment": {"message": "review-requested user"}},
},
{
"name": "team",
"conditions": [
f"base={self.master_branch_name}",
"review-requested=@testing",
],
"actions": {"comment": {"message": "review-requested team"}},
},
],
}
self.setup_repo(yaml.dump(rules))
p1, _ = self.create_pr()
p1.create_review_request(reviewers=["sileht"])
self.wait_for("pull_request", {"action": "review_requested"})
self.wait_for("issue_comment", {"action": "created"})
p2, _ = self.create_pr()
p2.create_review_request(team_reviewers=[team.slug])
self.wait_for("pull_request", {"action": "review_requested"})
self.wait_for("issue_comment", {"action": "created"})
self.assertEqual("review-requested user", list(p1.get_issue_comments())[0].body)
self.assertEqual("review-requested team", list(p2.get_issue_comments())[0].body)
def test_command_backport(self):
stable_branch = self.get_full_branch_name("stable/#3.1")
feature_branch = self.get_full_branch_name("feature/one")
rules = {
"pull_request_rules": [
{
"name": "auto-backport",
"conditions": [f"base={self.master_branch_name}"],
"actions": {
"comment": {
"message": f"@mergifyio backport {stable_branch} {feature_branch}"
}
},
}
]
}
self.setup_repo(yaml.dump(rules), test_branches=[stable_branch, feature_branch])
p, _ = self.create_pr()
self.wait_for("issue_comment", {"action": "created"})
p.merge()
self.wait_for("pull_request", {"action": "closed"})
self.wait_for("issue_comment", {"action": "created"})
pulls = list(self.r_o_admin.get_pulls(state="all", base=stable_branch))
self.assertEqual(1, len(pulls))
pulls = list(self.r_o_admin.get_pulls(state="all", base=feature_branch))
self.assertEqual(1, len(pulls))
def test_truncated_check_output(self):
# not used anyhow
rules = {
"pull_request_rules": [{"name": "noop", "conditions": [], "actions": {}}]
}
self.setup_repo(yaml.dump(rules))
pr, commits = self.create_pr()
pull = context.Context(self.cli_integration, pr.raw_data, {})
check = check_api.set_check_run(
pull,
"Test",
"completed",
"success",
{"summary": "a" * 70000, "title": "bla"},
)
assert check["output"]["summary"] == ("a" * 65532 + "…")
def test_pull_request_complete(self):
rules = {
"pull_request_rules": [{"name": "noop", "conditions": [], "actions": {}}]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
installation = {"id": config.INSTALLATION_ID}
client = github.get_client(p.base.user.login, p.base.repo.name, installation)
ctxt = context.Context(client, {"number": p.number}, {})
self.assertEqual(p.number, ctxt.pull["number"])
self.assertEqual("open", ctxt.pull["state"])
self.assertEqual("clean", ctxt.pull["mergeable_state"])
| 34.676548
| 101
| 0.525998
|
27ba746ab024eac4b16cb4d67d5d79f3eae74167
| 6,527
|
py
|
Python
|
views.py
|
dirkbo/django_factory
|
252adf3ed46dc9f230210f53632fb7c767814475
|
[
"Apache-2.0"
] | 1
|
2015-12-19T11:42:48.000Z
|
2015-12-19T11:42:48.000Z
|
views.py
|
dirkbo/django_factory
|
252adf3ed46dc9f230210f53632fb7c767814475
|
[
"Apache-2.0"
] | null | null | null |
views.py
|
dirkbo/django_factory
|
252adf3ed46dc9f230210f53632fb7c767814475
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import logging
import csv
import json
# Django imports
from django.http import HttpResponseBadRequest, HttpResponse, JsonResponse
from django.shortcuts import render
from django.conf import settings
from django.utils import timezone
from django.views.decorators.gzip import gzip_page
from django.core.cache import cache
from django.template.loader import get_template, render_to_string
from hashlib import sha224
from datetime import datetime, timedelta
# Get an instance of a logger
from django.views.generic import TemplateView
logger = logging.getLogger(__name__)
def factory_render(request, template, context, verbose=False):
output = context.get('output', 'html')
if output == 'html':
return render(request, template, context)
if output == 'json':
json_data = dict()
logger.info(context)
for key in context:
value = context[key]
logger.debug("K: %s V: %s" % (key, value))
try:
json_data[key] = json.dumps(value)
except TypeError:
try:
                    if value.id:
                        nv = dict()
                        nv['class'] = value.__class__.__name__
                        nv['id'] = value.id
                        json_data[key] = nv
except AttributeError:
try:
st = str(value)
json_data[key] = st
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logger.info(message)
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logger.warn(message)
return JsonResponse(json_data)
if output == "csv":
fn = template.replace('.html', '')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % fn
writer = csv.writer(response)
for key in context:
value = context[key]
writer.writerow([key, value])
return response
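# Usage sketch (hypothetical view; the template name and context keys are
# illustrative, not part of this module):
#
#     def post_list(request):
#         context = {'output': request.GET.get('output', 'html'), 'count': 3}
#         return factory_render(request, 'posts/list.html', context)
#
# Requesting ?output=json or ?output=csv then switches the representation.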
def render_asset(template, request, content_type="text/plain",
force_shrink=True):
language = getattr(request, 'LANGUAGE_CODE', "")
key = "asset-%s-%s" % (template, language)
resp = cache.get(key, False)
debug = settings.DEBUG
if resp is False:
try:
context = dict()
context['hide_log'] = not getattr(settings, 'DEBUG', False)
logger.info("Hide js log: %s", context['hide_log'])
resp = render_to_string(template_name=template, request=request, context=context)
except Exception as e:
if debug:
raise e
else:
logger.warning(e)
return HttpResponseBadRequest("bad Request: %s" % template)
if not debug or force_shrink is True:
try:
if content_type.endswith("javascript"):
logger.info("js minified")
from jsmin import jsmin
resp = jsmin(resp)
else:
logger.info("content Type: %s", content_type)
except Exception as e:
logger.warning("error shrinking js: %s", e)
try:
if content_type.endswith("css"):
from cssmin import cssmin
resp = cssmin(resp)
except Exception as e:
logger.warning("error shrinking css: %s", e)
resp = HttpResponse(resp)
etag = sha224(resp.content).hexdigest()
resp['Content-Type'] = content_type
resp['etag'] = etag
now = timezone.now()
modified = now.astimezone(timezone.utc).strftime('%H:%M:%S-%a/%d/%b/%Y')
resp['Last-Modified'] = modified
cache_time_seconds = 60 * 60
if debug:
cache_time_seconds = 60
resp['Cache-Control'] = 'public, max-age={}'.format(cache_time_seconds)
expires = now.astimezone(timezone.utc) + timedelta(seconds=cache_time_seconds)
resp['Expires'] = expires.strftime('%H:%M:%S-%a/%d/%b/%Y')
cache.set(key, resp, cache_time_seconds)
logger.info("caching: %s" % template)
return resp
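# Note: responses are cached per template and request language for
# cache_time_seconds (an hour normally, a minute with DEBUG), so edits to an
# asset template show up after at most one cache period.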
@gzip_page
def js_file(request, path):
return render_asset('js/%s.js.html' % path, request,
content_type="text/javascript")
@gzip_page
def css_file(request, path):
return render_asset('css/%s.css.html' % path, request,
content_type="text/css")
@gzip_page
def manifest(request):
# manifest.json
# include path in urls:
#
# path('manifest.json', manifest, name='html5_manifest'),
#
# Make sure path is on highest level, directly under /
import json
language = getattr(request, 'LANGUAGE_CODE', "en").lower()
context = dict()
context["manifest_version"] = 2
pwa_settings = getattr(settings, 'PWA_SETTINGS', {})
pwa_settings_texts = pwa_settings.get('texts', {})
pwa_settings_text = pwa_settings_texts.get(language, {})
context["name"] = pwa_settings_text.get("long_name", "A Blog")
context["short_name"] = pwa_settings_text.get("short_name", "Blog")
context["description"] = pwa_settings_text.get("description", "Description")
# Get language specific texts
context["start_url"] = pwa_settings.get('start_url', '/')
context["display"] = pwa_settings.get('display', '')
context["orientation"] = pwa_settings.get('orientation', '')
context["background_color"] = pwa_settings.get("background_color", "#512498")
context["theme_color"] = pwa_settings.get("theme_color", "#512498")
context["icons"] = pwa_settings.get("icons", [])
context["permissions"] = pwa_settings.get("permissions", [])
return HttpResponse(json.dumps(context), content_type="text/json")
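# Settings sketch (illustrative values; only the keys read above matter):
#
#     PWA_SETTINGS = {
#         'start_url': '/',
#         'display': 'standalone',
#         'orientation': 'portrait',
#         'icons': [{'src': '/static/icon-192.png', 'sizes': '192x192'}],
#         'texts': {'en': {'short_name': 'Blog', 'long_name': 'A Blog',
#                          'description': 'A demo blog'}},
#     }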
@gzip_page
def pwa_serviceworker(request):
# Service worker view
# include url
# path('pwa-serviceworker.js', pwa_serviceworker, name='pwa_serviceworker'),
    # Make sure the URL is directly under /; a service worker can only control its own path level or deeper.
template = get_template('js/service_worker.js.html')
html = template.render()
return HttpResponse(html, content_type="application/x-javascript")
| 35.862637
| 93
| 0.598131
|
86a9fc011d9780283a9981df4728c37b64d0461d
| 3,859
|
py
|
Python
|
nlqa.py
|
johl/nlqa
|
6ed3f753523664caab2830775e34c24e1ba30766
|
[
"MIT"
] | 4
|
2018-06-09T20:45:33.000Z
|
2019-12-10T15:42:43.000Z
|
nlqa.py
|
johl/nlqa
|
6ed3f753523664caab2830775e34c24e1ba30766
|
[
"MIT"
] | 1
|
2018-06-09T20:47:29.000Z
|
2018-06-09T20:57:30.000Z
|
nlqa.py
|
johl/nlqa
|
6ed3f753523664caab2830775e34c24e1ba30766
|
[
"MIT"
] | null | null | null |
import spacy
import requests
import json
def extract_entities(question, language="en"):
"""
Extract item and property out of question.
    Process the question. We assume the token with part-of-speech tag 'NOUN'
    to be the property and the token with POS tag 'PROPN' to be the item of
    the query.
Parameters:
question (str): natural language question about a property of an item
        language (str): (optional) language code for processing (default "en").
Returns:
entities: dictionary with ['property']['lemma'] and ['item']['lemma'] set
to extracted item and property.
"""
nlp = spacy.load(language)
doc = nlp(question)
entities = {}
entities['property'] = {}
entities['item'] = {}
for token in doc:
if (token.pos_ == 'NOUN'):
entities['property']['lemma'] = token.lemma_
if (token.pos_ == 'PROPN'):
entities['item']['lemma'] = token.lemma_
return entities
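# Example (illustrative; assumes the spaCy "en" model is installed):
#     extract_entities("Who is the mayor of Berlin?")
#     -> {'property': {'lemma': 'mayor'}, 'item': {'lemma': 'Berlin'}}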
def search_entities(entities, language="en"):
"""
Search for item ID and property ID on Wikidata.
Take entities dictionary, search on Wikidata for the lemmas of the item
and the property through an API call to wbsearchentities.
Parameters:
entities (dict): dictionary with ['property']['lemma']
and ['item']['lemma'] set
language (str): (optional) language code for search (default "en").
Returns:
entities: dictionary with ['property']['id'] and ['item']['id'] set
"""
endpoint = "https://www.wikidata.org/w/api.php"
params = "?language=" + language + "&format=json"
action = "&action=wbsearchentities&search="
itemsearch = requests.get(endpoint + params + action +
entities['item']['lemma'])
item = json.loads(itemsearch.text)['search'][0]['id']
propertysearch = requests.get(endpoint + params + action +
entities['property']['lemma'] +
'&type=property')
property = json.loads(propertysearch.text)['search'][0]['id']
entities['item']['id'] = item
entities['property']['id'] = property
return entities
def query_sparql(entities, language="en"):
"""
Perform SPARQL query for item and property.
    Build SPARQL query with item ID and property ID from the entities dictionary
and run it on SPARQL endpoint at Wikidata.
Parameters:
entities (dict): dictionary with ['property']['id']
and ['item']['id'] set
language (str): (optional) language code for query (default "en").
Returns:
answer: String with label of the result of the query
"""
item = entities['item']['id']
property = entities['property']['id']
endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
sparql = "SELECT ?answerLabel WHERE {\n"
sparql = sparql + "wd:{} wdt:{} ?answer.".format(item, property)
sparql = sparql + "\nSERVICE wikibase:label "
sparql = sparql + '{bd:serviceParam wikibase:language "'
sparql = sparql + language + '".}\n}'
query = requests.get(endpoint + "?query=" + sparql + "&format=json")
answer = json.loads(query.text)
answer = answer['results']['bindings'][0]['answerLabel']['value']
return answer
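# For illustration, with item Q64 (Berlin) and property P6 (head of
# government) the generated query resembles:
#     SELECT ?answerLabel WHERE {
#     wd:Q64 wdt:P6 ?answer.
#     SERVICE wikibase:label {bd:serviceParam wikibase:language "en".}
#     }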
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--question', help='question to Wikidata')
parser.add_argument('--language', help='language code of the question')
args = parser.parse_args()
if (args.question is None or args.language is None):
parser.print_help()
else:
print(query_sparql(
search_entities(
extract_entities(args.question, language=args.language),
language=args.language),
language=args.language))
| 35.081818
| 77
| 0.623996
|
4c19bbab219baf866cb91348a551bdfaedff0ff7
| 1,158
|
py
|
Python
|
Lib/distutils/tests/test_bdist_wininst.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
Lib/distutils/tests/test_bdist_wininst.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
Lib/distutils/tests/test_bdist_wininst.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
"""Tests for distutils.command.bdist_wininst."""
import unittest
from test.support import run_unittest
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
@unittest.skipIf(getattr(bdist_wininst, '_unsupported', False),
'bdist_wininst is not supported in this install')
class BuildWinInstTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
pkg_pth, dist = self.create_dist()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assertGreater(len(exe_file), 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| 34.058824
| 67
| 0.690846
|
23ebe2e7f0d2764d31c846c4fe4c8e56c28a4c83
| 11,145
|
py
|
Python
|
kaggle_runner/logs.py
|
pennz/kaggle_runner
|
19b979ae86f1fcaff5d17f55f4d8bc3d3f2a4ced
|
[
"MIT"
] | 1
|
2020-08-06T09:07:49.000Z
|
2020-08-06T09:07:49.000Z
|
kaggle_runner/logs.py
|
pennz/kaggle_runner
|
19b979ae86f1fcaff5d17f55f4d8bc3d3f2a4ced
|
[
"MIT"
] | 1
|
2020-05-13T10:49:42.000Z
|
2020-05-15T22:52:37.000Z
|
kaggle_runner/logs.py
|
pennz/kaggle_runner
|
19b979ae86f1fcaff5d17f55f4d8bc3d3f2a4ced
|
[
"MIT"
] | null | null | null |
import datetime
import time
from collections import defaultdict, deque
import numpy as np
import tensorflow as tf
import fastai
import torch
import torch.distributed as dist
from fastai.callbacks import csv_logger
from kaggle_runner.utils.kernel_utils import is_dist_avail_and_initialized
def metric_get_log(phase, epoch, epoch_loss, meter, start):
"""logging the metrics at the end of an epoch
Args:
phase:
epoch:
epoch_loss:
meter:
start:
Returns:
"""
dices, iou = meter.get_metrics()
dice, dice_neg, dice_pos = dices
print(
"Loss: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f | IoU: %0.4f"
" | epoch: %d | phase: %s"
% (epoch_loss, dice, dice_neg, dice_pos, iou, epoch, phase)
)
return dice, iou
class NBatchProgBarLogger(tf.keras.callbacks.ProgbarLogger):
def __init__(
self,
count_mode="samples",
stateful_metrics=None,
display_per_batches=1,
verbose=1,
early_stop=False,
patience_displays=0,
epsilon=1e-7,
):
super(NBatchProgBarLogger, self).__init__(count_mode, stateful_metrics)
self.display_per_batches = 1 if display_per_batches < 1 else display_per_batches
self.step_idx = 0 # across epochs
self.display_idx = 0 # across epochs
self.seen = 0
self.verbose = verbose
        # A better way would be to subclass the EarlyStopping callback.
self.early_stop = early_stop
self.patience_displays = patience_displays
self.losses = np.empty(patience_displays, dtype=np.float32)
self.losses_sum_display = 0
self.epsilon = epsilon
self.stopped_step = 0
self.batch_size = 0
self.epochs = 0
def on_train_begin(self, logs=None):
self.epochs = self.params["epochs"]
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get("size", 0)
# In case of distribution strategy we can potentially run multiple
# steps at the same time, we should account for that in the `seen`
# calculation.
num_steps = logs.get("num_steps", 1)
if self.use_steps:
self.batch_size = num_steps
else:
self.batch_size = batch_size * num_steps
before_seen = self.seen
self.seen += self.batch_size
after_seen = self.seen
for k in self.params["metrics"]:
if k in logs:
self.log_values.append((k, logs[k]))
self.step_idx += 1
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.early_stop:
            # only accumulate this batch's loss; the per-display average is computed below
loss = logs.get("loss")
self.losses_sum_display += loss
if self.step_idx % self.display_per_batches == 0:
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
if self.early_stop:
avg_loss_per_display = (
self.losses_sum_display / self.display_per_batches
)
                self.losses_sum_display = 0  # clear manually
self.losses[
self.display_idx % self.patience_displays
] = avg_loss_per_display
                # this is still SGD, so there is still variance; it is just
                # smaller by a factor of display_per_batches
display_info_start_step = self.step_idx - self.display_per_batches + 1
print(
f"\nmean(display): {avg_loss_per_display}, Step {display_info_start_step }({before_seen}) to {self.step_idx}({after_seen}) for {self.display_idx}th display step"
)
self.display_idx += 1 # used in index, so +1 later
if self.display_idx >= self.patience_displays:
                    std = np.std(
                        self.losses
                    )  # with SGD there is always variance, so a plain std threshold is crude
print(
f"mean(over displays): {np.mean(self.losses)}, std:{std} for Display {self.display_idx-self.patience_displays} to {self.display_idx-1}"
)
if std < self.epsilon:
self.stopped_step = self.step_idx
self.model.stop_training = True
print(
f"Early Stop criterion met: std is {std} at Step"
f" {self.step_idx} for {self.display_idx}th display"
"steps"
)
def on_train_end(self, logs=None):
if self.stopped_step > 0 and self.verbose > 0:
print("Step %05d: early stopping" % (self.stopped_step + 1))
class MetricLogger:
def __init__(self, delimiter="\t", log_file_name="metric.log"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
self.log_file = open(log_file_name, "a", buffering=1)
def __del__(self):
self.log_file.close()
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def clear(self):
for meter in self.meters.values():
if meter is not None:
meter.clear()
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(
type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 1
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
# "max mem: {memory:.0f}",
]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
            # print every print_freq iterations, and on the last one
if i % (print_freq) == 0 or i == len(iterable):
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
self.print_and_log_to_file(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
# memory=torch.cuda.max_memory_allocated() / MB, # FIXME add support for CPU
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
self.print_and_log_to_file(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
def print_and_log_to_file(self, s):
print(s)
self.log_file.write(s + "\n")
class CSVLoggerBufferCustomized(csv_logger.CSVLogger):
"A `LearnerCallback` that saves history of metrics while training `learn` into CSV `filename`."
def __init__(
self,
learn: fastai.basic_train.Learner,
filename: str = "history",
append: bool = False,
buffer_type: int = 1,
):
super(CSVLoggerBufferCustomized, self).__init__(
learn, filename, append)
        self.buffer_type = buffer_type  # small buffer so results are flushed quickly
def on_train_begin(self, **kwargs) -> None:
"Prepare file with metric names."
self.path.parent.mkdir(parents=True, exist_ok=True)
self.file = (
self.path.open("a", buffering=self.buffer_type)
if self.append
else self.path.open("w", buffering=self.buffer_type)
)
self.file.write(
",".join(self.learn.recorder.names[: (
None if self.add_time else -1)])
+ "\n"
)
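# Note: buffering=1 requests line buffering for text-mode files, so every CSV
# row is flushed to disk as soon as it is written; the default (-1) would keep
# rows in a block buffer until it fills.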
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
Args:
Returns:
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def clear(self):
self.total = 0.0
self.count = 0
self.deque.clear()
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""Warning: does not synchronize the deque!"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total],
dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
if len(self.deque) == 0:
return "_NULL_"
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
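# Minimal usage sketch (illustrative values):
#     v = SmoothedValue(window_size=2)
#     for x in (1.0, 3.0, 5.0):
#         v.update(x)
#     v.avg         # 4.0: mean over the last window_size values
#     v.global_avg  # 3.0: mean over everything seen so far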
| 31.483051
| 181
| 0.549035
|
c9ab529dd78fc04a47a7b02c3b7691c43fc7a4a4
| 4,339
|
py
|
Python
|
designateclient/functionaltests/v2/test_recordsets.py
|
mail2nsrajesh/python-designateclient
|
3bb401758c00a9d66383484c60933421d9a21d63
|
[
"Apache-2.0"
] | 34
|
2015-01-16T21:45:17.000Z
|
2021-11-15T09:38:53.000Z
|
designateclient/functionaltests/v2/test_recordsets.py
|
mail2nsrajesh/python-designateclient
|
3bb401758c00a9d66383484c60933421d9a21d63
|
[
"Apache-2.0"
] | null | null | null |
designateclient/functionaltests/v2/test_recordsets.py
|
mail2nsrajesh/python-designateclient
|
3bb401758c00a9d66383484c60933421d9a21d63
|
[
"Apache-2.0"
] | 15
|
2015-01-16T21:45:27.000Z
|
2020-05-26T05:07:29.000Z
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tempest.lib.exceptions import CommandFailed
from designateclient.functionaltests.base import BaseDesignateTest
from designateclient.functionaltests.datagen import random_a_recordset_name
from designateclient.functionaltests.datagen import random_zone_name
from designateclient.functionaltests.v2.fixtures import RecordsetFixture
from designateclient.functionaltests.v2.fixtures import ZoneFixture
class TestRecordset(BaseDesignateTest):
def setUp(self):
super(TestRecordset, self).setUp()
self.ensure_tld_exists('com')
self.zone = self.useFixture(ZoneFixture(
name=random_zone_name(),
email='test@example.com',
)).zone
name = random_a_recordset_name(self.zone.name)
self.recordset = self.useFixture(RecordsetFixture(
zone_id=self.zone.id,
name=name,
records='1.2.3.4',
description='An a recordset',
type='A',
ttl=1234,
)).recordset
self.assertEqual(self.recordset.name, name)
self.assertEqual(self.recordset.records, '1.2.3.4')
self.assertEqual(self.recordset.description, 'An a recordset')
self.assertEqual(self.recordset.type, 'A')
self.assertEqual(self.recordset.ttl, '1234')
def test_recordset_list(self):
rsets = self.clients.recordset_list(self.zone.id)
self.assertGreater(len(rsets), 0)
def test_recordset_create_and_show(self):
rset = self.clients.recordset_show(self.zone.id, self.recordset.id)
self.assertTrue(hasattr(self.recordset, 'action'))
self.assertTrue(hasattr(rset, 'action'))
self.assertEqual(self.recordset.created_at, rset.created_at)
self.assertEqual(self.recordset.description, rset.description)
self.assertEqual(self.recordset.id, rset.id)
self.assertEqual(self.recordset.name, rset.name)
self.assertEqual(self.recordset.records, rset.records)
self.assertEqual(self.recordset.status, rset.status)
self.assertEqual(self.recordset.ttl, rset.ttl)
self.assertEqual(self.recordset.type, rset.type)
self.assertEqual(self.recordset.updated_at, rset.updated_at)
self.assertEqual(self.recordset.version, rset.version)
self.assertEqual(self.recordset.zone_id, self.zone.id)
def test_recordset_delete(self):
rset = self.clients.recordset_delete(self.zone.id, self.recordset.id)
self.assertEqual(rset.action, 'DELETE')
self.assertEqual(rset.status, 'PENDING')
def test_recordset_set(self):
rset = self.clients.recordset_set(
self.zone.id,
self.recordset.id,
records='2.3.4.5',
ttl=2345,
description='Updated description',
)
self.assertEqual(rset.records, '2.3.4.5')
self.assertEqual(rset.ttl, '2345')
self.assertEqual(rset.description, 'Updated description')
def test_recordset_set_clear_ttl_and_description(self):
rset = self.clients.recordset_set(
self.zone.id,
self.recordset.id,
no_description=True,
no_ttl=True,
)
self.assertEqual(rset.description, 'None')
self.assertEqual(rset.ttl, 'None')
class TestRecordsetNegative(BaseDesignateTest):
def test_invalid_option_on_recordset_create(self):
cmd = 'recordset create de47d30b-41c5-4e38-b2c5-e0b908e19ec7 ' \
'aaa.desig.com. --type A --records 1.2.3.4 ' \
'--invalid "not valid"'
self.assertRaises(CommandFailed, self.clients.openstack, cmd)
def test_invalid_recordset_command(self):
cmd = 'recordset hopefullynotvalid'
self.assertRaises(CommandFailed, self.clients.openstack, cmd)
| 38.741071
| 77
| 0.690943
|
314e5a51340ba9090366d07155079bff43f64ce4
| 1,322
|
py
|
Python
|
nipype/interfaces/afni/tests/test_auto_Resample.py
|
grlee77/nipype
|
73f3a733ac1b7d9b09ec32a387905a9302423b87
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/afni/tests/test_auto_Resample.py
|
grlee77/nipype
|
73f3a733ac1b7d9b09ec32a387905a9302423b87
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/afni/tests/test_auto_Resample.py
|
grlee77/nipype
|
73f3a733ac1b7d9b09ec32a387905a9302423b87
|
[
"BSD-3-Clause"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.afni.preprocess import Resample
def test_Resample_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-inset %s',
copyfile=False,
mandatory=True,
position=-1,
),
master=dict(argstr='-master %s',
),
orientation=dict(argstr='-orient %s',
),
out_file=dict(argstr='-prefix %s',
name_source='in_file',
name_template='%s_resample',
),
outputtype=dict(),
resample_mode=dict(argstr='-rmode %s',
),
terminal_output=dict(nohash=True,
),
voxel_size=dict(argstr='-dxyz %f %f %f',
),
)
inputs = Resample.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Resample_outputs():
output_map = dict(out_file=dict(),
)
outputs = Resample.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 26.44
| 78
| 0.644478
|
8b8fdc690785f418945adbb1a6a999f7129e826c
| 10,516
|
py
|
Python
|
src/recommender.py
|
ghazaltariri/Movie_Recommender_System
|
8e5be133e4c6073f22c380cb40c121d201b967c4
|
[
"Apache-2.0"
] | null | null | null |
src/recommender.py
|
ghazaltariri/Movie_Recommender_System
|
8e5be133e4c6073f22c380cb40c121d201b967c4
|
[
"Apache-2.0"
] | null | null | null |
src/recommender.py
|
ghazaltariri/Movie_Recommender_System
|
8e5be133e4c6073f22c380cb40c121d201b967c4
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from time import time
import pyspark as ps
from pyspark.sql.types import *
from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql import functions as F
class MovieRecommender(object):
"""Template class for a Movie Recommender system."""
def __init__(self, local=False):
"""Constructs a MovieRecommender"""
self.spark = ps.sql.SparkSession.builder \
.master("local[4]") \
.appName("Movie Reccomender") \
.getOrCreate()
self.sc = self.spark.sparkContext
self.logger = logging.getLogger('reco-cs')
self.users = self.sc.textFile('data/users.dat').map(lambda x: (int(x.split('::')[0]), x))
self.movies = self.sc.textFile('data/movies.dat').map(lambda x: (int(x.split('::')[0]), x))
self.local = local
def fit(self, ratings):
"""
Trains the recommender on a given set of ratings.
Parameters
----------
ratings : pandas dataframe, shape = (n_ratings, 4)
with columns 'user', 'movie', 'rating', 'timestamp'
Returns
-------
self : object
Returns self.
"""
self.logger.debug("starting fit")
#Save the training data for later use:
self.training_data = ratings.copy()
# self.training_data = ratings.toPandas()
self.users_train_unique = self.training_data.user.unique()
self.movies_train_unique = self.training_data.movie.unique()
#Begin Transforming the data for fitting
t0 = time()
users = self.users
movies = self.movies
ratings = self.spark.createDataFrame(ratings.copy())
        # Maps the ratings df structure to that of the test data's
ratings = ratings.rdd.map(tuple)
ratings = ratings.map(lambda x: '::'.join(x))
ratings = ratings.map(lambda x: (int(x.split('::')[0]), x))
self.ratings = ratings
# Joins all the tables together for training
joined = ratings.join(users)
temp = joined.map(lambda x: '::'.join(x[1])).map(lambda x: (int(x.split('::')[1]), x))
joined_full = temp.join(movies).map(lambda x: '::'.join(x[1]))
# Removes the :: seperator from the RDD
def split_to_cols(x):
values = x.split('::')
return (int(values[0]), int(values[1]), int(values[2]))
# Not used but kept around because it could be
def get_ratings(x):
values = x.split('::')
return (int(values[2]))
# Turns the RDD into a DataFrame
spark_df = joined_full.map(split_to_cols)
schema = StructType([
StructField("userID", IntegerType(), True),
StructField("movieID", IntegerType(), True),
StructField("rating", IntegerType(), True)])
# Creates the proper train DataFrame for fitting
train = self.spark.createDataFrame(spark_df, schema)
# Instantiate the model (Alternating Least Squares)
als = ALS(
itemCol='movieID',
userCol='userID',
ratingCol='rating',
nonnegative=True,
regParam=0.4,
maxIter=10,
rank=14)
        # Create the recommender by fitting the training data
        print('Model Created. Training....')
        self.recommender = als.fit(train)
self.fitted = True
self.logger.debug("finishing fit")
print('DONE! ', time()-t0, ' seconds.')
return(self)
def transform(self, requests):
"""
Predicts the ratings for a given set of requests.
Parameters
----------
requests : pandas dataframe, shape = (n_ratings, 2)
with columns 'user', 'movie'
Returns
-------
dataframe : a pandas dataframe with columns 'user', 'movie', 'rating'
column 'rating' containing the predicted rating
"""
# test_df = requests.toPandas()
        self.test_df = test_df = requests.copy()
#Filter down the request data
self.old_old = test_df[(test_df.user.isin(self.users_train_unique))
& (test_df.movie.isin(self.movies_train_unique))]
newish = test_df[~((test_df.user.isin(self.users_train_unique))
& (test_df.movie.isin(self.movies_train_unique)))]
self.newish = newish
#Split off the new users/movies:
self.requests_new_movies = newish[(newish.user.isin(self.users_train_unique))
& ~(newish.movie.isin(self.movies_train_unique))]
self.requests_new_users = newish[~((newish.user.isin(self.users_train_unique))
& ~(newish.movie.isin(self.movies_train_unique)))]
requests = self.spark.createDataFrame(self.old_old)
self.logger.debug("starting predict")
self.logger.debug("request count: {}".format(requests.count()))
t0 = time()
users = self.users
movies = self.movies
# Gets the requests in the right shape
requests = requests.rdd.map(tuple)
requests = requests.map(lambda x: '::'.join(x))
requests = requests.map(lambda x: (int(x.split('::')[0]), x))
joined = requests.join(users)
temp = joined.map(lambda x: '::'.join(x[1])).map(lambda x: (int(x.split('::')[1]), x))
joined_full = temp.join(movies).map(lambda x: '::'.join(x[1]))
def split_to_cols(x):
values = x.split('::')
return (int(values[0]), int(values[1]), int(values[2]))
def get_ratings(x):
values = x.split('::')
return (int(values[2]))
data_rdd = joined_full.map(split_to_cols)
j_ratings = joined_full.map(get_ratings)
schema = StructType([
StructField("userID", IntegerType(), True),
StructField("movieID", IntegerType(), True),
StructField("rating", IntegerType(), True)])
test = self.spark.createDataFrame(data_rdd, schema)
self.logger.debug("finishing predict for recognized users and movies")
print('Transforming...')
output = self.recommender.transform(test)
output = output.toPandas()
output.drop('rating',axis=1,inplace=True)
output.rename(columns={'userID':'user', 'movieID':'movie'}, inplace = True)
print('DONE! ', time()-t0, ' seconds.')
print("Sending the new users to different model..")
t0 = time()
self.new_user_pred = self.weighted_Recommendation()
output = pd.concat([output,self.new_user_pred],axis=0)
print('DONE! ', time()-t0, ' seconds.')
print("Sending the new movies to different model..")
t0 = time()
if self.local == False:
self.new_movie_pred = self.requests_new_movies.copy()
self.new_movie_pred['prediction'] = 2.5
output = pd.concat([output,self.new_movie_pred],axis=0)
# else:
# for
print('DONE! ', time()-t0, ' seconds.')
return(output)
def weighted_Recommendation(self, is_sparse=False):
pd.options.display.float_format = '{:,.2f}'.format
training = self.training_data.copy()
users_movies = self.requests_new_users
if is_sparse:
grouped_training = pd.DataFrame(np.full(len(training.columns),2.5))
grouped_training['movie'] = np.array(training.columns)
grouped_training['rating']= np.array(training.mean(axis = 0))
grouped_training['vote']= np.array(training.count(axis = 0))
grouped_training = grouped_training[['movie','rating','vote']]
else:
training['rating'] = training['rating'].astype(int)
grouped_training = training.groupby('movie') \
.agg({'user':'size', 'rating':'mean'}) \
.rename(columns={'user':'vote','rating':'rating'}) \
.reset_index()
# Calculate the minimum number of voters required to be in the chart
m = grouped_training['vote'].quantile(0.5)
# Filter out all qualified movies into a new DataFrame
scorings = grouped_training.copy().loc[grouped_training['vote'] >= m]
F = pd.merge(users_movies, scorings, on='movie', how='left')
F['rating'].fillna(2.5, inplace=True)
final = F[['user','movie','rating']]
final.rename(columns={'rating':'prediction'},inplace=True,copy=False)
return(final)
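# Note: the quantile filter above is a simple popularity prior: only movies
# with at least the median vote count keep their observed mean rating, and
# every remaining request falls back to a neutral 2.5.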
def pred_on_similarity(df, similarity_matrix, userID, movieID, num_similar=10):
'''
    Generate one predicted value of an unseen movie for an existing user, based
    on that user's ratings of the most similar movies to the movie in question.
df : 'pandas dataframe with columns user(int), movie(int)
similarity_matrix : square matrix pd.DataFrame of similarities
userID : int : id of user in df
movieID : int/str : id of movie in df
    num_similar : int : compare the movie in question to *num_similar* of the other movies the user has rated.
'''
n = num_similar
movieID = str(movieID)
user = df[df.user == userID][['movie','rating']] #get user movies and ratings by the user in question
m = similarity_matrix[movieID].reset_index() #get similarities for the movie in question
m.columns = ['movie','similarity'] #rename columns for merge
merged = m.merge(user, on='movie',how='inner') #merge movie similarities with ratings
merged['product'] = merged.rating*merged.similarity #calculate rating*similarity
#get top similarity value for normalizing
sorted_sims = merged.similarity.sort_values(ascending=False)
norm = sorted_sims[sorted_sims < 1].iloc[0]
#sort by top similarities, take first n ratings*similarities, take average, normalize
p = np.mean(merged.sort_values(by='similarity', ascending=False)['product'][:n])/norm
return p
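# Usage sketch (names are illustrative):
#     sim = ...  # square movie-by-movie similarity DataFrame with string ids
#     pred_on_similarity(ratings_df, sim, userID=42, movieID=318)
# returns a single predicted rating for user 42 on movie 318, normalized by
# the most similar rated movie.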
if __name__ == "__main__":
logger = logging.getLogger('reco-cs')
logger.critical('you should use run.py instead')
| 38.520147
| 115
| 0.599087
|
89d760b4a170f582add6fb82c8201bea320d0dfc
| 665
|
py
|
Python
|
tools/tensorflow_model_freezer/sample/__init__.py
|
periannath/ONE
|
61e0bdf2bcd0bc146faef42b85d469440e162886
|
[
"Apache-2.0"
] | 255
|
2020-05-22T07:45:29.000Z
|
2022-03-29T23:58:22.000Z
|
tools/tensorflow_model_freezer/sample/__init__.py
|
periannath/ONE
|
61e0bdf2bcd0bc146faef42b85d469440e162886
|
[
"Apache-2.0"
] | 5,102
|
2020-05-22T07:48:33.000Z
|
2022-03-31T23:43:39.000Z
|
tools/tensorflow_model_freezer/sample/__init__.py
|
periannath/ONE
|
61e0bdf2bcd0bc146faef42b85d469440e162886
|
[
"Apache-2.0"
] | 120
|
2020-05-22T07:51:08.000Z
|
2022-02-16T19:08:05.000Z
|
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# indicating that this folder is python package
| 41.5625
| 74
| 0.765414
|
9cc6fad45911b98c9c1e189693539c8c95fa5f86
| 6,034
|
py
|
Python
|
x_layer/hypoxic_volume.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | 4
|
2015-06-09T18:53:11.000Z
|
2021-08-19T01:39:38.000Z
|
x_layer/hypoxic_volume.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | null | null | null |
x_layer/hypoxic_volume.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | 1
|
2017-03-07T01:28:49.000Z
|
2017-03-07T01:28:49.000Z
|
"""
This calculates hypoxic volume.
"""
# imports
import os, sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import zfun
import numpy as np
import netCDF4 as nc
import argparse
from datetime import datetime, timedelta
start_time = datetime.now()
# Command line arguments
def boolean_string(s):
if s not in ['False', 'True']:
raise ValueError('Not a valid boolean string')
return s == 'True' # note use of ==
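# For example, boolean_string('True') -> True and boolean_string('False') ->
# False; any other value (e.g. 'yes') raises ValueError, which argparse
# surfaces as a usage error for -test.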
parser = argparse.ArgumentParser()
# standard arguments
parser.add_argument('-g', '--gridname', nargs='?', type=str, default='cas6')
parser.add_argument('-t', '--tag', nargs='?', type=str, default='v3')
parser.add_argument('-x', '--ex_name', nargs='?', type=str, default='lo8b')
parser.add_argument('-0', '--date_string0', nargs='?', type=str, default='2019.07.04')
parser.add_argument('-1', '--date_string1', nargs='?', type=str, default='2019.07.05')
parser.add_argument('-lt', '--list_type', nargs='?', type=str, default='daily')
parser.add_argument('-test', '--testing', default=False, type=boolean_string)
args = parser.parse_args()
# save some arguments
Ldir = Lfun.Lstart(args.gridname, args.tag)
Ldir['gtagex'] = Ldir['gtag'] + '_' + args.ex_name
Ldir['list_type'] = args.list_type
Ldir['date_string0'] = args.date_string0
Ldir['date_string1'] = args.date_string1
# make sure the output directory exists
outdir00 = Ldir['LOo']
Lfun.make_dir(outdir00)
outdir0 = outdir00 + 'layer/'
Lfun.make_dir(outdir0)
outdir = (outdir0 + Ldir['gtagex'] + '_' + Ldir['date_string0']
+ '_' + Ldir['date_string1'] + '/')
Lfun.make_dir(outdir, clean=False)
# get list of history files to plot
fn_list = Lfun.get_fn_list(args.list_type, Ldir, args.date_string0, args.date_string1)
# name output file
out_fn = (outdir + 'hypoxic_volume_' + Ldir['list_type'] + '.nc')
# get rid of the old version, if it exists
try:
os.remove(out_fn)
except OSError:
pass
# lists of variables to process
dlist = ['xi_rho', 'eta_rho', 'xi_psi', 'eta_psi', 'ocean_time']
vn_list_2d = [ 'lon_rho', 'lat_rho', 'lon_psi', 'lat_psi', 'mask_rho', 'h']
vn_list_2d_custom = ['DA']
vn_list_3d_t_custom = ['hyp_dz']
# make some things
fn = fn_list[0]
G = zrfun.get_basic_info(fn, only_G=True)
DA = G['DX'] * G['DY']
ny, nx = DA.shape
h = G['h']
S = zrfun.get_basic_info(fn, only_S=True)
zr, zw = zrfun.get_z(h, 0*h, S)
dzr = np.diff(zw, axis=0)
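# dzr holds the vertical thickness (m) of every s-level cell. Masking dzr
# where oxygen exceeds the threshold of 60 used below (in the model's oxygen
# units) and summing over depth gives the hypoxic thickness per water column.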
ds1 = nc.Dataset(fn)
ds2 = nc.Dataset(out_fn, 'w')
# Create dimensions
for dname, the_dim in ds1.dimensions.items():
if dname in dlist:
ds2.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)
# Create variables and their attributes
# - first time
vn = 'ocean_time'
varin = ds1[vn]
vv = ds2.createVariable(vn, varin.dtype, varin.dimensions)
vv.long_name = varin.long_name
vv.units = varin.units
# - then static 2d fields
for vn in vn_list_2d:
varin = ds1[vn]
vv = ds2.createVariable(vn, varin.dtype, varin.dimensions)
vv.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
vv[:] = ds1[vn][:]
# - then static custom fields
for vn in vn_list_2d_custom:
if vn == 'DA':
vv = ds2.createVariable(vn, float, ('eta_rho', 'xi_rho'))
vv.long_name = 'Cell horizontal area'
vv.units = 'm2'
vv[:] = DA
#
# - then time-dependent custom 3d fields (processed into 2d)
for vn in vn_list_3d_t_custom:
if vn == 'hyp_dz':
vv = ds2.createVariable(vn, float, ('ocean_time', 'eta_rho', 'xi_rho'))
        vv.long_name = 'Thickness of hypoxic water'
vv.units = 'm'
vv.time='ocean_time'
# Copy time dependent data
omat = np.nan * np.ones(h.shape)
omat = np.ma.masked_where(G['mask_rho']==0, omat)
tt = 0
NF = len(fn_list)
for fn in fn_list:
if np.mod(tt,24)==0:
print(' working on %d of %d' % (tt, NF))
sys.stdout.flush()
ds = nc.Dataset(fn)
ds2['ocean_time'][tt] = ds['ocean_time'][0]
for vn in vn_list_3d_t_custom:
if vn == 'hyp_dz':
oxy = ds['oxygen'][0,:,:,:]
dzrm = np.ma.masked_where(oxy > 60, dzr)
hyp_dz = dzrm.sum(axis=0)
ds2[vn][tt,:,:] = hyp_dz
tt += 1
ds.close()
if args.testing:
sys.path.append(os.path.abspath('../plotting'))
import pfun
import matplotlib.pyplot as plt
plt.close('all')
fs=16
plt.rc('font', size=fs)
fig = plt.figure(figsize=(10,12))
ax = fig.add_subplot(111)
cs = ax.pcolormesh(G['lon_psi'],G['lat_psi'],
hyp_dz[1:-1,1:-1]/h[1:-1,1:-1], vmin=0, vmax=1)
fig.colorbar(cs)
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis([-130, -122, 42, 52])
ax.contour(G['lon_rho'],G['lat_rho'],h, [100, 200, 2000],
colors=['darkorange','plum','darkorchid'], linewidths=2, linestyles='solid')
ax.text(.95,.16,'100 m',color='darkorange',weight='bold',transform=ax.transAxes,ha='right')
ax.text(.95,.13,'200 m',color='plum',weight='bold',transform=ax.transAxes,ha='right')
ax.text(.95,.1,'2000 m',color='darkorchid',weight='bold',transform=ax.transAxes,ha='right')
ax.set_title('Hypoxic Depth Fraction')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.set_xticks([-130, -128, -126, -124, -122])
plt.show()
plt.rcdefaults()
print(' Saved Variables '.center(50,'='))
for vn in ds2.variables:
print(vn)
ds1.close()
ds2.close()
# finale
import collections
result_dict = collections.OrderedDict()
result_dict['out_fn'] = out_fn
result_dict['date_string0'] = args.date_string0
result_dict['date_string1'] = args.date_string1
time_format = '%Y.%m.%d %H:%M:%S'
result_dict['start_time'] = start_time.strftime(time_format)
end_time = datetime.now()
result_dict['end_time'] = end_time.strftime(time_format)
dt_sec = (end_time - start_time).seconds
result_dict['total_seconds'] = str(dt_sec)
if os.path.isfile(out_fn):
result_dict['result'] = 'success'
else:
result_dict['result'] = 'fail'
print('')
for k in result_dict.keys():
print('%s: %s' % (k, result_dict[k]))
| 30.629442
| 95
| 0.651309
|
e942d9d78a2a7f8e401f2121cd8c8d74f79db875
| 1,876
|
py
|
Python
|
droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py
|
colesbury/fairo
|
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
|
[
"MIT"
] | null | null | null |
droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py
|
colesbury/fairo
|
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
|
[
"MIT"
] | null | null | null |
droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py
|
colesbury/fairo
|
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mephisto.operations.operator import Operator
from mephisto.tools.scripts import load_db_and_process_config
from mephisto.abstractions.blueprints.static_html_task.static_html_blueprint import (
BLUEPRINT_TYPE,
)
from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (
SharedStaticTaskState,
)
from mephisto.data_model.qualification import QUAL_EXISTS, QUAL_NOT_EXIST, make_qualification_dict
from pilot_config import PILOT_ALLOWLIST_QUAL_NAME as ALLOWLIST_QUALIFICATION
import hydra
from omegaconf import DictConfig
from dataclasses import dataclass, field
from typing import List, Any
TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
defaults = [
"_self_",
{"mephisto/blueprint": BLUEPRINT_TYPE},
{"mephisto/architect": "ec2"},
{"mephisto/provider": "mock"},
{"conf": "labeling"},
]
from mephisto.operations.hydra_config import RunScriptConfig, register_script_config
@dataclass
class TestScriptConfig(RunScriptConfig):
defaults: List[Any] = field(default_factory=lambda: defaults)
task_dir: str = TASK_DIRECTORY
register_script_config(name="scriptconfig", module=TestScriptConfig)
@hydra.main(config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
shared_state = SharedStaticTaskState(
qualifications=[
make_qualification_dict(ALLOWLIST_QUALIFICATION, QUAL_EXISTS, None),
],
)
db, cfg = load_db_and_process_config(cfg)
operator = Operator(db)
operator.validate_and_run_config(cfg.mephisto, shared_state)
operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
if __name__ == "__main__":
main()
| 29.3125
| 98
| 0.776119
|
a54572e9e70524adba07a51ebeda0a4681ba339d
| 232
|
py
|
Python
|
Python-Programing-Basics/Nested Loops/Nested Loops - Lab/05_travelling/travelling.py
|
alexanderivanov2/Softuni-Software-Engineering
|
8adb96f445f1da17dbb6eded9e9594319154c7e7
|
[
"MIT"
] | null | null | null |
Python-Programing-Basics/Nested Loops/Nested Loops - Lab/05_travelling/travelling.py
|
alexanderivanov2/Softuni-Software-Engineering
|
8adb96f445f1da17dbb6eded9e9594319154c7e7
|
[
"MIT"
] | null | null | null |
Python-Programing-Basics/Nested Loops/Nested Loops - Lab/05_travelling/travelling.py
|
alexanderivanov2/Softuni-Software-Engineering
|
8adb96f445f1da17dbb6eded9e9594319154c7e7
|
[
"MIT"
] | null | null | null |
destination = input()
savings = 0
while destination != "End":
price = float(input())
while savings < price:
savings += float(input())
print(f"Going to {destination}!")
savings = 0
destination = input()
| 19.333333
| 37
| 0.603448
|
4bf95f7c3ea89643748e00f48cf85e85ad2d957c
| 23,643
|
py
|
Python
|
keras/distribute/keras_utils_test.py
|
ahmedopolis/keras
|
4c87dc9685eea2ed20111f9604b10d627b17f032
|
[
"Apache-2.0"
] | null | null | null |
keras/distribute/keras_utils_test.py
|
ahmedopolis/keras
|
4c87dc9685eea2ed20111f9604b10d627b17f032
|
[
"Apache-2.0"
] | null | null | null |
keras/distribute/keras_utils_test.py
|
ahmedopolis/keras
|
4c87dc9685eea2ed20111f9604b10d627b17f032
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models with callbacks, checkpointing with dist strategy."""
import tensorflow.compat.v2 as tf
import collections
import tempfile
from absl.testing import parameterized
import numpy as np
import keras
from keras import losses
from keras.distribute import distribute_strategy_test as keras_test_lib
from keras.distribute import distributed_training_utils_v1
from keras.distribute import optimizer_combinations
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the counts of time each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
class TestDistributionStrategyWithCallbacks(tf.test.TestCase,
parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations()))
def test_callbacks_in_fit(self, distribution):
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer='sgd',
loss='mse',
metrics=['mae'])
dataset = keras_test_lib.get_dataset(distribution)
counter = Counter()
epochs = 2
steps_per_epoch = 5
validation_steps = 3
model.fit(
dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=0,
validation_data=dataset,
validation_steps=validation_steps,
callbacks=[counter])
if (isinstance(distribution, tf.compat.v1.distribute.experimental.TPUStrategy) and
not tf.executing_eagerly()):
# TPU Strategy can have multi step training, from extended.steps_per_run
# if steps_per_run = 1, then num_batch_call_per_epoch = steps_per_epoch
steps_per_run = distribution.extended.steps_per_run
num_batch_call_per_epoch = steps_per_epoch // steps_per_run
if steps_per_epoch % steps_per_run:
num_batch_call_per_epoch += 1
else:
num_batch_call_per_epoch = steps_per_epoch
self.assertDictEqual(
counter.method_counts, {
'on_batch_begin': epochs * num_batch_call_per_epoch,
'on_batch_end': epochs * num_batch_call_per_epoch,
'on_epoch_begin': epochs,
'on_epoch_end': epochs,
'on_test_batch_begin': epochs * validation_steps,
'on_test_batch_end': epochs * validation_steps,
'on_test_begin': epochs,
'on_test_end': epochs,
'on_train_batch_begin': epochs * num_batch_call_per_epoch,
'on_train_batch_end': epochs * num_batch_call_per_epoch,
'on_train_begin': 1,
'on_train_end': 1
})
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations()))
def test_callbacks_in_eval(self, distribution):
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer='sgd',
loss='mse',
metrics=['mae'])
dataset = keras_test_lib.get_dataset(distribution)
counter = Counter()
model.evaluate(dataset, steps=5, callbacks=[counter])
self.assertDictEqual(
counter.method_counts, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations()))
def test_callbacks_in_predict(self, distribution):
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer='sgd',
loss='mse',
metrics=['mae'])
dataset = keras_test_lib.get_dataset(distribution)
counter = Counter()
model.predict(
keras_test_lib.get_predict_dataset(dataset),
steps=5,
callbacks=[counter])
self.assertDictEqual(
counter.method_counts, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
class TestDistributionStrategyErrorCases(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.
mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph']))
def test_validating_dataset_input_tensors_with_shape_mismatch(
self, distribution):
with self.cached_session():
@tf.function
def run():
ctx = tf.distribute.get_replica_context()
if ctx.replica_id_in_sync_group.device.endswith('GPU:0'):
return tf.constant([[1, 2]])
else:
return tf.constant([[1, 2], [1, 2]])
x = distribution.run(run)
# Removed device and input tensor shape details from the error message
# since the order of the device and the corresponding input tensor shape
# is not deterministic over different runs.
with self.assertRaisesRegex(
ValueError, 'Input tensor shapes do not match for '
'distributed tensor inputs '
'PerReplica:.+'):
with distribution.scope():
distributed_training_utils_v1.validate_distributed_dataset_inputs(
distribution, x, None)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations
.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_dtype_mismatch(
self, distribution):
with self.cached_session():
@tf.function
def run():
ctx = tf.distribute.get_replica_context()
if ctx.replica_id_in_sync_group.device.endswith('GPU:0'):
return tf.constant([[1, 2]], dtype=tf.int32)
else:
return tf.constant([[1, 2]], dtype=tf.float64)
x = distribution.run(run)
# Removed device and input tensor dtype details from the error message
# since the order of the device and the corresponding input tensor dtype
# is not deterministic over different runs.
with self.assertRaisesRegex(
ValueError, 'Input tensor dtypes do not match for '
'distributed tensor inputs '
'PerReplica:.+'):
with distribution.scope():
distributed_training_utils_v1.validate_distributed_dataset_inputs(
distribution, x, None)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution, mode):
with self.cached_session():
with distribution.scope():
model = keras_test_lib.get_model()
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics)
dataset = keras_test_lib.get_dataset(distribution)
# Test with validation split
with self.assertRaises(ValueError):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_split=0.5,
validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaises(ValueError):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument for dataset with infinite
# cardinality.
dataset = dataset.repeat()
with self.assertRaises(ValueError):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaises(ValueError):
model.evaluate(dataset, verbose=0)
with self.assertRaises(ValueError):
model.predict(dataset, verbose=0)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
tf.__internal__.distribute.combinations.one_device_strategy,
],
mode=['graph', 'eager']))
def test_distribution_strategy_on_subclassed_model(
self, distribution):
with distribution.scope():
class _SimpleMLP(keras.Model):
def __init__(self, num_labels):
super(_SimpleMLP, self).__init__()
self.dense = keras.layers.Dense(num_labels)
def call(self, inputs):
return self.dense(inputs)
model = _SimpleMLP(3)
if not tf.executing_eagerly():
with self.assertRaisesRegex(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
model.compile(
'sgd')
else:
model.compile(
'sgd')
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
tf.__internal__.distribute.combinations.one_device_strategy,
],
mode=['graph', 'eager']))
def test_distribution_strategy_on_deferred_sequential_model(
self, distribution):
with distribution.scope():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dense(3, activation='softmax'))
if tf.executing_eagerly():
model.compile(
'sgd')
else:
with self.assertRaisesRegex(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without '
'`input_shape`/`input_dim` set in its first layer or '
'a subclassed model.'):
model.compile(
'sgd')
@tf.__internal__.distribute.combinations.generate(
keras_test_lib.all_strategy_combinations_minus_default())
def test_standalone_loss_without_loss_reduction(self, distribution):
with distribution.scope():
loss_object = losses.MeanSquaredError()
with self.assertRaisesRegex(
ValueError, 'Please use `tf.keras.losses.Reduction.SUM` or '
'`tf.keras.losses.Reduction.NONE`'):
y = np.asarray([1, 0])
loss_object(y, y)
class TestDistributionStrategyWithLossMasking(tf.test.TestCase,
parameterized.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager'],
optimizer=optimizer_combinations
.gradient_descent_optimizer_keras_v2_fn
))
def test_masking(self, distribution, optimizer):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
with distribution.scope():
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(
loss='mse',
optimizer=optimizer())
y = np.array([[[1], [1]], [[1], [1]]])
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(tf.test.TestCase,
parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations(),
tf.__internal__.test.combinations.combine(
fused=[True, False],
optimizer=optimizer_combinations
.gradient_descent_optimizer_keras_v2_fn)))
def test_batchnorm_correctness(self, distribution, fused, optimizer):
with self.cached_session():
with distribution.scope():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
input_shape=(
10,
20,
30,
), momentum=0.8, fused=fused)
model.add(norm)
model.compile(
loss='mse',
optimizer=optimizer())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30))
x = x.astype('float32')
dataset = tf.data.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = keras_test_lib.batch_wrapper(dataset, 32, distribution)
predict_dataset = tf.data.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = keras_test_lib.batch_wrapper(predict_dataset, 32,
distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
# TODO(b/146181571): Enable this for all distribution strategies once
# DistributedVariable.assign() returns a variable for MirroredStrategy.
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.tpu_strategy_combinations(),
tf.__internal__.test.combinations.combine(
optimizer=optimizer_combinations
.gradient_descent_optimizer_keras_v2_fn)))
def test_batchnorm_correctness_with_renorm(self, distribution, optimizer):
with self.cached_session():
with distribution.scope():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
input_shape=(
10,
20,
30,
), momentum=0.8, fused=False, renorm=True)
model.add(norm)
model.compile(
loss='mse',
optimizer=optimizer())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30))
x = x.astype('float32')
dataset = tf.data.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = keras_test_lib.batch_wrapper(dataset, 32, distribution)
predict_dataset = tf.data.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = keras_test_lib.batch_wrapper(predict_dataset, 32,
distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategySaveLoadWeights(tf.test.TestCase,
parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations_minus_default(),
tf.__internal__.test.combinations.combine(
optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn)))
def test_save_load_h5(self, distribution, optimizer):
with self.cached_session():
dataset = keras_test_lib.get_dataset(distribution)
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer(),
'mse')
model.fit(dataset, epochs=1, steps_per_epoch=1)
weights_file = tempfile.mktemp('.h5')
model.save_weights(weights_file)
model_2 = keras_test_lib.get_model()
model_2.compile(
optimizer(),
'mse')
model_2.load_weights(weights_file)
model_2.predict(
keras_test_lib.get_predict_dataset(distribution), steps=2)
model_2.fit(dataset, epochs=1, steps_per_epoch=1)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations_minus_default(),
tf.__internal__.test.combinations.combine(
optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn)))
def test_save_load_trackable(self, distribution, optimizer):
# TODO(b/123533246): Enable the test for TPU once bug is fixed
if (isinstance(distribution,
(tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy)) and
distribution.extended.steps_per_run > 1):
self.skipTest('MultiStep TPU Strategy deadlocks with optimizer restore.')
with self.cached_session():
dataset = keras_test_lib.get_dataset(distribution)
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer(),
'mse')
model.fit(dataset, epochs=1, steps_per_epoch=1)
weights_file = tempfile.mktemp()
model.save_weights(weights_file)
model_2 = keras_test_lib.get_model()
model_2.compile(
optimizer(),
'mse')
model_2.load_weights(weights_file)
model_2.predict(
keras_test_lib.get_predict_dataset(distribution), steps=2)
model_2.fit(dataset, epochs=1, steps_per_epoch=1)
class TestDistributionStrategyValidation(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_test_lib.all_strategy_combinations_minus_default()))
def test_layer_outside_scope(self, distribution):
with self.cached_session():
with self.assertRaisesRegex(
ValueError, 'was not created in the distribution strategy'):
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
with distribution.scope():
model = keras.Model(x, y)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics)
@tf.__internal__.distribute.combinations.generate(
keras_test_lib.all_strategy_combinations_minus_default())
def test_model_outside_scope(self, distribution):
with self.cached_session():
with self.assertRaisesRegex(
ValueError, 'was not created in the distribution strategy'):
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
with distribution.scope():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
class TestDistributionStrategyWithStaticShapes(tf.test.TestCase,
parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_input_batch_size_not_divisible_by_num_replicas(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
ValueError, r'The `batch_size` argument \(5\) must be divisible by '
r'the number of replicas \(2\)'):
keras.layers.Input(shape=(3,), batch_size=5, name='input')
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_static_input_batch_size(self, distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10, drop_remainder=True)
with distribution.scope():
x = keras.layers.Input(shape=(3,), batch_size=10, name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])
model.fit(dataset, epochs=1, steps_per_epoch=5)
model.evaluate(dataset, steps=5)
model.predict(dataset)
if __name__ == '__main__':
tf.__internal__.distribute.multi_process_runner.test_main()
| 37.889423
| 114
| 0.652624
|
5ef906c79830f8e6ec961326015c50567253b119
| 15,672
|
py
|
Python
|
lambda/lambda_function.py
|
aktarinaakhi/python-alexa-audio-streaming-example-skill
|
ccc467be1b4586b3531853eaf0597628d4777ae6
|
[
"MIT"
] | null | null | null |
lambda/lambda_function.py
|
aktarinaakhi/python-alexa-audio-streaming-example-skill
|
ccc467be1b4586b3531853eaf0597628d4777ae6
|
[
"MIT"
] | null | null | null |
lambda/lambda_function.py
|
aktarinaakhi/python-alexa-audio-streaming-example-skill
|
ccc467be1b4586b3531853eaf0597628d4777ae6
|
[
"MIT"
] | null | null | null |
import logging
import json
import random
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.dispatch_components import (AbstractRequestHandler, AbstractExceptionHandler, AbstractRequestInterceptor, AbstractResponseInterceptor)
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_model.interfaces.audioplayer import (
PlayDirective, PlayBehavior, AudioItem, Stream, AudioItemMetadata,
StopDirective, ClearQueueDirective, ClearBehavior)
# Initializing the logger and setting the level to "INFO"
# Read more about it here https://www.loggly.com/ultimate-guide/python-logging-basics/
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
Instructions to modify the code to play the stream of your choice:
1. Replace the current url with your stream url. Make sure that it has a valid SSL certificate and starts with 'https' and not 'http'
2. Replace the title under metadata with the name of your stream or radio. Alexa speaks out this name before the stream begins.
3. Replace the subtitle under metadata with your stream's tagline. It is displayed on screen-enabled devices while the skill is playing.
4. Replace the url under metadata>art>sources with an album art image of your choice. It should be in png or jpg format of size 512x512 pixels.
5. Replace the url under metadata>backgroundImage>sources with a background image of your choice. It should be in png or jpg format of size 1200x800 pixels.
"""
# Audio stream metadata
STREAMS = [
{
"token": '1',
"url": 'https://www.radiokrishna.com/RKC-Terni-HQ.m3u',
"metadata": {
"title": 'latest money with mak and g podcast',
"subtitle": 'A subtitle for money with mak and g podcast',
"art": {
"sources": [
{
"contentDescription": 'example image',
"url": 'https://s3.amazonaws.com/cdn.dabblelab.com/img/audiostream-starter-512x512.png',
"widthPixels": 512,
"heightPixels": 512
}
]
},
"backgroundImage": {
"sources": [
{
"contentDescription": 'example image',
"url": 'https://s3.amazonaws.com/cdn.dabblelab.com/img/wayfarer-on-beach-1200x800.png',
"widthPixels": 1200,
"heightPixels": 800
}
]
}
}
}
]
# Intent Handlers
# This handler checks if the device supports audio playback
class CheckAudioInterfaceHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
if handler_input.request_envelope.context.system.device:
return handler_input.request_envelope.context.system.device.supported_interfaces.audio_player is None
else:
return False
def handle(self, handler_input):
language_prompts = handler_input.attributes_manager.request_attributes["_"]
speech_output = language_prompts["DEVICE_NOT_SUPPORTED"]
return (
handler_input.response_builder
.speak(speech_output)
.set_should_end_session(True)
.response
)
# This handler starts the stream playback whenever a user invokes the skill or resumes playback.
class LaunchRequestHandler(AbstractRequestHandler):
def can_handle(self,handler_input):
return is_request_type("LaunchRequest")(handler_input)
def handle(self,handler_input):
stream = STREAMS[0]
return ( handler_input.response_builder
.speak("Starting {}".format(stream["metadata"]["title"]))
.add_directive(
PlayDirective(
play_behavior=PlayBehavior.REPLACE_ALL,
audio_item=AudioItem(
stream=Stream(
token=stream["token"],
url=stream["url"],
offset_in_milliseconds=0,
expected_previous_token=None),
metadata=stream["metadata"]
)
)
)
.set_should_end_session(True)
.response
)
class ResumeStreamIntentHandler(AbstractRequestHandler):
def can_handle(self,handler_input):
return (is_request_type("PlaybackController.PlayCommandIssued")(handler_input)
or is_intent_name("AMAZON.ResumeIntent")(handler_input)
)
def handle(self,handler_input):
stream = STREAMS[0]
return ( handler_input.response_builder
.add_directive(
PlayDirective(
play_behavior=PlayBehavior.REPLACE_ALL,
audio_item=AudioItem(
stream=Stream(
token=stream["token"],
url=stream["url"],
offset_in_milliseconds=0,
expected_previous_token=None),
metadata=stream["metadata"]
)
)
)
.set_should_end_session(True)
.response
)
# This handler handles all the required audio player intents which are not supported by the skill yet.
class UnhandledFeaturesIntentHandler(AbstractRequestHandler):
def can_handle(self,handler_input):
return (is_intent_name("AMAZON.LoopOnIntent")(handler_input)
or is_intent_name("AMAZON.NextIntent")(handler_input)
or is_intent_name("AMAZON.PreviousIntent")(handler_input)
or is_intent_name("AMAZON.RepeatIntent")(handler_input)
or is_intent_name("AMAZON.ShuffleOnIntent")(handler_input)
or is_intent_name("AMAZON.StartOverIntent")(handler_input)
or is_intent_name("AMAZON.ShuffleOffIntent")(handler_input)
or is_intent_name("AMAZON.LoopOffIntent")(handler_input)
)
def handle(self, handler_input):
language_prompts = handler_input.attributes_manager.request_attributes["_"]
speech_output = random.choice(language_prompts["UNHANDLED"])
return (
handler_input.response_builder
.speak(speech_output)
.set_should_end_session(True)
.response
)
# This handler provides the user with basic info about the skill when a user asks for it.
# Note: This only works with one-shot utterances and not during stream playback.
class AboutIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_intent_name("AboutIntent")(handler_input)
def handle(self, handler_input):
language_prompts = handler_input.attributes_manager.request_attributes["_"]
speech_output = random.choice(language_prompts["ABOUT"])
reprompt = random.choice(language_prompts["ABOUT_REPROMPT"])
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
language_prompts = handler_input.attributes_manager.request_attributes["_"]
speech_output = random.choice(language_prompts["HELP"])
reprompt = random.choice(language_prompts["HELP_REPROMPT"])
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return (
is_intent_name("AMAZON.CancelIntent")(handler_input)
or is_intent_name("AMAZON.StopIntent")(handler_input)
or is_intent_name("AMAZON.PauseIntent")(handler_input)
)
def handle(self, handler_input):
return ( handler_input.response_builder
.add_directive(
ClearQueueDirective(
clear_behavior=ClearBehavior.CLEAR_ALL)
)
.add_directive(StopDirective())
.set_should_end_session(True)
.response
)
class PlaybackStartedIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_request_type("AudioPlayer.PlaybackStarted")(handler_input)
def handle(self, handler_input):
return ( handler_input.response_builder
.add_directive(
ClearQueueDirective(
clear_behavior=ClearBehavior.CLEAR_ENQUEUED)
)
.response
)
class PlaybackStoppedIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return ( is_request_type("PlaybackController.PauseCommandIssued")(handler_input)
or is_request_type("AudioPlayer.PlaybackStopped")(handler_input)
)
def handle(self, handler_input):
return ( handler_input.response_builder
.add_directive(
ClearQueueDirective(
clear_behavior=ClearBehavior.CLEAR_ALL)
)
.add_directive(StopDirective())
.set_should_end_session(True)
.response
)
# This handler tries to play the stream again if the playback failed due to any reason.
class PlaybackFailedIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_request_type("AudioPlayer.PlaybackFailed")(handler_input)
def handle(self,handler_input):
stream = STREAMS[0]
return ( handler_input.response_builder
.add_directive(
PlayDirective(
play_behavior=PlayBehavior.REPLACE_ALL,
audio_item=AudioItem(
stream=Stream(
token=stream["token"],
url=stream["url"],
offset_in_milliseconds=0,
expected_previous_token=None),
metadata=stream["metadata"]
)
)
)
.set_should_end_session(True)
.response
)
# This handler handles utterances that can't be matched to any other intent handler.
class FallbackIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
language_prompts = handler_input.attributes_manager.request_attributes["_"]
speech_output = random.choice(language_prompts["FALLBACK"])
reprompt = random.choice(language_prompts["FALLBACK_REPROMPT"])
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
class SessionEndedRequestHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
logger.info("Session ended with reason: {}".format(handler_input.request_envelope.request.reason))
return handler_input.response_builder.response
class ExceptionEncounteredRequestHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_request_type("System.ExceptionEncountered")(handler_input)
def handle(self, handler_input):
logger.info("Session ended with reason: {}".format(handler_input.request_envelope.request.reason))
return handler_input.response_builder.response
# Interceptors
# This interceptor is used for supporting different languages and locales. It detects the user's locale,
# loads the corresponding language prompts and sends them as a request attribute object to the handler functions.
class LocalizationInterceptor(AbstractRequestInterceptor):
def process(self, handler_input):
locale = handler_input.request_envelope.request.locale
logger.info("Locale is {}".format(locale))
try:
with open("languages/"+str(locale)+".json") as language_data:
language_prompts = json.load(language_data)
        except FileNotFoundError:
with open("languages/"+ str(locale[:2]) +".json") as language_data:
language_prompts = json.load(language_data)
handler_input.attributes_manager.request_attributes["_"] = language_prompts
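# A sketch of the shape a languages/<locale>.json file is assumed to have,
# based only on the prompt keys the handlers above read (the prompt texts
# themselves are placeholders):
# {
#   "DEVICE_NOT_SUPPORTED": "Sorry, this device does not support audio playback.",
#   "UNHANDLED": ["That action is not supported while streaming."],
#   "ABOUT": ["This skill plays an internet radio stream."],
#   "ABOUT_REPROMPT": ["What would you like to do?"],
#   "HELP": ["Just ask me to play to start the stream."],
#   "HELP_REPROMPT": ["How can I help you?"],
#   "FALLBACK": ["Sorry, I didn't understand that."],
#   "FALLBACK_REPROMPT": ["Could you say that again?"],
#   "ERROR": "Something went wrong. Please try again.",
#   "ERROR_REPROMPT": "Please try again."
# }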
# This interceptor logs each request sent from Alexa to our endpoint.
class RequestLogger(AbstractRequestInterceptor):
def process(self, handler_input):
logger.debug("Alexa Request: {}".format(
handler_input.request_envelope.request))
# This interceptor logs each response our endpoint sends back to Alexa.
class ResponseLogger(AbstractResponseInterceptor):
def process(self, handler_input, response):
logger.debug("Alexa Response: {}".format(response))
# This exception handler handles syntax or routing errors. If you receive an error stating
# the request handler is not found, you have not implemented a handler for the intent or
# have not included it in the skill builder below.
class CatchAllExceptionHandler(AbstractExceptionHandler):
def can_handle(self, handler_input, exception):
return True
def handle(self, handler_input, exception):
logger.error(exception, exc_info=True)
language_prompts = handler_input.attributes_manager.request_attributes["_"]
speech_output = language_prompts["ERROR"]
reprompt = language_prompts["ERROR_REPROMPT"]
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
# Skill Builder
# Define a skill builder instance and add all the request handlers,
# exception handlers and interceptors to it.
sb = SkillBuilder()
sb.add_request_handler(CheckAudioInterfaceHandler())
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(ResumeStreamIntentHandler())
sb.add_request_handler(UnhandledFeaturesIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(AboutIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(PlaybackStartedIntentHandler())
sb.add_request_handler(PlaybackStoppedIntentHandler())
sb.add_request_handler(PlaybackFailedIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_exception_handler(CatchAllExceptionHandler())
sb.add_global_request_interceptor(LocalizationInterceptor())
sb.add_global_request_interceptor(RequestLogger())
sb.add_global_response_interceptor(ResponseLogger())
lambda_handler = sb.lambda_handler()
| 41.242105
| 160
| 0.636549
|
0a314446b6341d143d4b4d7f983537019704b933
| 5,333
|
py
|
Python
|
src/cryptoadvance/specter/server.py
|
romanz/specter-desktop
|
68ef655ed03f1631bd7284085a38c404c59e755a
|
[
"MIT"
] | null | null | null |
src/cryptoadvance/specter/server.py
|
romanz/specter-desktop
|
68ef655ed03f1631bd7284085a38c404c59e755a
|
[
"MIT"
] | null | null | null |
src/cryptoadvance/specter/server.py
|
romanz/specter-desktop
|
68ef655ed03f1631bd7284085a38c404c59e755a
|
[
"MIT"
] | null | null | null |
import logging
import os
import sys
import secrets
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask, redirect, url_for
from flask_login import LoginManager, login_user
from flask_wtf.csrf import CSRFProtect
from .helpers import hwi_get_config
from .specter import Specter
from .hwi_server import hwi_server
from .user import User
from .util.version import VersionChecker
from werkzeug.middleware.proxy_fix import ProxyFix
from jinja2 import select_autoescape
logger = logging.getLogger(__name__)
env_path = Path(".") / ".flaskenv"
load_dotenv(env_path)
csrf = CSRFProtect()
def calc_module_name(config):
"""tiny helper to make passing configs more convenient"""
if "." in config:
return config
else:
return "cryptoadvance.specter.config." + config
def create_app(config=None):
# Cmdline has precedence over Env-Var
if config is not None:
config = calc_module_name(
os.environ.get("SPECTER_CONFIG")
if os.environ.get("SPECTER_CONFIG")
else config
)
else:
# Enables injection of a different config via Env-Variable
if os.environ.get("SPECTER_CONFIG"):
config = calc_module_name(os.environ.get("SPECTER_CONFIG"))
else:
# Default
config = "cryptoadvance.specter.config.ProductionConfig"
if getattr(sys, "frozen", False):
# Best understood with the snippet below this section:
# https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#using-sys-executable-and-sys-argv-0
template_folder = os.path.join(sys._MEIPASS, "templates")
static_folder = os.path.join(sys._MEIPASS, "static")
logger.info("pyinstaller based instance running in {}".format(sys._MEIPASS))
app = Flask(
__name__, template_folder=template_folder, static_folder=static_folder
)
else:
app = Flask(__name__, template_folder="templates", static_folder="static")
app.jinja_env.autoescape = select_autoescape(default_for_string=True, default=True)
logger.info(f"Configuration: {config}")
app.config.from_object(config)
app.wsgi_app = ProxyFix(
app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_port=1, x_prefix=1
)
csrf.init_app(app)
app.csrf = csrf
return app
def init_app(app, hwibridge=False, specter=None):
"""see blogpost 19nd Feb 2020"""
# Login via Flask-Login
app.logger.info("Initializing LoginManager")
app.secret_key = app.config["SECRET_KEY"]
if specter is None:
# the default. If not None, then it got injected for testing
app.logger.info("Initializing Specter")
specter = Specter(
data_folder=app.config["SPECTER_DATA_FOLDER"],
config=app.config["DEFAULT_SPECTER_CONFIG"],
internal_bitcoind_version=app.config["INTERNAL_BITCOIND_VERSION"],
)
# version checker
# checks for new versions once per hour
specter.version = VersionChecker(specter=specter)
specter.version.start()
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.init_app(app) # Enable Login
login_manager.login_view = "auth_endpoint.login" # Enable redirects if unauthorized
app.config["SESSION_COOKIE_SAMESITE"] = "Strict"
@login_manager.user_loader
def user_loader(id):
return specter.user_manager.get_user(id)
def login(id):
login_user(user_loader(id))
app.login = login
# Attach specter instance so child views (e.g. hwi) can access it
app.specter = specter
if specter.config["auth"].get("method") == "none":
app.logger.info("Login disabled")
app.config["LOGIN_DISABLED"] = True
else:
app.logger.info("Login enabled")
app.logger.info("Initializing Controller ...")
app.register_blueprint(hwi_server, url_prefix="/hwi")
csrf.exempt(hwi_server)
if not hwibridge:
with app.app_context():
from cryptoadvance.specter.server_endpoints import controller
if app.config.get("TESTING") and len(app.view_functions) <= 20:
# Need to force a reload as otherwise the import is skipped
                # in pytest, the app is created anew for each test
# But we shouldn't do that if not necessary as this would result in
# --> View function mapping is overwriting an existing endpoint function
# see archblog for more about this nasty workaround
import importlib
importlib.reload(controller)
else:
@app.route("/", methods=["GET"])
def index():
return redirect(url_for("hwi_server.hwi_bridge_settings"))
@app.context_processor
def inject_tor():
if app.config["DEBUG"]:
return dict(tor_service_id="", tor_enabled=False)
return dict(tor_service_id=app.tor_service_id, tor_enabled=app.tor_enabled)
return app
def create_and_init():
"""This method can be used to fill the FLASK_APP-env variable like
export FLASK_APP="src/cryptoadvance/specter/server:create_and_init()"
See Development.md to use this for debugging
"""
app = create_app()
app.app_context().push()
init_app(app)
return app
| 33.968153
| 115
| 0.677667
|
0e44cd6ee0a7812f64acff9c529b350ce44c4bf3
| 3,406
|
py
|
Python
|
tests/adapters/test_tf_tensor_input.py
|
henrywu2019/BentoML
|
d3665f052374a1a419b2a3912b1986334fdae2ac
|
[
"Apache-2.0"
] | 3,451
|
2019-04-02T01:47:42.000Z
|
2022-03-31T16:20:49.000Z
|
tests/adapters/test_tf_tensor_input.py
|
henrywu2019/BentoML
|
d3665f052374a1a419b2a3912b1986334fdae2ac
|
[
"Apache-2.0"
] | 1,925
|
2019-04-03T00:19:05.000Z
|
2022-03-31T22:41:54.000Z
|
tests/adapters/test_tf_tensor_input.py
|
henrywu2019/BentoML
|
d3665f052374a1a419b2a3912b1986334fdae2ac
|
[
"Apache-2.0"
] | 451
|
2019-04-02T01:53:41.000Z
|
2022-03-29T08:49:06.000Z
|
# pylint: disable=redefined-outer-name
import sys
import json
import base64
import math
import numbers
import pytest
import numpy as np
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
from bentoml.types import HTTPRequest, BATCH_HEADER
def mock_tensorflow_module():
class MockTensor:
def __init__(self, _input):
self.input = _input
def numpy(self):
if isinstance(self.input, (list, tuple)):
return np.array(self.input, dtype=object)
return self.input
def __eq__(self, dst):
return self.input == dst.input
class MockConstant(MockTensor):
pass
sys.modules['tensorflow'] = MagicMock()
import tensorflow as tf
tf.__version__ = "2.0"
tf.Tensor = tf.compat.v2.Tensor = MockTensor
tf.constant = tf.compat.v2.constant = MockConstant
mock_tensorflow_module()
STR_BYTES = b"hello world"
STR = STR_BYTES.decode("utf-8")
STR_B64 = base64.b64encode(STR_BYTES).decode()
BIN_BYTES = b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR"
BIN_B64 = base64.b64encode(BIN_BYTES).decode()
TEST_INPUTS = [
{'instances': [[[1, 2]], [[3, 4]]]},
{"instances": [[1.0, -float('inf'), float('inf')]]},
{"instances": float('nan')},
{"instances": {"b64": STR_B64}},
{"instances": [{"b64": STR_B64}]},
{"instances": {"b64": BIN_B64}},
{"instances": [{"b64": BIN_B64}]},
]
TEST_HEADERS = [
((BATCH_HEADER, 'true'),),
((BATCH_HEADER, 'true'),),
((BATCH_HEADER, 'false'),),
((BATCH_HEADER, 'false'),),
((BATCH_HEADER, 'true'),),
((BATCH_HEADER, 'false'),),
((BATCH_HEADER, 'true'),),
]
EXPECTED_RESULTS = [
[[[1, 2]], [[3, 4]]],
[[1.0, -float('inf'), float('inf')]],
float('nan'),
STR,
[STR],
{"b64": BIN_B64},
[{"b64": BIN_B64}],
]
@pytest.fixture(params=zip(TEST_INPUTS, TEST_HEADERS, EXPECTED_RESULTS))
def test_cases(request):
return request.param
def assert_eq_or_both_nan(x, y):
if isinstance(x, numbers.Number) and isinstance(y, numbers.Number):
assert math.isnan(x) and math.isnan(y) or math.isclose(x, y)
else:
assert x == y
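# For instance, assert_eq_or_both_nan(float('nan'), float('nan')) passes (both
# NaN), assert_eq_or_both_nan(1.0, 1.0 + 1e-12) passes via math.isclose, and
# non-numeric values fall back to plain equality.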
def test_tf_tensor_handle_request(make_api, test_cases):
'''
ref: https://www.tensorflow.org/tfx/serving/api_rest#request_format_2
'''
from bentoml.adapters import TfTensorInput
api = make_api(input_adapter=TfTensorInput(), user_func=lambda i: i)
    input_data, headers, expected_result = test_cases
body = json.dumps(input_data).encode('utf-8')
request = HTTPRequest(headers=headers, body=body)
response = tuple(api.handle_batch_request([request]))[0]
prediction = json.loads(response.body)
    assert_eq_or_both_nan(expected_result, prediction)
def test_tf_tensor_handle_batch_request(make_api, test_cases):
'''
ref: https://www.tensorflow.org/tfx/serving/api_rest#request_format_2
'''
from bentoml.adapters import TfTensorInput
api = make_api(input_adapter=TfTensorInput(), user_func=lambda i: i)
    input_data, headers, expected_result = test_cases
body = json.dumps(input_data).encode('utf-8')
request = HTTPRequest(headers=headers, body=body)
responses = api.handle_batch_request([request] * 3)
for response in responses:
prediction = json.loads(response.body)
        assert_eq_or_both_nan(expected_result, prediction)
| 25.609023
| 73
| 0.658837
|
02aac2a81ae488e5836d70740bbeb641eccdea02
| 2,459
|
py
|
Python
|
sahara/tests/unit/service/api/v2/test_images.py
|
openstack/sahara
|
c4f4d29847d5bcca83d49ef7e9a3378458462a79
|
[
"Apache-2.0"
] | 161
|
2015-01-05T11:46:42.000Z
|
2022-01-05T07:41:39.000Z
|
sahara/tests/unit/service/api/v2/test_images.py
|
openstack/sahara
|
c4f4d29847d5bcca83d49ef7e9a3378458462a79
|
[
"Apache-2.0"
] | 1
|
2021-01-28T06:06:41.000Z
|
2021-01-28T06:06:43.000Z
|
sahara/tests/unit/service/api/v2/test_images.py
|
openstack/sahara
|
c4f4d29847d5bcca83d49ef7e9a3378458462a79
|
[
"Apache-2.0"
] | 118
|
2015-01-29T06:34:35.000Z
|
2021-12-06T07:30:09.000Z
|
# Copyright (c) 2017 EasyStack Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from sahara.service.api.v2 import images
from sahara.tests.unit import base
class TestImageApi(base.SaharaTestCase):
    def setUp(self):
        super(TestImageApi, self).setUp()
@mock.patch('sahara.utils.openstack.images.SaharaImageManager')
def test_get_image_tags(self, mock_manager):
image = mock.Mock()
manager = mock.Mock()
manager.get.return_value = mock.Mock(tags=['foo', 'bar', 'baz'])
mock_manager.return_value = manager
self.assertEqual(['foo', 'bar', 'baz'], images.get_image_tags(image))
@mock.patch('sahara.utils.openstack.images.SaharaImageManager')
def test_set_image_tags(self, mock_manager):
def _tag(image, to_add):
return tags.append('qux')
def _untag(image, to_remove):
return tags.remove('bar')
expected_tags = ['foo', 'baz', 'qux']
tags = ['foo', 'bar', 'baz']
image = mock.Mock()
manager = mock.Mock()
manager.get.return_value = mock.Mock(tags=tags)
manager.tag.side_effect = _tag
manager.untag.side_effect = _untag
mock_manager.return_value = manager
self.assertEqual(expected_tags,
images.set_image_tags(image, expected_tags).tags)
@mock.patch('sahara.utils.openstack.images.SaharaImageManager')
def test_remove_image_tags(self, mock_manager):
def _untag(image, to_remove):
for i in range(len(to_remove)):
actual_tags.pop()
return actual_tags
actual_tags = ['foo', 'bar', 'baz']
image = mock.Mock()
manager = mock.Mock()
manager.get.return_value = mock.Mock(tags=actual_tags)
manager.untag.side_effect = _untag
mock_manager.return_value = manager
self.assertEqual([], images.remove_image_tags(image).tags)
| 35.637681
| 77
| 0.666531
|
60914a027b2002f3d2bb23f454edcd8e9d8b1f7d
| 1,499
|
py
|
Python
|
PyInstaller/isolated/__init__.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/isolated/__init__.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/isolated/__init__.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) 2021-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
# -----------------------------------------------------------------------------
"""
PyInstaller hooks typically will need to import the package which they are written for but doing so may manipulate
globals such as :data:`sys.path` or :data:`os.environ` in ways that affect the build. For example, on Windows,
Qt's binaries are added to, and then loaded via, ``PATH`` in such a way that if you import multiple Qt variants in one
session then there is no guarantee which variant's binaries each variant will get!
To get around this, PyInstaller does any such tasks in an isolated Python subprocess and ships a
:mod:`PyInstaller.isolated` submodule to do so in hooks. ::
from PyInstaller import isolated
This submodule provides:
* :func:`isolated.call() <call>` to evaluate functions in isolation.
* :func:`@isolated.decorate <decorate>` to mark a function as always called in isolation.
* :class:`isolated.Python() <Python>` to efficiently call many functions in a single child instance of Python.
"""
# flake8: noqa
from ._parent import Python, call, decorate
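# A minimal usage sketch of the three entry points described in the docstring
# above (`setuptools` here is only an illustrative package to import):
#
#   from PyInstaller import isolated
#
#   def _version():
#       import setuptools
#       return setuptools.__version__
#
#   v = isolated.call(_version)        # evaluate once in a child interpreter
#
#   @isolated.decorate                 # every call now runs isolated
#   def _location():
#       import setuptools
#       return setuptools.__path__[0]
#
#   with isolated.Python() as child:   # reuse a single child for many calls
#       v = child.call(_version)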
| 46.84375
| 114
| 0.682455
|
be7d415573882e988c15dddbdccf497e26c425da
| 897
|
py
|
Python
|
misc.py
|
RSkinderowicz/PTACO
|
7cc38eb4a74480c71d561c63086902514fef849b
|
[
"MIT"
] | null | null | null |
misc.py
|
RSkinderowicz/PTACO
|
7cc38eb4a74480c71d561c63086902514fef849b
|
[
"MIT"
] | null | null | null |
misc.py
|
RSkinderowicz/PTACO
|
7cc38eb4a74480c71d561c63086902514fef849b
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Miscellaneous utility functions.
"""
from array import array
def array_double(values):
"""
Returns a compact array-like representation of double values.
"""
return array('d', values)
def array_int(values):
"""
Returns a compact array-like representation of integer values.
"""
return array('l', values)
def array_bool(values):
"""
Returns a compact array-like representation of boolean values in a
given iterable.
"""
return array('B', values)
def mean(sequence):
    """Returns the arithmetic mean of the sequence (0.0 for an empty one)."""
    return sum(sequence) / max(len(sequence), 1.0)
def median(lst):
"""
Returns median of the lst.
"""
sorted_list = sorted(lst)
list_len = len(sorted_list)
index = (list_len - 1) // 2
if list_len % 2:
return sorted_list[index]
else:
return (sorted_list[index] + sorted_list[index + 1])/2.0
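# A few illustrative values (a sketch, not part of the original module):
#   array_double([1.0, 2.5])  -> array('d', [1.0, 2.5])
#   mean([1, 2, 3])           -> 2.0
#   mean([])                  -> 0.0   (the max(len, 1.0) guard avoids /0)
#   median([3, 1, 2])         -> 2
#   median([4, 1, 3, 2])      -> 2.5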
| 19.085106
| 70
| 0.627648
|
064a5c8966f6e0ac38b4b20d12329d83b202864e
| 2,086
|
py
|
Python
|
skil/workspaces.py
|
bpark738/skil-python
|
6721b17788a13bf17bc2a50ead63f591f0edb47e
|
[
"Apache-2.0"
] | null | null | null |
skil/workspaces.py
|
bpark738/skil-python
|
6721b17788a13bf17bc2a50ead63f591f0edb47e
|
[
"Apache-2.0"
] | null | null | null |
skil/workspaces.py
|
bpark738/skil-python
|
6721b17788a13bf17bc2a50ead63f591f0edb47e
|
[
"Apache-2.0"
] | null | null | null |
import skil_client
from skil_client.rest import ApiException as api_exception
class WorkSpace:
"""WorkSpace
Workspaces are a collection of features that enable different tasks such as
conducting experiments, training models, and test different dataset transforms.
Workspaces are distinct from Deployments by operating as a space for
non-production work.
# Arguments
skil: Skil server instance
name: string. Name for the workspace.
        labels: string. Labels associated with the workspace, useful for searching (comma separated).
verbose: boolean. If True, api response will be printed.
create: boolean. Internal, do not use.
"""
def __init__(self, skil=None, name=None, labels=None, verbose=False, create=True):
if not create:
return
self.skil = skil
self.printer = self.skil.printer
self.name = name if name else 'skil_workspace'
self.workspace = self.skil.api.add_model_history(
self.skil.server_id,
skil_client.AddModelHistoryRequest(name, labels)
)
self.id = self.workspace.model_history_id
if verbose:
self.printer.pprint(self.workspace)
def delete(self):
"""Deletes the work space.
"""
try:
api_response = self.skil.api.delete_model_history(
self.skil.server_id, self.id)
self.skil.printer.pprint(api_response)
except api_exception as e:
self.skil.printer.pprint(
">>> Exception when calling delete_model_history: %s\n" % e)
def get_workspace_by_id(skil, workspace_id):
"""Get workspace by ID
# Arguments:
skil: `Skil` server instance
workspace_id: string, workspace ID
"""
server_id = skil.server_id
response = skil.api.get_model_history(server_id, workspace_id)
ws = WorkSpace(create=False)
ws.skil = skil
ws.printer = skil.printer
ws.workspace = response
ws.id = workspace_id
ws.name = response.model_name
return ws
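# A hypothetical end-to-end sketch (assumes a reachable SKIL server and that
# the `Skil` client class is importable from this package):
#
#   from skil import Skil
#   skil_server = Skil()
#   ws = WorkSpace(skil_server, name='my-experiments', labels='demo,test')
#   same_ws = get_workspace_by_id(skil_server, ws.id)
#   assert same_ws.id == ws.id
#   ws.delete()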
| 31.606061
| 101
| 0.654362
|
bb4a3eb738556a95ebf8ea29dc3c8328466d59a4
| 3,325
|
py
|
Python
|
awa_extractor.py
|
kartik727/vocab_practice
|
e5deba65aac81c801c3e87cafc8aa22311b3e482
|
[
"MIT"
] | null | null | null |
awa_extractor.py
|
kartik727/vocab_practice
|
e5deba65aac81c801c3e87cafc8aa22311b3e482
|
[
"MIT"
] | 1
|
2021-09-11T23:04:28.000Z
|
2021-09-12T23:37:26.000Z
|
awa_extractor.py
|
kartik727/vocab_practice
|
e5deba65aac81c801c3e87cafc8aa22311b3e482
|
[
"MIT"
] | null | null | null |
import argparse
import enum
import random
from termcolor import colored
class TopicStatus(enum.Enum):
statement = 1
instruction = 2
class LineType(enum.Enum):
para = 1
div = 2
_div = 3
other = 4
class Topic:
def __init__(self):
self.statement = []
self.instruction = None
self.status = TopicStatus.statement
def update(self, line: str) -> bool:
line_type = Topic.get_line_type(line)
if self.status == TopicStatus.statement:
if line_type == LineType.para:
self.statement.append(line[3:-4])
elif self.statement: # statement not empty
if line_type == LineType.div:
self.status = TopicStatus.instruction
return False
elif self.status == TopicStatus.instruction:
if line_type == LineType.para:
self.instruction = line[3:-4]
return True
def __str__(self):
bld = ''
for s in self.statement:
bld += s + '\n'
bld = colored(bld, 'yellow')
bld += colored('\n' + self.instruction + '\n', 'blue', on_color='on_yellow')
return bld
@staticmethod
def get_line_type(line: str) -> LineType:
if line[:3] == '<p>':
return LineType.para
elif line[:4] == '<div':
return LineType.div
elif line[-6:] == '</div>':
return LineType._div
else:
return LineType.other
class TopicBuffer:
def __init__(self):
self.empty = True
self.topic = None
self.topic_db = []
def build(self, line):
if self.empty:
self.topic = Topic()
self.empty = False
topic_complete = self.topic.update(line)
if topic_complete:
self.topic_db.append(self.topic)
self.topic = None
self.empty = True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--issue-filename', default='issue_pool.txt', help='Name of the file to be read for the issue pool', metavar='')
parser.add_argument('--argument-filename', default='argument_pool.txt', help='Name of the file to be read for the argument pool', metavar='')
parser.add_argument('-i', '--issue', action='store_true', help='Select if you want an `issue` task')
parser.add_argument('-a', '--argument', action='store_true', help='Select if you want an `argument` task')
args = parser.parse_args()
assert not (args.issue and args.argument), 'You can only request one of `issue` or `argument` task'
if args.argument:
issue = False
elif args.issue:
issue = True
else:
print('No option between `issue` and `argument` chosen. Selecting `issue` by default.')
issue = True
if issue:
filename = args.issue_filename
else:
filename = args.argument_filename
data_path = 'data/' + filename
with open(data_path, 'r') as f:
tb = TopicBuffer()
for line in f:
line = line.rstrip()
tb.build(line)
print(f'Total topics built: {len(tb.topic_db)}')
topic = random.choice(tb.topic_db)
print('\n\n' + '-'*80 + '\n')
print(topic)
print('\n' + '-'*80 + '\n')
if __name__ == '__main__':
main()
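# Example invocations, based on the argparse flags defined in main():
#   python awa_extractor.py --issue
#   python awa_extractor.py -a --argument-filename argument_pool.txt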
| 29.954955
| 145
| 0.579549
|
8be4d40233aa0cc3eb83cc55c54584eb46631de4
| 19,138
|
py
|
Python
|
pyftdi/bits.py
|
larsch/pyftdi
|
c77136fe4000f36842bc996ff1d5a5a0e05c1be4
|
[
"BSD-3-Clause"
] | 1
|
2020-09-27T20:08:57.000Z
|
2020-09-27T20:08:57.000Z
|
pyftdi/bits.py
|
larsch/pyftdi
|
c77136fe4000f36842bc996ff1d5a5a0e05c1be4
|
[
"BSD-3-Clause"
] | null | null | null |
pyftdi/bits.py
|
larsch/pyftdi
|
c77136fe4000f36842bc996ff1d5a5a0e05c1be4
|
[
"BSD-3-Clause"
] | 1
|
2020-09-03T16:21:38.000Z
|
2020-09-03T16:21:38.000Z
|
# Copyright (c) 2010-2019 Emmanuel Blot <emmanuel.blot@free.fr>
# Copyright (c) 2008-2016, Neotion
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Neotion nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Bit field and sequence management."""
from typing import Iterable, List, Optional, Tuple, Union
from .misc import is_iterable, xor
#pylint: disable-msg=invalid-name
#pylint: disable-msg=unneeded-not
#pylint: disable-msg=too-many-branches
#pylint: disable-msg=too-many-arguments
class BitSequenceError(Exception):
"""Bit sequence error"""
class BitSequence:
"""Bit sequence.
Support most of the common bit operations: or, and, shift, comparison,
and conversion from and to integral values.
Bit sequence objects are iterable.
    Can be initialized with another bit sequence, an integral value,
a sequence of bytes or an iterable of common boolean values.
:param value: initial value
:param msb: most significant bit first or not
    :param length: count of significant bits in the bit sequence
:param bytes_: initial value specified as a sequence of bytes
:param msby: most significant byte first or not
"""
def __init__(self, value: Union['BitSequence', str, int] = None,
msb: bool = False, length: int = 0,
bytes_: Optional[bytes] = None, msby: bool = True):
"""Instantiate a new bit sequence.
"""
self._seq = bytearray()
seq = self._seq
if value and bytes_:
raise BitSequenceError("Cannot inialize with both a value and "
"bytes")
if bytes_:
provider = list(bytes_).__iter__() if msby else reversed(bytes_)
for byte in provider:
if isinstance(byte, str):
byte = ord(byte)
elif byte > 0xff:
raise BitSequenceError("Invalid byte value")
b = []
for _ in range(8):
b.append(bool(byte & 0x1))
byte >>= 1
if msb:
b.reverse()
seq.extend(b)
else:
value = self._tomutable(value)
if isinstance(value, int):
self._init_from_integer(value, msb, length)
elif isinstance(value, BitSequence):
self._init_from_sibling(value, msb)
elif is_iterable(value):
self._init_from_iterable(value, msb)
elif value is None:
pass
else:
raise BitSequenceError("Cannot initialize from a %s" % type(value))
self._update_length(length, msb)
def sequence(self) -> bytearray:
"""Return the internal representation as a new mutable sequence"""
return bytearray(self._seq)
def reverse(self) -> 'BitSequence':
"""In-place reverse"""
self._seq.reverse()
return self
def invert(self) -> 'BitSequence':
"""In-place invert sequence values"""
self._seq = bytearray([x ^ 1 for x in self._seq])
return self
def append(self, seq) -> 'BitSequence':
"""Concatenate a new BitSequence"""
if not isinstance(seq, BitSequence):
seq = BitSequence(seq)
self._seq.extend(seq.sequence())
return self
def lsr(self, count: int) -> None:
"""Left shift rotate"""
count %= len(self)
self._seq[:] = self._seq[count:] + self._seq[:count]
def rsr(self, count: int) -> None:
"""Right shift rotate"""
count %= len(self)
self._seq[:] = self._seq[-count:] + self._seq[:-count]
def tobit(self) -> bool:
"""Degenerate the sequence into a single bit, if possible"""
if len(self) != 1:
raise BitSequenceError("BitSequence should be a scalar")
return bool(self._seq[0])
def tobyte(self, msb: bool = False) -> int:
"""Convert the sequence into a single byte value, if possible"""
if len(self) > 8:
raise BitSequenceError("Cannot fit into a single byte")
byte = 0
pos = -1 if not msb else 0
# copy the sequence
seq = self._seq[:]
while seq:
byte <<= 1
byte |= seq.pop(pos)
return byte
def tobytes(self, msb: bool = False, msby: bool = False) -> bytearray:
"""Convert the sequence into a sequence of byte values"""
blength = (len(self)+7) & (~0x7)
sequence = list(self._seq)
if not msb:
sequence.reverse()
bytes_ = bytearray()
for pos in range(0, blength, 8):
seq = sequence[pos:pos+8]
byte = 0
while seq:
byte <<= 1
byte |= seq.pop(0)
bytes_.append(byte)
if msby:
bytes_.reverse()
return bytes_
@staticmethod
def _tomutable(value: Union[str, Tuple]) -> List:
"""Convert a immutable sequence into a mutable one"""
if isinstance(value, tuple):
# convert immutable sequence into a list so it can be popped out
value = list(value)
elif isinstance(value, str):
# convert immutable sequence into a list so it can be popped out
if value.startswith('0b'):
value = list(value[2:])
else:
value = list(value)
return value
def _init_from_integer(self, value: int, msb: bool, length: int) -> None:
"""Initialize from any integer value"""
bl = length or -1
seq = self._seq
while bl:
seq.append(bool(value & 1))
value >>= 1
if not value:
break
bl -= 1
if msb:
seq.reverse()
def _init_from_iterable(self, iterable: Iterable, msb: bool) -> None:
"""Initialize from an iterable"""
smap = {'0': 0, '1': 1, False: 0, True: 1, 0: 0, 1: 1}
seq = self._seq
try:
if msb:
seq.extend([smap[bit] for bit in reversed(iterable)])
else:
seq.extend([smap[bit] for bit in iterable])
except KeyError:
raise BitSequenceError("Invalid binary character in initializer")
def _init_from_sibling(self, value: 'BitSequence', msb: bool) -> None:
"""Initialize from a fellow object"""
self._seq = value.sequence()
if msb:
self._seq.reverse()
def _update_length(self, length, msb):
"""If a specific length is specified, extend the sequence as
expected"""
if length and (len(self) < length):
extra = bytearray([False] * (length-len(self)))
if msb:
extra.extend(self._seq)
self._seq = extra
else:
self._seq.extend(extra)
def __iter__(self):
return self._seq.__iter__()
def __reversed__(self):
return self._seq.__reversed__()
def __getitem__(self, index):
if isinstance(index, slice):
return self.__class__(value=self._seq[index])
return self._seq[index]
def __setitem__(self, index, value):
if isinstance(value, BitSequence):
if issubclass(value.__class__, self.__class__) and \
value.__class__ != self.__class__:
raise BitSequenceError("Cannot set item with instance of a "
"subclass")
if isinstance(index, slice):
value = self.__class__(value, length=len(self._seq[index]))
self._seq[index] = value.sequence()
else:
if not isinstance(value, BitSequence):
value = self.__class__(value)
val = value.tobit()
if index > len(self._seq):
raise BitSequenceError("Cannot change the sequence size")
self._seq[index] = val
def __len__(self):
return len(self._seq)
def __eq__(self, other):
return self._cmp(other) == 0
def __ne__(self, other):
return not self == other
    def __le__(self, other):
        return self._cmp(other) <= 0
    def __lt__(self, other):
        return self._cmp(other) < 0
    def __ge__(self, other):
        return self._cmp(other) >= 0
    def __gt__(self, other):
        return self._cmp(other) > 0
def _cmp(self, other):
# the bit sequence should be of the same length
ld = len(self) - len(other)
if ld:
return ld
for n, (x, y) in enumerate(zip(self._seq, other.sequence()), start=1):
if xor(x, y):
return n
return 0
def __repr__(self):
# cannot use bin() as it truncates the MSB zero bits
return ''.join([b and '1' or '0' for b in reversed(self._seq)])
def __str__(self):
chunks = []
srepr = repr(self)
length = len(self)
for i in range(0, length, 8):
if i:
j = -i
else:
j = None
chunks.append(srepr[-i-8:j])
return '%d: %s' % (len(self), ' '.join(reversed(chunks)))
def __int__(self):
value = 0
for b in reversed(self._seq):
value <<= 1
value |= b and 1
return value
def __and__(self, other):
if not isinstance(other, self.__class__):
raise BitSequenceError('Need a BitSequence to combine')
if len(self) != len(other):
raise BitSequenceError('Sequences must be the same size')
return self.__class__(value=list(map(lambda x, y: x and y,
self._seq, other.sequence())))
def __or__(self, other):
if not isinstance(other, self.__class__):
raise BitSequenceError('Need a BitSequence to combine')
if len(self) != len(other):
raise BitSequenceError('Sequences must be the same size')
return self.__class__(value=list(map(lambda x, y: x or y,
self._seq, other.sequence())))
def __add__(self, other):
return self.__class__(value=self._seq + other.sequence())
    def __ilshift__(self, count):
        count %= len(self)
        if count:
            # guard against count == 0: slicing with [:-0] would drop the
            # whole sequence instead of leaving it unchanged
            seq = bytearray([0]*count)
            seq.extend(self._seq[:-count])
            self._seq = seq
        return self
def __irshift__(self, count):
count %= len(self)
seq = self._seq[count:]
seq.extend([0]*count)
self._seq = seq
return self
def inc(self) -> None:
"""Increment the sequence"""
for p, b in enumerate(self._seq):
b ^= True
self._seq[p] = b
if b:
break
def dec(self) -> None:
"""Decrement the sequence"""
for p, b in enumerate(self._seq):
b ^= True
self._seq[p] = b
if not b:
break
def invariant(self) -> bool:
"""Tells whether all bits of the sequence are of the same value.
Return the value, or ValueError if the bits are not of the same
value
"""
try:
ref = self._seq[0]
except IndexError:
raise ValueError('Empty sequence')
if len(self._seq) == 1:
return ref
for b in self._seq[1:]:
if b != ref:
                raise ValueError('Bits do not match')
return ref
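# Illustrative usage sketch (not part of the original module; the values below
# are assumptions chosen for demonstration):
#   bs = BitSequence('0b0011', msb=True)
#   int(bs)        # -> 3
#   repr(bs)       # -> '0011'
#   bs.invert()    # in-place; repr(bs) is now '1100'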
class BitZSequence(BitSequence):
"""Tri-state bit sequence manipulation.
Support most of the BitSequence operations, with an extra high-Z state
:param value: initial value
:param msb: most significant bit first or not
    :param length: count of significant bits in the bit sequence
"""
__slots__ = ['_seq']
Z = 0xff # maximum byte value
def __init__(self, value=None, msb=False, length=0):
BitSequence.__init__(self, value=value, msb=msb, length=length)
def invert(self):
self._seq = [x in (None, BitZSequence.Z) and BitZSequence.Z or x ^ 1
for x in self._seq]
return self
def tobyte(self, msb=False):
raise BitSequenceError("Type %s cannot be converted to byte" %
type(self))
def tobytes(self, msb=False, msby=False):
raise BitSequenceError("Type %s cannot be converted to bytes" %
type(self))
def matches(self, other):
        if not isinstance(other, BitSequence):
            raise BitSequenceError('Not a BitSequence instance')
        # sequences of different lengths cannot match
        if len(self) != len(other):
            return False
for (x, y) in zip(self._seq, other.sequence()):
if BitZSequence.Z in (x, y):
continue
if x is not y:
return False
return True
def _init_from_iterable(self, iterable, msb):
"""Initialize from an iterable"""
smap = {'0': 0, '1': 1, 'Z': BitZSequence.Z,
False: 0, True: 1, None: BitZSequence.Z,
0: 0, 1: 1, BitZSequence.Z: BitZSequence.Z}
seq = self._seq
try:
if msb:
seq.extend([smap[bit] for bit in reversed(iterable)])
else:
seq.extend([smap[bit] for bit in iterable])
except KeyError:
raise BitSequenceError("Invalid binary character in initializer")
def __repr__(self):
smap = {False: '0', True: '1', BitZSequence.Z: 'Z'}
return ''.join([smap[b] for b in reversed(self._seq)])
def __int__(self):
if BitZSequence.Z in self._seq:
raise BitSequenceError("High-Z BitSequence cannot be converted to "
"an integral type")
return BitSequence.__int__(self)
def __cmp__(self, other):
# the bit sequence should be of the same length
ld = len(self) - len(other)
if ld:
return ld
for n, (x, y) in enumerate(zip(self._seq, other.sequence()), start=1):
if x is not y:
return n
return 0
def __and__(self, other):
        if not isinstance(other, BitSequence):
raise BitSequenceError('Need a BitSequence-compliant object to '
'combine')
if len(self) != len(other):
raise BitSequenceError('Sequences must be the same size')
def andz(x, y):
"""Compute the boolean AND operation for a tri-state boolean"""
if BitZSequence.Z in (x, y):
return BitZSequence.Z
return x and y
return self.__class__(
value=list(map(andz, self._seq, other.sequence())))
def __or__(self, other):
        if not isinstance(other, BitSequence):
raise BitSequenceError('Need a BitSequence-compliant object to '
'combine')
if len(self) != len(other):
raise BitSequenceError('Sequences must be the same size')
def orz(x, y):
"""Compute the boolean OR operation for a tri-state boolean"""
if BitZSequence.Z in (x, y):
return BitZSequence.Z
return x or y
return self.__class__(value=list(map(orz, self._seq,
other.sequence())))
def __rand__(self, other):
return self.__and__(other)
def __ror__(self, other):
return self.__or__(other)
def __radd__(self, other):
return self.__class__(value=other) + self
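# Tri-state sketch (illustrative, not from the original module):
#   a = BitZSequence('01Z', msb=True)             # 'Z' is a high-impedance bit
#   a.matches(BitZSequence('011', msb=True))      # -> True: Z matches any bit
#   a.matches(BitZSequence('111', msb=True))      # -> False: b2 differs (0 vs 1)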
class BitField:
"""Bit field class to access and modify an integral value
    Beware: slices do not behave like regular Python slices;
    bitfield[3:5] means b3..b5 (both ends inclusive), NOT b3..b4 as with regular slices
"""
__slots__ = ['_val']
def __init__(self, value=0):
self._val = value
def to_seq(self, msb=0, lsb=0):
"""Return the BitFiled as a sequence of boolean value"""
seq = bytearray()
count = 0
value = self._val
while value:
count += 1
value >>= 1
for x in range(lsb, max(msb, count)):
seq.append(bool((self._val >> x) & 1))
return tuple(reversed(seq))
def __getitem__(self, index):
if isinstance(index, slice):
if index.stop == index.start:
return
if index.stop < index.start:
offset = index.stop
count = index.start-index.stop+1
else:
offset = index.start
count = index.stop-index.start+1
mask = (1 << count)-1
return (self._val >> offset) & mask
return (self._val >> index) & 1
def __setitem__(self, index, value):
if isinstance(index, slice):
if index.stop == index.start:
return
if index.stop < index.start:
offset = index.stop
count = index.start-index.stop+1
else:
offset = index.start
count = index.stop-index.start+1
mask = (1 << count)-1
value = (value & mask) << offset
mask <<= offset
self._val = (self._val & ~mask) | value
else:
if isinstance(value, bool):
value = int(value)
value = (value & int(1)) << index
mask = int(1) << index
self._val = (self._val & ~mask) | value
def __int__(self):
return self._val
def __str__(self):
return bin(self._val)
| 34.67029 | 79 | 0.560717 |
2b1e2df93e792cc6dc00bad680fe6aa24639e1a4 | 1,629 | py | Python | app.py | AndrewManHayChiu/dota_dashboard | d0c99ae2cb63e2e88046573c294258104dbfdfc4 | ["MIT"] | null | null | null | app.py | AndrewManHayChiu/dota_dashboard | d0c99ae2cb63e2e88046573c294258104dbfdfc4 | ["MIT"] | 2 | 2021-08-17T00:54:18.000Z | 2021-08-17T04:21:04.000Z | app.py | AndrewManHayChiu/dota_dashboard | d0c99ae2cb63e2e88046573c294258104dbfdfc4 | ["MIT"] | 1 | 2021-08-13T07:21:01.000Z | 2021-08-13T07:21:01.000Z |
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Colours can also be defined in CSS
colours = {
'background': 'white',
'text': 'green'
}
# Define data
df = pd.DataFrame({
'hero': ['Abaddon', 'Abaddon', 'Abaddon', 'Chaos Knight', 'Lich'],
'match': [1, 2, 3, 4, 5],
'kills': [0, 3, 2, 3, 5],
})
# Define charts
# fig = px.bar(data_frame=df, x='Fruit', y='Amount', barmode='group')
fig = px.scatter(data_frame=df,
x='match', y='kills',
color='hero',
hover_name='hero')
fig.update_layout(
plot_bgcolor=colours['background'],
paper_bgcolor=colours['background'],
font_color=colours['text']
)
app.layout = html.Div(style={}, children=[
html.H1(
children='Dash(board)',
style={
'textAlign': 'left',
}
),
html.Div(
children='Developing...',
style={
'textAlign': 'left'
}
),
html.Label('Multi-Select Dropdown'),
dcc.Dropdown(
options=[
{'label': 'Abaddon', 'value': 'Aba'},
{'label': 'Chaos Knight', 'value': 'CK'},
{'label': 'Lich', 'value': 'Lich'}
],
value=['Aba', 'CK'],
multi=True
),
dcc.Graph(
id='scatter_example',
figure=fig
)
])
if __name__ == '__main__':
app.run_server(debug=True, dev_tools_hot_reload=True)
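# Callback sketch (illustrative; assumes the dropdown above is given an id such
# as 'hero_dropdown' -- as written it is not wired to the graph, and its short
# value codes would need to match the df['hero'] entries):
#   @app.callback(
#       dash.dependencies.Output('scatter_example', 'figure'),
#       [dash.dependencies.Input('hero_dropdown', 'value')])
#   def update_figure(selected_heroes):
#       filtered = df[df['hero'].isin(selected_heroes)]
#       return px.scatter(filtered, x='match', y='kills', color='hero')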
| 22.943662 | 70 | 0.556169 |
9065f4608aca742f800a1ccc3528b1bbc3081ea6 | 1,428 | py | Python | etldjango/etldata/management/commands/utils/urllibmod.py | DavidCastilloAlvarado/opencovid_ETL | 0cd7afcb0e7e6247a01c0aced9aab02b8ad1edaf | ["MIT"] | 5 | 2021-05-21T20:02:34.000Z | 2021-08-04T21:06:19.000Z | etldjango/etldata/management/commands/utils/urllibmod.py | DavidCastilloAlvarado/opencovid_ETL | 0cd7afcb0e7e6247a01c0aced9aab02b8ad1edaf | ["MIT"] | 1 | 2021-06-04T06:17:17.000Z | 2021-06-04T06:17:17.000Z | etldjango/etldata/management/commands/utils/urllibmod.py | DavidCastilloAlvarado/opencovid_ETL | 0cd7afcb0e7e6247a01c0aced9aab02b8ad1edaf | ["MIT"] | null | null | null |
import contextlib
import requests
from tqdm import tqdm
from etldjango.settings import PROXI, PORT_PROXI, IP_PROXI_EXT
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
session = requests.Session()
retry = Retry(connect=5, backoff_factor=3.0)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
def urlretrieve(url, filename, n_bytes=1024):
"""
url: origen source for direct download
filename: where to save the file and what is its name
n_bytes: set the chunk size
"""
if PROXI == 'yes':
print('Internal proxy')
proxies = dict(http='socks5://localhost:'+str(PORT_PROXI),
https='socks5://localhost:'+str(PORT_PROXI))
    elif IP_PROXI_EXT is not None:
print('External proxy')
proxies = dict(http='socks5://{}:'.format(IP_PROXI_EXT)+str(PORT_PROXI),
https='socks5://{}:'.format(IP_PROXI_EXT)+str(PORT_PROXI))
else:
print('NO proxy')
proxies = None
# proxies={"http": "http://201.234.60.82:999"}
with contextlib.closing(session.get(url, stream=True, timeout=10, verify=True, proxies=proxies)) as r:
r.raise_for_status()
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=n_bytes):
f.write(chunk)
return filename, r.headers
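# Usage sketch (illustrative; the URL and destination path are placeholders):
#   fname, headers = urlretrieve('https://example.com/data.csv', '/tmp/data.csv')
#   print(headers.get('Content-Type'))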
| 36.615385 | 106 | 0.659664 |
a8bddcbdabc5fd3afd85d3a094ff6efd1ffccf09 | 9,691 | py | Python | cassandra/concurrent.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | ["Apache-2.0"] | 1,163 | 2015-01-01T03:02:05.000Z | 2022-03-22T13:04:00.000Z | cassandra/concurrent.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | ["Apache-2.0"] | 556 | 2015-01-05T16:39:29.000Z | 2022-03-26T20:51:36.000Z | cassandra/concurrent.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | ["Apache-2.0"] | 449 | 2015-01-05T10:28:59.000Z | 2022-03-14T23:15:32.000Z |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from heapq import heappush, heappop
from itertools import cycle
import six
from six.moves import xrange, zip
from threading import Condition
import sys
from cassandra.cluster import ResultSet
import logging
log = logging.getLogger(__name__)
ExecutionResult = namedtuple('ExecutionResult', ['success', 'result_or_exc'])
def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False):
"""
Executes a sequence of (statement, parameters) tuples concurrently. Each
``parameters`` item must be a sequence or :const:`None`.
The `concurrency` parameter controls how many statements will be executed
concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2,
it is recommended that this be kept below 100 times the number of
core connections per host times the number of connected hosts (see
:meth:`.Cluster.set_core_connections_per_host`). If that amount is exceeded,
the event loop thread may attempt to block on new connection creation,
substantially impacting throughput. If :attr:`~.Cluster.protocol_version`
is 3 or higher, you can safely experiment with higher levels of concurrency.
If `raise_on_first_error` is left as :const:`True`, execution will stop
after the first failed statement and the corresponding exception will be
raised.
`results_generator` controls how the results are returned.
* If :const:`False`, the results are returned only after all requests have completed.
* If :const:`True`, a generator expression is returned. Using a generator results in a constrained
      memory footprint when the result set will be large -- results are yielded
      as they return instead of materializing the entire list at once. The trade-off for the
      lower memory footprint is marginal CPU overhead (more thread coordination and sorting
      of out-of-order results on-the-fly).
A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned
in the same order that the statements were passed in. If ``success`` is :const:`False`,
there was an error executing the statement, and ``result_or_exc`` will be
an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc``
will be the query result.
Example usage::
select_statement = session.prepare("SELECT * FROM users WHERE id=?")
statements_and_params = []
for user_id in user_ids:
params = (user_id, )
statements_and_params.append((select_statement, params))
results = execute_concurrent(
session, statements_and_params, raise_on_first_error=False)
for (success, result) in results:
if not success:
handle_error(result) # result will be an Exception
else:
process_user(result[0]) # result will be a list of rows
Note: in the case that `generators` are used, it is important to ensure the consumers do not
block or attempt further synchronous requests, because no further IO will be processed until
the consumer returns. This may also produce a deadlock in the IO event thread.
"""
if concurrency <= 0:
raise ValueError("concurrency must be greater than 0")
if not statements_and_parameters:
return []
executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters)
return executor.execute(concurrency, raise_on_first_error)
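# Generator-mode sketch (illustrative; complements the docstring example above):
#   for success, result_or_exc in execute_concurrent(
#           session, statements_and_params, results_generator=True):
#       # items arrive in submission order as soon as they are ready
#       ...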
class _ConcurrentExecutor(object):
max_error_recursion = 100
def __init__(self, session, statements_and_params):
self.session = session
self._enum_statements = enumerate(iter(statements_and_params))
self._condition = Condition()
self._fail_fast = False
self._results_queue = []
self._current = 0
self._exec_count = 0
self._exec_depth = 0
def execute(self, concurrency, fail_fast):
self._fail_fast = fail_fast
self._results_queue = []
self._current = 0
self._exec_count = 0
with self._condition:
for n in xrange(concurrency):
if not self._execute_next():
break
return self._results()
def _execute_next(self):
# lock must be held
try:
(idx, (statement, params)) = next(self._enum_statements)
self._exec_count += 1
self._execute(idx, statement, params)
return True
except StopIteration:
pass
def _execute(self, idx, statement, params):
self._exec_depth += 1
try:
future = self.session.execute_async(statement, params, timeout=None)
args = (future, idx)
future.add_callbacks(
callback=self._on_success, callback_args=args,
errback=self._on_error, errback_args=args)
except Exception as exc:
# exc_info with fail_fast to preserve stack trace info when raising on the client thread
# (matches previous behavior -- not sure why we wouldn't want stack trace in the other case)
e = sys.exc_info() if self._fail_fast and six.PY2 else exc
# If we're not failing fast and all executions are raising, there is a chance of recursing
# here as subsequent requests are attempted. If we hit this threshold, schedule this result/retry
# and let the event loop thread return.
if self._exec_depth < self.max_error_recursion:
self._put_result(e, idx, False)
else:
self.session.submit(self._put_result, e, idx, False)
self._exec_depth -= 1
def _on_success(self, result, future, idx):
future.clear_callbacks()
self._put_result(ResultSet(future, result), idx, True)
def _on_error(self, result, future, idx):
self._put_result(result, idx, False)
@staticmethod
def _raise(exc):
if six.PY2 and isinstance(exc, tuple):
(exc_type, value, traceback) = exc
six.reraise(exc_type, value, traceback)
else:
raise exc
class ConcurrentExecutorGenResults(_ConcurrentExecutor):
def _put_result(self, result, idx, success):
with self._condition:
heappush(self._results_queue, (idx, ExecutionResult(success, result)))
self._execute_next()
self._condition.notify()
def _results(self):
with self._condition:
while self._current < self._exec_count:
while not self._results_queue or self._results_queue[0][0] != self._current:
self._condition.wait()
while self._results_queue and self._results_queue[0][0] == self._current:
_, res = heappop(self._results_queue)
try:
self._condition.release()
if self._fail_fast and not res[0]:
self._raise(res[1])
yield res
finally:
self._condition.acquire()
self._current += 1
class ConcurrentExecutorListResults(_ConcurrentExecutor):
_exception = None
def execute(self, concurrency, fail_fast):
self._exception = None
return super(ConcurrentExecutorListResults, self).execute(concurrency, fail_fast)
def _put_result(self, result, idx, success):
self._results_queue.append((idx, ExecutionResult(success, result)))
with self._condition:
self._current += 1
if not success and self._fail_fast:
if not self._exception:
self._exception = result
self._condition.notify()
elif not self._execute_next() and self._current == self._exec_count:
self._condition.notify()
def _results(self):
with self._condition:
while self._current < self._exec_count:
self._condition.wait()
if self._exception and self._fail_fast:
self._raise(self._exception)
if self._exception and self._fail_fast: # raise the exception even if there was no wait
self._raise(self._exception)
return [r[1] for r in sorted(self._results_queue)]
def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs):
"""
Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single
statement and a sequence of parameters. Each item in ``parameters``
should be a sequence or :const:`None`.
Example usage::
statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)")
parameters = [(x,) for x in range(1000)]
execute_concurrent_with_args(session, statement, parameters, concurrency=50)
"""
return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs)
| 40.548117 | 171 | 0.663193 |
4a84e10b032741fc8ea536543a2b52b88f983ea4 | 3,508 | py | Python | realbrowserlocusts/locusts.py | liveteched/realbrowserlocusts | a02ea672e122266d8f3fb8954c5c0db634295cb8 | ["MIT"] | null | null | null | realbrowserlocusts/locusts.py | liveteched/realbrowserlocusts | a02ea672e122266d8f3fb8954c5c0db634295cb8 | ["MIT"] | null | null | null | realbrowserlocusts/locusts.py | liveteched/realbrowserlocusts | a02ea672e122266d8f3fb8954c5c0db634295cb8 | ["MIT"] | null | null | null |
# pylint:disable=too-few-public-methods
""" Combine Locust with Selenium Web Driver """
import logging
from os import getenv as os_getenv
from locust import User
from locust.exception import LocustError
from selenium import webdriver
from realbrowserlocusts.core import RealBrowserClient
_LOGGER = logging.getLogger(__name__)
class RealBrowserLocust(User):
"""
This is the abstract Locust class which should be subclassed.
"""
client = None
timeout = 30
screen_width = None
screen_height = None
def __init__(self, *args, **kwargs):
super(RealBrowserLocust, self).__init__(*args, **kwargs)
if self.screen_width is None:
raise LocustError("You must specify a screen_width "
"for the browser")
if self.screen_height is None:
raise LocustError("You must specify a screen_height "
"for the browser")
self.proxy_server = os_getenv("LOCUST_BROWSER_PROXY", None)
class ChromeLocust(RealBrowserLocust):
"""
    Provides a Chrome webdriver that logs GETs and waits to Locust
"""
abstract = True
def __init__(self, *args, **kwargs):
super(ChromeLocust, self).__init__(*args, **kwargs)
options = webdriver.ChromeOptions()
if self.proxy_server:
_LOGGER.info('Using proxy: ' + self.proxy_server)
options.add_argument('proxy-server={}'.format(self.proxy_server))
self.client = RealBrowserClient(
webdriver.Chrome(chrome_options=options),
self.timeout,
self.screen_width,
self.screen_height
)
class HeadlessChromeLocust(RealBrowserLocust):
"""
    Provides a headless Chrome webdriver that logs GETs and waits to Locust
"""
abstract = True
def __init__(self, *args, **kwargs):
super(HeadlessChromeLocust, self).__init__(*args, **kwargs)
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size={}x{}'.format(
self.screen_width, self.screen_height
))
options.add_argument('disable-gpu')
if self.proxy_server:
_LOGGER.info('Using proxy: ' + self.proxy_server)
options.add_argument('proxy-server={}'.format(self.proxy_server))
driver = webdriver.Chrome(chrome_options=options)
_LOGGER.info('Actually trying to run headless Chrome')
self.client = RealBrowserClient(
driver,
self.timeout,
self.screen_width,
self.screen_height,
set_window=False
)
class FirefoxLocust(RealBrowserLocust):
"""
    Provides a Firefox webdriver that logs GETs and waits to Locust
"""
abstract = True
def __init__(self, *args, **kwargs):
super(FirefoxLocust, self).__init__(*args, **kwargs)
self.client = RealBrowserClient(
webdriver.Firefox(),
self.timeout,
self.screen_width,
self.screen_height
)
class PhantomJSLocust(RealBrowserLocust):
"""
    Provides a PhantomJS webdriver that logs GETs and waits to Locust
"""
abstract = True
def __init__(self, *args, **kwargs):
super(PhantomJSLocust, self).__init__(*args, **kwargs)
self.client = RealBrowserClient(
webdriver.PhantomJS(),
self.timeout,
self.screen_width,
self.screen_height
)
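# Subclassing sketch (illustrative; the class name and dimensions are hypothetical):
#   class MyHeadlessUser(HeadlessChromeLocust):
#       screen_width = 1280
#       screen_height = 800
#       # define locust @task methods that drive self.client here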
| 32.785047 | 77 | 0.633409 |
b9d1cbdd643ba76659f4169993e188320ff08dc6 | 19,111 | py | Python | c_model.py | LonglifeHyun/GANda_text-to-image | 095ded617e4df7d7ff7f4954381dde77db6d6883 | ["MIT"] | null | null | null | c_model.py | LonglifeHyun/GANda_text-to-image | 095ded617e4df7d7ff7f4954381dde77db6d6883 | ["MIT"] | null | null | null | c_model.py | LonglifeHyun/GANda_text-to-image | 095ded617e4df7d7ff7f4954381dde77db6d6883 | ["MIT"] | null | null | null |
import os
import numpy as np
import torch
import torch.nn as nn
from layer import *
## Build the networks
# CycleGAN
# https://arxiv.org/pdf/1703.10593.pdf
class CycleGAN(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, norm='bnorm', nblk=6):
super(CycleGAN, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.nker = nker
self.norm = norm
self.nblk = nblk
if norm == 'bnorm':
self.bias = False
else:
self.bias = True
self.enc1 = CBR2d(self.in_channels, 1 * self.nker, kernel_size=7, stride=1, padding=3, norm=self.norm, relu=0.0)
self.enc2 = CBR2d(1 * self.nker, 2 * self.nker, kernel_size=3, stride=2, padding=1, norm=self.norm, relu=0.0)
self.enc3 = CBR2d(2 * self.nker, 4 * self.nker, kernel_size=3, stride=2, padding=1, norm=self.norm, relu=0.0)
if self.nblk:
res = []
for i in range(self.nblk):
res += [ResBlock(4 * self.nker, 4 * self.nker, kernel_size=3, stride=1, padding=1, norm=self.norm, relu=0.0)]
self.res = nn.Sequential(*res)
self.dec3 = DECBR2d(4 * self.nker, 2 * self.nker, kernel_size=3, stride=2, padding=1, norm=self.norm, relu=0.0)
self.dec2 = DECBR2d(2 * self.nker, 1 * self.nker, kernel_size=3, stride=2, padding=1, norm=self.norm, relu=0.0)
self.dec1 = CBR2d(1 * self.nker, self.out_channels, kernel_size=7, stride=1, padding=3, norm=None, relu=None)
def forward(self, x):
x = self.enc1(x)
x = self.enc2(x)
x = self.enc3(x)
x = self.res(x)
x = self.dec3(x)
x = self.dec2(x)
x = self.dec1(x)
x = torch.tanh(x)
return x
# Pix2Pix
# https://arxiv.org/pdf/1611.07004.pdf
class Pix2Pix(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, norm="bnorm"):
super(Pix2Pix, self).__init__()
self.enc1 = CBR2d(in_channels, 1 * nker, kernel_size=4, padding=1,
norm=None, relu=0.2, stride=2)
self.enc2 = CBR2d(1 * nker, 2 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.enc3 = CBR2d(2 * nker, 4 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.enc4 = CBR2d(4 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.enc5 = CBR2d(8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.enc6 = CBR2d(8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.enc7 = CBR2d(8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.enc8 = CBR2d(8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.2, stride=2)
self.dec1 = DECBR2d(8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.drop1 = nn.Dropout2d(0.5)
self.dec2 = DECBR2d(2 * 8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.drop2 = nn.Dropout2d(0.5)
self.dec3 = DECBR2d(2 * 8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.drop3 = nn.Dropout2d(0.5)
self.dec4 = DECBR2d(2 * 8 * nker, 8 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.dec5 = DECBR2d(2 * 8 * nker, 4 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.dec6 = DECBR2d(2 * 4 * nker, 2 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.dec7 = DECBR2d(2 * 2 * nker, 1 * nker, kernel_size=4, padding=1,
norm=norm, relu=0.0, stride=2)
self.dec8 = DECBR2d(2 * 1 * nker, out_channels, kernel_size=4, padding=1,
norm=None, relu=None, stride=2)
def forward(self, x):
enc1 = self.enc1(x)
enc2 = self.enc2(enc1)
enc3 = self.enc3(enc2)
enc4 = self.enc4(enc3)
enc5 = self.enc5(enc4)
enc6 = self.enc6(enc5)
enc7 = self.enc7(enc6)
enc8 = self.enc8(enc7)
dec1 = self.dec1(enc8)
drop1 = self.drop1(dec1)
cat2 = torch.cat((drop1, enc7), dim=1)
dec2 = self.dec2(cat2)
drop2 = self.drop2(dec2)
cat3 = torch.cat((drop2, enc6), dim=1)
dec3 = self.dec3(cat3)
drop3 = self.drop3(dec3)
cat4 = torch.cat((drop3, enc5), dim=1)
dec4 = self.dec4(cat4)
cat5 = torch.cat((dec4, enc4), dim=1)
dec5 = self.dec5(cat5)
cat6 = torch.cat((dec5, enc3), dim=1)
dec6 = self.dec6(cat6)
cat7 = torch.cat((dec6, enc2), dim=1)
dec7 = self.dec7(cat7)
cat8 = torch.cat((dec7, enc1), dim=1)
dec8 = self.dec8(cat8)
x = torch.tanh(dec8)
return x
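# Shape sketch (illustrative; assumes DECBR2d wraps nn.ConvTranspose2d with the
# given kernel/stride/padding, so each decoder stage doubles H and W, and the
# input is 256x256 so enc8 bottoms out at 1x1):
#   G = Pix2Pix(in_channels=3, out_channels=3)
#   G(torch.randn(1, 3, 256, 256)).shape   # -> torch.Size([1, 3, 256, 256])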
# DCGAN
# https://arxiv.org/pdf/1511.06434.pdf
class DCGAN(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, norm="bnorm"):
super(DCGAN, self).__init__()
self.dec1 = DECBR2d(1 * in_channels, 8 * nker, kernel_size=4, stride=1,
padding=0, norm=norm, relu=0.0, bias=False)
self.dec2 = DECBR2d(8 * nker, 4 * nker, kernel_size=4, stride=2,
padding=1, norm=norm, relu=0.0, bias=False)
self.dec3 = DECBR2d(4 * nker, 2 * nker, kernel_size=4, stride=2,
padding=1, norm=norm, relu=0.0, bias=False)
self.dec4 = DECBR2d(2 * nker, 1 * nker, kernel_size=4, stride=2,
padding=1, norm=norm, relu=0.0, bias=False)
self.dec5 = DECBR2d(1 * nker, out_channels, kernel_size=4, stride=2,
padding=1, norm=None, relu=None, bias=False)
def forward(self, x):
x = self.dec1(x)
x = self.dec2(x)
x = self.dec3(x)
x = self.dec4(x)
x = self.dec5(x)
x = torch.tanh(x)
return x
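# Shape sketch (illustrative; same DECBR2d assumption as above):
#   G = DCGAN(in_channels=100, out_channels=3)
#   z = torch.randn(16, 100, 1, 1)
#   G(z).shape   # -> torch.Size([16, 3, 64, 64]): 1 -> 4 -> 8 -> 16 -> 32 -> 64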
class Discriminator(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, norm="bnorm"):
super(Discriminator, self).__init__()
self.enc1 = CBR2d(1 * in_channels, 1 * nker, kernel_size=4, stride=2,
padding=1, norm=None, relu=0.2, bias=False)
self.enc2 = CBR2d(1 * nker, 2 * nker, kernel_size=4, stride=2,
padding=1, norm=norm, relu=0.2, bias=False)
self.enc3 = CBR2d(2 * nker, 4 * nker, kernel_size=4, stride=2,
padding=1, norm=norm, relu=0.2, bias=False)
self.enc4 = CBR2d(4 * nker, 8 * nker, kernel_size=4, stride=2,
padding=1, norm=norm, relu=0.2, bias=False)
self.enc5 = CBR2d(8 * nker, out_channels, kernel_size=4, stride=2,
padding=1, norm=None, relu=None, bias=False)
def forward(self, x):
x = self.enc1(x)
x = self.enc2(x)
x = self.enc3(x)
x = self.enc4(x)
x = self.enc5(x)
x = torch.sigmoid(x)
return x
# U-Net: Convolutional Networks for Biomedical Image Segmentation
# https://arxiv.org/abs/1505.04597
class UNet(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, learning_type="plain", norm="bnorm"):
super(UNet, self).__init__()
self.learning_type = learning_type
# Contracting path
self.enc1_1 = CBR2d(in_channels=in_channels, out_channels=1 * nker, norm=norm)
self.enc1_2 = CBR2d(in_channels=1 * nker, out_channels=1 * nker, norm=norm)
self.pool1 = nn.MaxPool2d(kernel_size=2)
self.enc2_1 = CBR2d(in_channels=nker, out_channels=2 * nker, norm=norm)
self.enc2_2 = CBR2d(in_channels=2 * nker, out_channels=2 * nker, norm=norm)
self.pool2 = nn.MaxPool2d(kernel_size=2)
self.enc3_1 = CBR2d(in_channels=2 * nker, out_channels=4 * nker, norm=norm)
self.enc3_2 = CBR2d(in_channels=4 * nker, out_channels=4 * nker, norm=norm)
self.pool3 = nn.MaxPool2d(kernel_size=2)
self.enc4_1 = CBR2d(in_channels=4 * nker, out_channels=8 * nker, norm=norm)
self.enc4_2 = CBR2d(in_channels=8 * nker, out_channels=8 * nker, norm=norm)
self.pool4 = nn.MaxPool2d(kernel_size=2)
self.enc5_1 = CBR2d(in_channels=8 * nker, out_channels=16 * nker, norm=norm)
# Expansive path
self.dec5_1 = CBR2d(in_channels=16 * nker, out_channels=8 * nker, norm=norm)
self.unpool4 = nn.ConvTranspose2d(in_channels=8 * nker, out_channels=8 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec4_2 = CBR2d(in_channels=2 * 8 * nker, out_channels=8 * nker, norm=norm)
self.dec4_1 = CBR2d(in_channels=8 * nker, out_channels=4 * nker, norm=norm)
self.unpool3 = nn.ConvTranspose2d(in_channels=4 * nker, out_channels=4 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec3_2 = CBR2d(in_channels=2 * 4 * nker, out_channels=4 * nker, norm=norm)
self.dec3_1 = CBR2d(in_channels=4 * nker, out_channels=2 * nker, norm=norm)
self.unpool2 = nn.ConvTranspose2d(in_channels=2 * nker, out_channels=2 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec2_2 = CBR2d(in_channels=2 * 2 * nker, out_channels=2 * nker, norm=norm)
self.dec2_1 = CBR2d(in_channels=2 * nker, out_channels=1 * nker, norm=norm)
self.unpool1 = nn.ConvTranspose2d(in_channels=1 * nker, out_channels=1 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec1_2 = CBR2d(in_channels=2 * 1 * nker, out_channels=1 * nker, norm=norm)
self.dec1_1 = CBR2d(in_channels=1 * nker, out_channels=1 * nker, norm=norm)
self.fc = nn.Conv2d(in_channels=1 * nker, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x):
enc1_1 = self.enc1_1(x)
enc1_2 = self.enc1_2(enc1_1)
pool1 = self.pool1(enc1_2)
enc2_1 = self.enc2_1(pool1)
enc2_2 = self.enc2_2(enc2_1)
pool2 = self.pool2(enc2_2)
enc3_1 = self.enc3_1(pool2)
enc3_2 = self.enc3_2(enc3_1)
pool3 = self.pool3(enc3_2)
enc4_1 = self.enc4_1(pool3)
enc4_2 = self.enc4_2(enc4_1)
pool4 = self.pool4(enc4_2)
enc5_1 = self.enc5_1(pool4)
dec5_1 = self.dec5_1(enc5_1)
unpool4 = self.unpool4(dec5_1)
cat4 = torch.cat((unpool4, enc4_2), dim=1)
dec4_2 = self.dec4_2(cat4)
dec4_1 = self.dec4_1(dec4_2)
unpool3 = self.unpool3(dec4_1)
cat3 = torch.cat((unpool3, enc3_2), dim=1)
dec3_2 = self.dec3_2(cat3)
dec3_1 = self.dec3_1(dec3_2)
unpool2 = self.unpool2(dec3_1)
cat2 = torch.cat((unpool2, enc2_2), dim=1)
dec2_2 = self.dec2_2(cat2)
dec2_1 = self.dec2_1(dec2_2)
unpool1 = self.unpool1(dec2_1)
cat1 = torch.cat((unpool1, enc1_2), dim=1)
dec1_2 = self.dec1_2(cat1)
dec1_1 = self.dec1_1(dec1_2)
if self.learning_type == "plain":
x = self.fc(dec1_1)
elif self.learning_type == "residual":
x = x + self.fc(dec1_1)
return x
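# Shape sketch (illustrative; assumes CBR2d preserves spatial size (3x3 conv,
# padding 1) and that input H, W are divisible by 16 to survive the four 2x poolings):
#   net = UNet(in_channels=1, out_channels=1, learning_type="residual")
#   net(torch.randn(1, 1, 64, 64)).shape   # -> torch.Size([1, 1, 64, 64])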
class Hourglass(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, learning_type="plain", norm="bnorm"):
super(Hourglass, self).__init__()
self.learning_type = learning_type
# Contracting path
self.enc1_1 = CBR2d(in_channels=in_channels, out_channels=1 * nker, norm=norm)
self.enc1_2 = CBR2d(in_channels=1 * nker, out_channels=1 * nker, norm=norm)
self.pool1 = nn.MaxPool2d(kernel_size=2)
self.enc2_1 = CBR2d(in_channels=1 * nker, out_channels=2 * nker, norm=norm)
self.enc2_2 = CBR2d(in_channels=2 * nker, out_channels=2 * nker, norm=norm)
self.pool2 = nn.MaxPool2d(kernel_size=2)
self.enc3_1 = CBR2d(in_channels=2 * nker, out_channels=4 * nker, norm=norm)
self.enc3_2 = CBR2d(in_channels=4 * nker, out_channels=4 * nker, norm=norm)
self.pool3 = nn.MaxPool2d(kernel_size=2)
self.enc4_1 = CBR2d(in_channels=4 * nker, out_channels=8 * nker, norm=norm)
self.enc4_2 = CBR2d(in_channels=8 * nker, out_channels=8 * nker, norm=norm)
self.pool4 = nn.MaxPool2d(kernel_size=2)
self.enc5_1 = CBR2d(in_channels=8 * nker, out_channels=16 * nker, norm=norm)
# Expansive path
self.dec5_1 = CBR2d(in_channels=16 * nker, out_channels=8 * nker, norm=norm)
self.unpool4 = nn.ConvTranspose2d(in_channels=8 * nker, out_channels=8 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec4_2 = CBR2d(in_channels=1 * 8 * nker, out_channels=8 * nker, norm=norm)
self.dec4_1 = CBR2d(in_channels=8 * nker, out_channels=4 * nker, norm=norm)
self.unpool3 = nn.ConvTranspose2d(in_channels=4 * nker, out_channels=4 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec3_2 = CBR2d(in_channels=1 * 4 * nker, out_channels=4 * nker, norm=norm)
self.dec3_1 = CBR2d(in_channels=4 * nker, out_channels=2 * nker, norm=norm)
self.unpool2 = nn.ConvTranspose2d(in_channels=2 * nker, out_channels=2 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec2_2 = CBR2d(in_channels=1 * 2 * nker, out_channels=2 * nker, norm=norm)
self.dec2_1 = CBR2d(in_channels=2 * nker, out_channels=1 * nker, norm=norm)
self.unpool1 = nn.ConvTranspose2d(in_channels=1 * nker, out_channels=1 * nker,
kernel_size=2, stride=2, padding=0, bias=True)
self.dec1_2 = CBR2d(in_channels=1 * 1 * nker, out_channels=1 * nker, norm=norm)
self.dec1_1 = CBR2d(in_channels=1 * nker, out_channels=1 * nker, norm=norm)
self.fc = CBR2d(in_channels=1 * nker, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=True, norm=None, relu=None)
def forward(self, x):
enc1_1 = self.enc1_1(x)
enc1_2 = self.enc1_2(enc1_1)
pool1 = self.pool1(enc1_2)
enc2_1 = self.enc2_1(pool1)
enc2_2 = self.enc2_2(enc2_1)
pool2 = self.pool2(enc2_2)
enc3_1 = self.enc3_1(pool2)
enc3_2 = self.enc3_2(enc3_1)
pool3 = self.pool3(enc3_2)
enc4_1 = self.enc4_1(pool3)
enc4_2 = self.enc4_2(enc4_1)
pool4 = self.pool4(enc4_2)
enc5_1 = self.enc5_1(pool4)
dec5_1 = self.dec5_1(enc5_1)
unpool4 = self.unpool4(dec5_1)
# cat4 = torch.cat((unpool4, enc4_2), dim=1)
cat4 = unpool4
dec4_2 = self.dec4_2(cat4)
dec4_1 = self.dec4_1(dec4_2)
unpool3 = self.unpool3(dec4_1)
# cat3 = torch.cat((unpool3, enc3_2), dim=1)
cat3 = unpool3
dec3_2 = self.dec3_2(cat3)
dec3_1 = self.dec3_1(dec3_2)
unpool2 = self.unpool2(dec3_1)
# cat2 = torch.cat((unpool2, enc2_2), dim=1)
cat2 = unpool2
dec2_2 = self.dec2_2(cat2)
dec2_1 = self.dec2_1(dec2_2)
unpool1 = self.unpool1(dec2_1)
# cat1 = torch.cat((unpool1, enc1_2), dim=1)
cat1 = unpool1
dec1_2 = self.dec1_2(cat1)
dec1_1 = self.dec1_1(dec1_2)
if self.learning_type == "plain":
x = self.fc(dec1_1)
elif self.learning_type == "residual":
x = x + self.fc(dec1_1)
return x
# Deep Residual Learning for Image Recognition
# https://arxiv.org/abs/1512.03385
class ResNet(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, learning_type="plain", norm="bnorm", nblk=16):
super(ResNet, self).__init__()
self.learning_type = learning_type
self.enc = CBR2d(in_channels, nker, kernel_size=3, stride=1, padding=1, bias=True, norm=None, relu=0.0)
res = []
for i in range(nblk):
res += [ResBlock(nker, nker, kernel_size=3, stride=1, padding=1, bias=True, norm=norm, relu=0.0)]
self.res = nn.Sequential(*res)
self.dec = CBR2d(nker, nker, kernel_size=3, stride=1, padding=1, bias=True, norm=norm, relu=0.0)
self.fc = CBR2d(nker, out_channels, kernel_size=1, stride=1, padding=0, bias=True, norm=None, relu=None)
def forward(self, x):
x0 = x
x = self.enc(x)
x = self.res(x)
x = self.dec(x)
if self.learning_type == "plain":
x = self.fc(x)
elif self.learning_type == "residual":
x = x0 + self.fc(x)
return x
# Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
# https://arxiv.org/abs/1609.04802
class SRResNet(nn.Module):
def __init__(self, in_channels, out_channels, nker=64, learning_type="plain", norm="bnorm", nblk=16):
super(SRResNet, self).__init__()
self.learning_type = learning_type
self.enc = CBR2d(in_channels, nker, kernel_size=9, stride=1, padding=4, bias=True, norm=None, relu=0.0)
res = []
for i in range(nblk):
res += [ResBlock(nker, nker, kernel_size=3, stride=1, padding=1, bias=True, norm=norm, relu=0.0)]
self.res = nn.Sequential(*res)
self.dec = CBR2d(nker, nker, kernel_size=3, stride=1, padding=1, bias=True, norm=norm, relu=None)
# ps1 = []
# ps1 += [nn.Conv2d(in_channels=nker, out_channels=nker, kernel_size=3, stride=1, padding=1)]
# ps1 += [nn.ReLU()]
# self.ps1 = nn.Sequential(*ps1)
#
# ps2 = []
# ps2 += [nn.Conv2d(in_channels=nker, out_channels=nker, kernel_size=3, stride=1, padding=1)]
# ps2 += [nn.ReLU()]
# self.ps2 = nn.Sequential(*ps2)
ps1 = []
ps1 += [nn.Conv2d(in_channels=nker, out_channels=4 * nker, kernel_size=3, stride=1, padding=1)]
ps1 += [PixelShuffle(ry=2, rx=2)]
ps1 += [nn.ReLU()]
self.ps1 = nn.Sequential(*ps1)
ps2 = []
ps2 += [nn.Conv2d(in_channels=nker, out_channels=4 * nker, kernel_size=3, stride=1, padding=1)]
ps2 += [PixelShuffle(ry=2, rx=2)]
ps2 += [nn.ReLU()]
self.ps2 = nn.Sequential(*ps2)
self.fc = CBR2d(nker, out_channels, kernel_size=9, stride=1, padding=4, bias=True, norm=None, relu=None)
def forward(self, x):
x = self.enc(x)
x0 = x
x = self.res(x)
x = self.dec(x)
x = x + x0
x = self.ps1(x)
x = self.ps2(x)
x = self.fc(x)
return x
| 35.522305 | 141 | 0.578358 |
10884a87b2933940dbef93d6b5e3f9a3af997e0a | 24,578 | py | Python | components/spiffs/spiffsgen.py | hasiflo/esp-idf | 1ca2afd982230e4cbf021c58137f3141c1870572 | ["Apache-2.0"] | 1 | 2021-05-24T07:39:12.000Z | 2021-05-24T07:39:12.000Z | components/spiffs/spiffsgen.py | hasiflo/esp-idf | 1ca2afd982230e4cbf021c58137f3141c1870572 | ["Apache-2.0"] | null | null | null | components/spiffs/spiffsgen.py | hasiflo/esp-idf | 1ca2afd982230e4cbf021c58137f3141c1870572 | ["Apache-2.0"] | 1 | 2021-05-29T06:35:09.000Z | 2021-05-29T06:35:09.000Z |
#!/usr/bin/env python
#
# spiffsgen is a tool used to generate a spiffs image from a directory
#
# Copyright 2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import argparse
import io
import math
import os
import struct
import sys
try:
import typing
TSP = typing.TypeVar('TSP', bound='SpiffsObjPageWithIdx')
ObjIdsItem = typing.Tuple[int, typing.Type[TSP]]
except ImportError:
pass
SPIFFS_PH_FLAG_USED_FINAL_INDEX = 0xF8
SPIFFS_PH_FLAG_USED_FINAL = 0xFC
SPIFFS_PH_FLAG_LEN = 1
SPIFFS_PH_IX_SIZE_LEN = 4
SPIFFS_PH_IX_OBJ_TYPE_LEN = 1
SPIFFS_TYPE_FILE = 1
# Based on typedefs under spiffs_config.h
SPIFFS_OBJ_ID_LEN = 2 # spiffs_obj_id
SPIFFS_SPAN_IX_LEN = 2 # spiffs_span_ix
SPIFFS_PAGE_IX_LEN = 2 # spiffs_page_ix
SPIFFS_BLOCK_IX_LEN = 2 # spiffs_block_ix
class SpiffsBuildConfig(object):
def __init__(self,
page_size, # type: int
page_ix_len, # type: int
block_size, # type: int
block_ix_len, # type: int
meta_len, # type: int
obj_name_len, # type: int
obj_id_len, # type: int
span_ix_len, # type: int
packed, # type: bool
aligned, # type: bool
endianness, # type: str
use_magic, # type: bool
use_magic_len, # type: bool
aligned_obj_ix_tables # type: bool
):
if block_size % page_size != 0:
raise RuntimeError('block size should be a multiple of page size')
self.page_size = page_size
self.block_size = block_size
self.obj_id_len = obj_id_len
self.span_ix_len = span_ix_len
self.packed = packed
self.aligned = aligned
self.obj_name_len = obj_name_len
self.meta_len = meta_len
self.page_ix_len = page_ix_len
self.block_ix_len = block_ix_len
self.endianness = endianness
self.use_magic = use_magic
self.use_magic_len = use_magic_len
self.aligned_obj_ix_tables = aligned_obj_ix_tables
self.PAGES_PER_BLOCK = self.block_size // self.page_size
self.OBJ_LU_PAGES_PER_BLOCK = int(math.ceil(self.block_size / self.page_size * self.obj_id_len / self.page_size))
self.OBJ_USABLE_PAGES_PER_BLOCK = self.PAGES_PER_BLOCK - self.OBJ_LU_PAGES_PER_BLOCK
self.OBJ_LU_PAGES_OBJ_IDS_LIM = self.page_size // self.obj_id_len
self.OBJ_DATA_PAGE_HEADER_LEN = self.obj_id_len + self.span_ix_len + SPIFFS_PH_FLAG_LEN
pad = 4 - (4 if self.OBJ_DATA_PAGE_HEADER_LEN % 4 == 0 else self.OBJ_DATA_PAGE_HEADER_LEN % 4)
self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED = self.OBJ_DATA_PAGE_HEADER_LEN + pad
self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED_PAD = pad
self.OBJ_DATA_PAGE_CONTENT_LEN = self.page_size - self.OBJ_DATA_PAGE_HEADER_LEN
self.OBJ_INDEX_PAGES_HEADER_LEN = (self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED + SPIFFS_PH_IX_SIZE_LEN +
SPIFFS_PH_IX_OBJ_TYPE_LEN + self.obj_name_len + self.meta_len)
if aligned_obj_ix_tables:
self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED = (self.OBJ_INDEX_PAGES_HEADER_LEN + SPIFFS_PAGE_IX_LEN - 1) & ~(SPIFFS_PAGE_IX_LEN - 1)
self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED_PAD = self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED - self.OBJ_INDEX_PAGES_HEADER_LEN
else:
self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED = self.OBJ_INDEX_PAGES_HEADER_LEN
self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED_PAD = 0
self.OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM = (self.page_size - self.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED) // self.block_ix_len
self.OBJ_INDEX_PAGES_OBJ_IDS_LIM = (self.page_size - self.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED) // self.block_ix_len
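        # Worked example with the generator defaults (page_size=256,
        # block_size=4096, obj_id_len=2):
        #   PAGES_PER_BLOCK            = 4096 // 256 = 16
        #   OBJ_LU_PAGES_PER_BLOCK     = ceil(16 * 2 / 256) = 1
        #   OBJ_USABLE_PAGES_PER_BLOCK = 16 - 1 = 15
        #   OBJ_LU_PAGES_OBJ_IDS_LIM   = 256 // 2 = 128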
class SpiffsFullError(RuntimeError):
pass
class SpiffsPage(object):
_endianness_dict = {
'little': '<',
'big': '>'
}
_len_dict = {
1: 'B',
2: 'H',
4: 'I',
8: 'Q'
}
def __init__(self, bix, build_config): # type: (int, SpiffsBuildConfig) -> None
self.build_config = build_config
self.bix = bix
def to_binary(self): # type: () -> bytes
raise NotImplementedError()
class SpiffsObjPageWithIdx(SpiffsPage):
def __init__(self, obj_id, build_config): # type: (int, SpiffsBuildConfig) -> None
super(SpiffsObjPageWithIdx, self).__init__(0, build_config)
self.obj_id = obj_id
def to_binary(self): # type: () -> bytes
raise NotImplementedError()
class SpiffsObjLuPage(SpiffsPage):
def __init__(self, bix, build_config): # type: (int, SpiffsBuildConfig) -> None
SpiffsPage.__init__(self, bix, build_config)
self.obj_ids_limit = self.build_config.OBJ_LU_PAGES_OBJ_IDS_LIM
self.obj_ids = list() # type: typing.List[ObjIdsItem]
def _calc_magic(self, blocks_lim): # type: (int) -> int
# Calculate the magic value mirroring computation done by the macro SPIFFS_MAGIC defined in
# spiffs_nucleus.h
magic = 0x20140529 ^ self.build_config.page_size
if self.build_config.use_magic_len:
magic = magic ^ (blocks_lim - self.bix)
# narrow the result to build_config.obj_id_len bytes
mask = (2 << (8 * self.build_config.obj_id_len)) - 1
return magic & mask
def register_page(self, page): # type: (TSP) -> None
if not self.obj_ids_limit > 0:
raise SpiffsFullError()
obj_id = (page.obj_id, page.__class__)
self.obj_ids.append(obj_id)
self.obj_ids_limit -= 1
def to_binary(self): # type: () -> bytes
img = b''
for (obj_id, page_type) in self.obj_ids:
if page_type == SpiffsObjIndexPage:
obj_id ^= (1 << ((self.build_config.obj_id_len * 8) - 1))
img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.obj_id_len], obj_id)
assert(len(img) <= self.build_config.page_size)
img += b'\xFF' * (self.build_config.page_size - len(img))
return img
def magicfy(self, blocks_lim): # type: (int) -> None
# Only use magic value if no valid obj id has been written to the spot, which is the
# spot taken up by the last obj id on last lookup page. The parent is responsible
# for determining which is the last lookup page and calling this function.
remaining = self.obj_ids_limit
empty_obj_id_dict = {
1: 0xFF,
2: 0xFFFF,
4: 0xFFFFFFFF,
8: 0xFFFFFFFFFFFFFFFF
}
if remaining >= 2:
for i in range(remaining):
if i == remaining - 2:
self.obj_ids.append((self._calc_magic(blocks_lim), SpiffsObjDataPage))
break
else:
self.obj_ids.append((empty_obj_id_dict[self.build_config.obj_id_len], SpiffsObjDataPage))
self.obj_ids_limit -= 1
class SpiffsObjIndexPage(SpiffsObjPageWithIdx):
def __init__(self, obj_id, span_ix, size, name, build_config
): # type: (int, int, int, str, SpiffsBuildConfig) -> None
super(SpiffsObjIndexPage, self).__init__(obj_id, build_config)
self.span_ix = span_ix
self.name = name
self.size = size
if self.span_ix == 0:
self.pages_lim = self.build_config.OBJ_INDEX_PAGES_OBJ_IDS_HEAD_LIM
else:
self.pages_lim = self.build_config.OBJ_INDEX_PAGES_OBJ_IDS_LIM
self.pages = list() # type: typing.List[int]
def register_page(self, page): # type: (SpiffsObjDataPage) -> None
if not self.pages_lim > 0:
raise SpiffsFullError
self.pages.append(page.offset)
self.pages_lim -= 1
def to_binary(self): # type: () -> bytes
obj_id = self.obj_id ^ (1 << ((self.build_config.obj_id_len * 8) - 1))
img = struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.obj_id_len] +
SpiffsPage._len_dict[self.build_config.span_ix_len] +
SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
obj_id,
self.span_ix,
SPIFFS_PH_FLAG_USED_FINAL_INDEX)
# Add padding before the object index page specific information
img += b'\xFF' * self.build_config.OBJ_DATA_PAGE_HEADER_LEN_ALIGNED_PAD
        # If this is the first object index page for the object, add filename,
        # type and size information
if self.span_ix == 0:
img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[SPIFFS_PH_IX_SIZE_LEN] +
SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
self.size,
SPIFFS_TYPE_FILE)
img += self.name.encode() + (b'\x00' * (
(self.build_config.obj_name_len - len(self.name))
+ self.build_config.meta_len
+ self.build_config.OBJ_INDEX_PAGES_HEADER_LEN_ALIGNED_PAD))
        # Finally, add the page indices of the data pages
for page in self.pages:
page = page >> int(math.log(self.build_config.page_size, 2))
img += struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.page_ix_len], page)
assert(len(img) <= self.build_config.page_size)
img += b'\xFF' * (self.build_config.page_size - len(img))
return img
class SpiffsObjDataPage(SpiffsObjPageWithIdx):
def __init__(self, offset, obj_id, span_ix, contents, build_config
): # type: (int, int, int, bytes, SpiffsBuildConfig) -> None
super(SpiffsObjDataPage, self).__init__(obj_id, build_config)
self.span_ix = span_ix
self.contents = contents
self.offset = offset
def to_binary(self): # type: () -> bytes
img = struct.pack(SpiffsPage._endianness_dict[self.build_config.endianness] +
SpiffsPage._len_dict[self.build_config.obj_id_len] +
SpiffsPage._len_dict[self.build_config.span_ix_len] +
SpiffsPage._len_dict[SPIFFS_PH_FLAG_LEN],
self.obj_id,
self.span_ix,
SPIFFS_PH_FLAG_USED_FINAL)
img += self.contents
assert(len(img) <= self.build_config.page_size)
img += b'\xFF' * (self.build_config.page_size - len(img))
return img
class SpiffsBlock(object):
def _reset(self): # type: () -> None
self.cur_obj_index_span_ix = 0
self.cur_obj_data_span_ix = 0
self.cur_obj_id = 0
self.cur_obj_idx_page = None # type: typing.Optional[SpiffsObjIndexPage]
def __init__(self, bix, build_config): # type: (int, SpiffsBuildConfig) -> None
self.build_config = build_config
self.offset = bix * self.build_config.block_size
self.remaining_pages = self.build_config.OBJ_USABLE_PAGES_PER_BLOCK
self.pages = list() # type: typing.List[SpiffsPage]
self.bix = bix
lu_pages = list()
for i in range(self.build_config.OBJ_LU_PAGES_PER_BLOCK):
page = SpiffsObjLuPage(self.bix, self.build_config)
lu_pages.append(page)
self.pages.extend(lu_pages)
self.lu_page_iter = iter(lu_pages)
self.lu_page = next(self.lu_page_iter)
self._reset()
def _register_page(self, page): # type: (TSP) -> None
if isinstance(page, SpiffsObjDataPage):
assert self.cur_obj_idx_page is not None
self.cur_obj_idx_page.register_page(page) # can raise SpiffsFullError
try:
self.lu_page.register_page(page)
except SpiffsFullError:
self.lu_page = next(self.lu_page_iter)
try:
self.lu_page.register_page(page)
except AttributeError: # no next lookup page
# Since the amount of lookup pages is pre-computed at every block instance,
# this should never occur
raise RuntimeError('invalid attempt to add page to a block when there is no more space in lookup')
self.pages.append(page)
def begin_obj(self, obj_id, size, name, obj_index_span_ix=0, obj_data_span_ix=0
): # type: (int, int, str, int, int) -> None
if not self.remaining_pages > 0:
raise SpiffsFullError()
self._reset()
self.cur_obj_id = obj_id
self.cur_obj_index_span_ix = obj_index_span_ix
self.cur_obj_data_span_ix = obj_data_span_ix
page = SpiffsObjIndexPage(obj_id, self.cur_obj_index_span_ix, size, name, self.build_config)
self._register_page(page)
self.cur_obj_idx_page = page
self.remaining_pages -= 1
self.cur_obj_index_span_ix += 1
def update_obj(self, contents): # type: (bytes) -> None
if not self.remaining_pages > 0:
raise SpiffsFullError()
page = SpiffsObjDataPage(self.offset + (len(self.pages) * self.build_config.page_size),
self.cur_obj_id, self.cur_obj_data_span_ix, contents, self.build_config)
self._register_page(page)
self.cur_obj_data_span_ix += 1
self.remaining_pages -= 1
def end_obj(self): # type: () -> None
self._reset()
def is_full(self): # type: () -> bool
return self.remaining_pages <= 0
def to_binary(self, blocks_lim): # type: (int) -> bytes
img = b''
if self.build_config.use_magic:
for (idx, page) in enumerate(self.pages):
if idx == self.build_config.OBJ_LU_PAGES_PER_BLOCK - 1:
assert isinstance(page, SpiffsObjLuPage)
page.magicfy(blocks_lim)
img += page.to_binary()
else:
for page in self.pages:
img += page.to_binary()
assert(len(img) <= self.build_config.block_size)
img += b'\xFF' * (self.build_config.block_size - len(img))
return img
class SpiffsFS(object):
def __init__(self, img_size, build_config): # type: (int, SpiffsBuildConfig) -> None
if img_size % build_config.block_size != 0:
raise RuntimeError('image size should be a multiple of block size')
self.img_size = img_size
self.build_config = build_config
self.blocks = list() # type: typing.List[SpiffsBlock]
self.blocks_lim = self.img_size // self.build_config.block_size
self.remaining_blocks = self.blocks_lim
self.cur_obj_id = 1 # starting object id
def _create_block(self): # type: () -> SpiffsBlock
if self.is_full():
raise SpiffsFullError('the image size has been exceeded')
block = SpiffsBlock(len(self.blocks), self.build_config)
self.blocks.append(block)
self.remaining_blocks -= 1
return block
def is_full(self): # type: () -> bool
return self.remaining_blocks <= 0
def create_file(self, img_path, file_path): # type: (str, str) -> None
if len(img_path) > self.build_config.obj_name_len:
raise RuntimeError("object name '%s' too long" % img_path)
name = img_path
with open(file_path, 'rb') as obj:
contents = obj.read()
stream = io.BytesIO(contents)
try:
block = self.blocks[-1]
block.begin_obj(self.cur_obj_id, len(contents), name)
except (IndexError, SpiffsFullError):
block = self._create_block()
block.begin_obj(self.cur_obj_id, len(contents), name)
contents_chunk = stream.read(self.build_config.OBJ_DATA_PAGE_CONTENT_LEN)
while contents_chunk:
try:
block = self.blocks[-1]
try:
# This can fail because either (1) all the pages in block have been
# used or (2) object index has been exhausted.
block.update_obj(contents_chunk)
except SpiffsFullError:
                        # If it's (1), use the outer exception handler
if block.is_full():
raise SpiffsFullError
                        # If it's (2), write another object index page
block.begin_obj(self.cur_obj_id, len(contents), name,
obj_index_span_ix=block.cur_obj_index_span_ix,
obj_data_span_ix=block.cur_obj_data_span_ix)
continue
except (IndexError, SpiffsFullError):
# All pages in the block have been exhausted. Create a new block, copying
# the previous state of the block to a new one for the continuation of the
# current object
prev_block = block
block = self._create_block()
block.cur_obj_id = prev_block.cur_obj_id
block.cur_obj_idx_page = prev_block.cur_obj_idx_page
block.cur_obj_data_span_ix = prev_block.cur_obj_data_span_ix
block.cur_obj_index_span_ix = prev_block.cur_obj_index_span_ix
continue
contents_chunk = stream.read(self.build_config.OBJ_DATA_PAGE_CONTENT_LEN)
block.end_obj()
self.cur_obj_id += 1
def to_binary(self): # type: () -> bytes
img = b''
all_blocks = []
for block in self.blocks:
all_blocks.append(block.to_binary(self.blocks_lim))
bix = len(self.blocks)
if self.build_config.use_magic:
# Create empty blocks with magic numbers
while self.remaining_blocks > 0:
block = SpiffsBlock(bix, self.build_config)
all_blocks.append(block.to_binary(self.blocks_lim))
self.remaining_blocks -= 1
bix += 1
else:
# Just fill remaining spaces FF's
all_blocks.append(b'\xFF' * (self.img_size - len(all_blocks) * self.build_config.block_size))
img += b''.join([blk for blk in all_blocks])
return img
class CustomHelpFormatter(argparse.HelpFormatter):
"""
Similar to argparse.ArgumentDefaultsHelpFormatter, except it
doesn't add the default value if "(default:" is already present.
This helps in the case of options with action="store_false", like
--no-magic or --no-magic-len.
"""
def _get_help_string(self, action): # type: (argparse.Action) -> str
if action.help is None:
return ''
if '%(default)' not in action.help and '(default:' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
return action.help + ' (default: %(default)s)'
return action.help
def main(): # type: () -> None
if sys.version_info[0] < 3:
print('WARNING: Support for Python 2 is deprecated and will be removed in future versions.', file=sys.stderr)
elif sys.version_info[0] == 3 and sys.version_info[1] < 6:
print('WARNING: Python 3 versions older than 3.6 are not supported.', file=sys.stderr)
parser = argparse.ArgumentParser(description='SPIFFS Image Generator',
formatter_class=CustomHelpFormatter)
parser.add_argument('image_size',
help='Size of the created image')
parser.add_argument('base_dir',
help='Path to directory from which the image will be created')
parser.add_argument('output_file',
help='Created image output file path')
parser.add_argument('--page-size',
help='Logical page size. Set to value same as CONFIG_SPIFFS_PAGE_SIZE.',
type=int,
default=256)
parser.add_argument('--block-size',
help="Logical block size. Set to the same value as the flash chip's sector size (g_rom_flashchip.sector_size).",
type=int,
default=4096)
parser.add_argument('--obj-name-len',
help='File full path maximum length. Set to value same as CONFIG_SPIFFS_OBJ_NAME_LEN.',
type=int,
default=32)
parser.add_argument('--meta-len',
help='File metadata length. Set to value same as CONFIG_SPIFFS_META_LENGTH.',
type=int,
default=4)
parser.add_argument('--use-magic',
dest='use_magic',
help='Use magic number to create an identifiable SPIFFS image. Specify if CONFIG_SPIFFS_USE_MAGIC.',
action='store_true')
parser.add_argument('--no-magic',
dest='use_magic',
help='Inverse of --use-magic (default: --use-magic is enabled)',
action='store_false')
parser.add_argument('--use-magic-len',
dest='use_magic_len',
help='Use position in memory to create different magic numbers for each block. Specify if CONFIG_SPIFFS_USE_MAGIC_LENGTH.',
action='store_true')
parser.add_argument('--no-magic-len',
dest='use_magic_len',
help='Inverse of --use-magic-len (default: --use-magic-len is enabled)',
action='store_false')
parser.add_argument('--follow-symlinks',
help='Take into account symbolic links during partition image creation.',
action='store_true')
parser.add_argument('--big-endian',
help='Specify if the target architecture is big-endian. If not specified, little-endian is assumed.',
action='store_true')
parser.add_argument('--aligned-obj-ix-tables',
action='store_true',
help='Use aligned object index tables. Specify if SPIFFS_ALIGNED_OBJECT_INDEX_TABLES is set.')
parser.set_defaults(use_magic=True, use_magic_len=True)
args = parser.parse_args()
if not os.path.exists(args.base_dir):
raise RuntimeError('given base directory %s does not exist' % args.base_dir)
with open(args.output_file, 'wb') as image_file:
image_size = int(args.image_size, 0)
spiffs_build_default = SpiffsBuildConfig(args.page_size, SPIFFS_PAGE_IX_LEN,
args.block_size, SPIFFS_BLOCK_IX_LEN, args.meta_len,
args.obj_name_len, SPIFFS_OBJ_ID_LEN, SPIFFS_SPAN_IX_LEN,
True, True, 'big' if args.big_endian else 'little',
args.use_magic, args.use_magic_len, args.aligned_obj_ix_tables)
spiffs = SpiffsFS(image_size, spiffs_build_default)
for root, dirs, files in os.walk(args.base_dir, followlinks=args.follow_symlinks):
for f in files:
full_path = os.path.join(root, f)
spiffs.create_file('/' + os.path.relpath(full_path, args.base_dir).replace('\\', '/'), full_path)
image = spiffs.to_binary()
image_file.write(image)
if __name__ == '__main__':
main()
| 40.291803
| 147
| 0.610953
|
ff414bd63f7e7b6fdc808bced26bccb1a01dc84a
| 23
|
py
|
Python
|
tests/test-extensions/hello_extension/__init__.py
|
Tonow/cookiecutter
|
d6037b7dee5756e35a6ecd5b522899a9061c2c79
|
[
"BSD-3-Clause"
] | 8,822
|
2015-01-01T19:27:19.000Z
|
2019-07-01T15:49:43.000Z
|
tests/test-extensions/hello_extension/__init__.py
|
Tonow/cookiecutter
|
d6037b7dee5756e35a6ecd5b522899a9061c2c79
|
[
"BSD-3-Clause"
] | 859
|
2015-01-07T21:46:12.000Z
|
2019-07-01T17:36:24.000Z
|
tests/test-extensions/hello_extension/__init__.py
|
Tonow/cookiecutter
|
d6037b7dee5756e35a6ecd5b522899a9061c2c79
|
[
"BSD-3-Clause"
] | 1,144
|
2015-01-14T09:29:41.000Z
|
2019-07-01T09:39:24.000Z
|
"""Hello Extension."""
| 11.5
| 22
| 0.608696
|
37d2f480689f472289a6e595e8c97b660151b24f
| 3,988
|
py
|
Python
|
datastructures/hashTableOpenAddressing.py
|
maxotar/datastructures
|
ba6c499e0bd894ff9b01557048e7a93d91774d39
|
[
"MIT"
] | null | null | null |
datastructures/hashTableOpenAddressing.py
|
maxotar/datastructures
|
ba6c499e0bd894ff9b01557048e7a93d91774d39
|
[
"MIT"
] | null | null | null |
datastructures/hashTableOpenAddressing.py
|
maxotar/datastructures
|
ba6c499e0bd894ff9b01557048e7a93d91774d39
|
[
"MIT"
] | null | null | null |
class HashTableOpenAddressing:
TOMBSTONE = 'RIP'
MINSIZE = 8
def __init__(self, **initial_values):
self._buckets = [() for _ in range(self.MINSIZE)]
for k, v in initial_values.items():
self.insert(k, v)
def get(self, key):
_, myid = self._computeHash(key)
bucketsChecked = 0
        while self._buckets[myid] and (self._buckets[myid] == self.TOMBSTONE
                                       or self._buckets[myid][1] != key):
bucketsChecked += 1
if bucketsChecked < len(self._buckets):
myid = 0 if myid == len(self._buckets) - 1 else myid + 1
else:
break
else: # Run if the while condition EVER hits false
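            # Reaching an empty slot means the key is absent, so mimic
            # dict.get() and return None; a matching slot returns its value.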
return self._buckets[myid][2] if self._buckets[myid] else None
raise KeyError('key does not exist')
def insert(self, key, value):
self._grow()
myhash, myid = self._computeHash(key)
if not self._buckets[myid]:
self._buckets[myid] = (myhash, key, value)
else:
bucketsChecked = 0
            while self._buckets[myid] and (self._buckets[myid] == self.TOMBSTONE
                                           or self._buckets[myid][1] != key):
bucketsChecked += 1
if bucketsChecked < len(self._buckets):
myid = 0 if myid == len(self._buckets) - 1 else myid + 1
else:
break
else: # Run if the while condition EVER hits false
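                # Either an empty slot was found or the key already exists; in
                # both cases (re)write the entry. Note that tombstone slots are
                # probed past rather than reused by this implementation.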
self._buckets[myid] = (myhash, key, value)
return
raise IndexError('table is full')
    def delete(self, key):
        _, myid = self._computeHash(key)
        if not self._buckets[myid]:
            # An empty home slot means the key was never stored.
            raise KeyError('key does not exist')
        else:
            bucketsChecked = 0
            while self._buckets[myid] and (self._buckets[myid] == self.TOMBSTONE
                                           or self._buckets[myid][1] != key):
                bucketsChecked += 1
                if bucketsChecked < len(self._buckets):
                    myid = 0 if myid == len(self._buckets) - 1 else myid + 1
                else:
                    break
            else:  # Run if the while condition EVER hits false
                if not self._buckets[myid]:
                    # Probing ended on an empty slot: the key is absent.
                    raise KeyError('key does not exist')
                # Leave a tombstone so later probes keep walking past this slot.
                self._buckets[myid] = self.TOMBSTONE
                self._shrink()
                return
            raise KeyError('key does not exist')
@property
def utilization(self):
try:
return float(len(self)) / float(len(self._buckets))
except ZeroDivisionError:
return 0
    @property
    def keys(self):
        # Skip tombstones so deleted entries do not leak into the key list.
        items = [bucket[1] for bucket in self._buckets
                 if bucket and bucket != self.TOMBSTONE]
        return items
    @property
    def values(self):
        # Skip tombstones so deleted entries do not leak into the value list.
        items = [bucket[2] for bucket in self._buckets
                 if bucket and bucket != self.TOMBSTONE]
        return items
def _computeHash(self, key):
myhash = hash(key)
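        # Masking with size-1 works because the table size is always a power
        # of two (MINSIZE is 8; resizing multiplies or divides by 4).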
mymask = len(self._buckets) - 1
myid = myhash & mymask
return (myhash, myid)
def _grow(self):
if self.utilization >= 0.75:
self._resize(len(self._buckets) * 4)
def _shrink(self):
if 0 < self.utilization <= 0.16 and len(self._buckets) > self.MINSIZE:
self._resize(len(self._buckets) // 4)
def _resize(self, newsize):
old_buckets = self._buckets
self._buckets = [() for _ in range(newsize)]
# Move all the old buckets to new table
for bucket in [b for b in old_buckets if b and b != self.TOMBSTONE]:
self.insert(bucket[1], bucket[2])
def __setitem__(self, key, val):
self.insert(key, val)
    def __getitem__(self, key):
        val = self.get(key)
        if val is None:
            # get() returns None only for a missing key; the trade-off is that
            # None itself cannot be distinguished as a stored value here.
            raise KeyError('key does not exist')
        return val
def __delitem__(self, key):
self.delete(key)
def __len__(self):
return len([b for b in self._buckets if b and b != self.TOMBSTONE])
def __str__(self):
return str(list(zip(self.keys, self.values)))
# http://kells.tj/blog/2015/04/26/pure-python-hashtable.html
# http://interactivepython.org/courselib/static/pythonds/SortSearch/Hashing.html
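# A quick usage sketch (not part of the original module) exercising the class
# above; the keys and values are purely illustrative.
if __name__ == '__main__':
    table = HashTableOpenAddressing(alpha=1, beta=2)
    table['gamma'] = 3
    assert table['beta'] == 2
    assert len(table) == 3
    del table['alpha']
    assert len(table) == 2
    print(table, 'utilization=%.2f' % table.utilization)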
| 32.688525
| 80
| 0.557673
|
9c4f7cce4451579415a1dca2181a46e28217418c
| 3,047
|
py
|
Python
|
tests/unit/test_modular.py
|
anurag-gandhi/pandas-profiling
|
2373f3a299264f7b312dbe4b92edc14d36e8140e
|
[
"MIT"
] | 8
|
2020-07-09T15:28:18.000Z
|
2021-05-08T16:35:15.000Z
|
tests/unit/test_modular.py
|
anurag-gandhi/pandas-profiling
|
2373f3a299264f7b312dbe4b92edc14d36e8140e
|
[
"MIT"
] | null | null | null |
tests/unit/test_modular.py
|
anurag-gandhi/pandas-profiling
|
2373f3a299264f7b312dbe4b92edc14d36e8140e
|
[
"MIT"
] | 1
|
2021-10-03T07:39:04.000Z
|
2021-10-03T07:39:04.000Z
|
import numpy as np
import pandas as pd
import pytest
import pandas_profiling
@pytest.fixture
def tdf(get_data_file):
file_name = get_data_file(
"meteorites.csv",
"https://data.nasa.gov/api/views/gh4g-9sfh/rows.csv?accessType=DOWNLOAD",
)
df = pd.read_csv(file_name)
    # Note: pandas datetimes only cover roughly 1677-2262, so out-of-range years are coerced to NaT
df["year"] = pd.to_datetime(df["year"], errors="coerce")
# Example: Constant variable
df["source"] = "NASA"
# Example: Boolean variable
df["boolean"] = np.random.choice([True, False], df.shape[0])
# Example: Mixed with base types
df["mixed"] = np.random.choice([1, "A"], df.shape[0])
# Example: Highly correlated variables
df["reclat_city"] = df["reclat"] + np.random.normal(scale=5, size=(len(df)))
# Example: Duplicate observations
duplicates_to_add = pd.DataFrame(df.iloc[0:10])
    df = pd.concat([df, duplicates_to_add], ignore_index=True)  # df.append() was removed in pandas 2.0
return df
def test_modular_description_set(tdf):
profile = tdf.profile_report(
title="Modular test",
duplicates=None,
samples={"head": 0, "tail": 0},
correlations=None,
interactions=None,
missing_diagrams={
"matrix": False,
"bar": False,
"dendrogram": False,
"heatmap": False,
},
)
    description_set = profile.get_description()
    print(description_set)
def test_modular_absent(tdf):
profile = tdf.profile_report(
title="Modular test",
duplicates={"head": 0},
samples={"head": 0, "tail": 0},
interactions=None,
correlations={
"pearson": {"calculate": False},
"spearman": {"calculate": False},
"kendall": {"calculate": False},
"phi_k": {"calculate": False},
"cramers": {"calculate": False},
"recoded": {"calculate": False},
},
missing_diagrams=None,
)
html = profile.to_html()
assert "Correlations</h1>" not in html
assert "Duplicate rows</h1>" not in html
assert "Sample</h1>" not in html
assert "Missing values</h1>" not in html
def test_modular_present(tdf):
profile = tdf.profile_report(
title="Modular test",
duplicates={"head": 10},
samples={"head": 10, "tail": 10},
interactions={"targets": ["mass (g)"], "continuous": True},
correlations={
"pearson": {"calculate": True},
"spearman": {"calculate": True},
"kendall": {"calculate": True},
"phi_k": {"calculate": True},
"recoded": {"calculate": True},
"cramers": {"calculate": True},
},
missing_diagrams={
"matrix": True,
"bar": True,
"dendrogram": True,
"heatmap": True,
},
)
html = profile.to_html()
assert "Correlations</h1>" in html
assert "Duplicate rows</h1>" in html
assert "Sample</h1>" in html
assert "Missing values</h1>" in html
| 28.212963
| 91
| 0.573351
|
12edd340b5e7fb3d32008bcf910ef0bae9b9bd02
| 7,128
|
py
|
Python
|
tools/eval_utils/eval_utils_evaluate_cpp_result.py
|
neolixcn/OpenPCDet
|
32bae37db13711a4fb35ad2980068470bb6cee1c
|
[
"Apache-2.0"
] | null | null | null |
tools/eval_utils/eval_utils_evaluate_cpp_result.py
|
neolixcn/OpenPCDet
|
32bae37db13711a4fb35ad2980068470bb6cee1c
|
[
"Apache-2.0"
] | null | null | null |
tools/eval_utils/eval_utils_evaluate_cpp_result.py
|
neolixcn/OpenPCDet
|
32bae37db13711a4fb35ad2980068470bb6cee1c
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils
# ============ added by huxi =============
import eval_utils.cpp_result_load_utils as cpp_result_load
# added by huxi, load rpn config
from pcdet.pointpillar_quantize_config import load_rpn_config_json
# ========================================
def statistics_info(cfg, ret_dict, metric, disp_dict):
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
metric['recall_roi_%s' % str(cur_thresh)] += ret_dict.get('roi_%s' % str(cur_thresh), 0)
metric['recall_rcnn_%s' % str(cur_thresh)] += ret_dict.get('rcnn_%s' % str(cur_thresh), 0)
metric['gt_num'] += ret_dict.get('gt', 0)
min_thresh = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST[0]
disp_dict['recall_%s' % str(min_thresh)] = \
'(%d, %d) / %d' % (metric['recall_roi_%s' % str(min_thresh)], metric['recall_rcnn_%s' % str(min_thresh)], metric['gt_num'])
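# A minimal sketch (not part of the original file) of the bookkeeping done by
# statistics_info(); the stub cfg and ret_dict values below are hypothetical.
def _statistics_info_demo():
    from types import SimpleNamespace
    post = SimpleNamespace(RECALL_THRESH_LIST=[0.3, 0.5, 0.7])
    cfg = SimpleNamespace(MODEL=SimpleNamespace(POST_PROCESSING=post))
    metric = {'gt_num': 0}
    for t in post.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(t)] = 0
        metric['recall_rcnn_%s' % str(t)] = 0
    disp_dict = {}
    statistics_info(cfg, {'gt': 4, 'roi_0.3': 3, 'rcnn_0.3': 3}, metric, disp_dict)
    assert metric['gt_num'] == 4 and disp_dict['recall_0.3'] == '(3, 3) / 4'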
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None, val=False):
result_dir.mkdir(parents=True, exist_ok=True)
final_output_dir = result_dir / 'final_result' / 'data'
if save_to_file:
final_output_dir.mkdir(parents=True, exist_ok=True)
metric = {
'gt_num': 0,
}
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
metric['recall_roi_%s' % str(cur_thresh)] = 0
metric['recall_rcnn_%s' % str(cur_thresh)] = 0
dataset = dataloader.dataset
class_names = dataset.class_names
det_annos = []
logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
if dist_test:
num_gpus = torch.cuda.device_count()
local_rank = cfg.LOCAL_RANK % num_gpus
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
broadcast_buffers=False
)
model.eval()
if cfg.LOCAL_RANK == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
start_time = time.time()
for i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
pred_dicts, ret_dict = model(batch_dict)
disp_dict = {}
'''
statistics_info(cfg, ret_dict, metric, disp_dict)
annos = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_output_dir if save_to_file else None
)
'''
# === added by huxi ===
        # The network's predicted result is replaced here with the txt result
        # produced by the C++ code, so the same metrics evaluate both pipelines.
config_dict = load_rpn_config_json.get_config()
data_dir = config_dict["eval_result_txt_dir"]
#data_dir = "/home/songhongli/huxi/1022_80epoch/out_txt"
print(str(batch_dict["frame_id"][0]))
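        # Only pred_dicts_ is consumed below; the other returned values are unused.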
batch_dict_, pred_dicts_, class_names_, output_path_ = cpp_result_load.load_txt_data(str(batch_dict["frame_id"][0]), data_dir)
annos_ = dataset.generate_prediction_dicts(
batch_dict, pred_dicts_, class_names,
output_path=final_output_dir if save_to_file else None
)
det_annos += annos_
# =====================
if cfg.LOCAL_RANK == 0:
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.LOCAL_RANK == 0:
progress_bar.close()
if dist_test:
rank, world_size = common_utils.get_dist_info()
det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished (sec_per_example: %.4f seconds).' % sec_per_example)
if cfg.LOCAL_RANK != 0:
return {}
ret_dict = {}
if dist_test:
for key, val in metric[0].items():
for k in range(1, world_size):
metric[0][key] += metric[k][key]
metric = metric[0]
gt_num_cnt = metric['gt_num']
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
# logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
# logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
total_pred_objects = 0
for anno in det_annos:
total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects (%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
with open(result_dir / 'result.pkl', 'wb') as f:
pickle.dump(det_annos, f)
# info_f = open('/home/liuwanqiang/OpenPCDet-master/OpenPCDet-master/output/neolix_models/pointpillar_1031/default/eval/epoch_80/val/default/result.pkl', 'rb')
# det_annos = pickle.load(info_f)
det_range_ls = None
# det_range_ls = [[-10, 10, 0, 10], [-10, 10, 10, 30], [-10, 10, 30, 50], [-10, 10, 50, 70]]
det_range_ls = [[-10, 10, 0, 30]]
# det_range_ls = [[-10, 10, 0, 10]]
    if det_range_ls is not None:
for detect_range in det_range_ls:
print("*" * 60)
print("Eval range is abs(x) <10, %d < abs(y) < %d" % (detect_range[2], detect_range[3]))
result_str, result_dict, f2score = dataset.evaluation(
det_annos, class_names, det_range=detect_range,
eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
output_path=final_output_dir
)
logger.info(result_str)
ret_dict.update(result_dict)
print('The f2score of model epoch%s is %f' % (epoch_id, f2score))
            logger.info('Result is saved to %s' % result_dir)
logger.info('****************Evaluation done.*****************')
return ret_dict
else:
detect_range = cfg.DATA_CONFIG.POINT_CLOUD_RANGE
detect_range = [0, detect_range[3], 0, detect_range[4]]
result_str, result_dict = dataset.evaluation(
det_annos, class_names, det_range=detect_range,
eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
output_path=final_output_dir
)
logger.info(result_str)
ret_dict.update(result_dict)
        logger.info('Result is saved to %s' % result_dir)
logger.info('****************Evaluation done.*****************')
return ret_dict
if __name__ == '__main__':
pass
| 40.271186
| 163
| 0.628086
|
39428daac2eabfc7e202dc490a5dcf671ed26e2d
| 260,054
|
py
|
Python
|
simonsc/object/wind_table.py
|
KunxiongWang/simonsc
|
48969ff81cc7c256d3bd51ce1446d1fb4ad5825b
|
[
"Apache-2.0"
] | 2
|
2020-12-09T01:55:42.000Z
|
2021-06-30T18:28:25.000Z
|
simonsc/object/wind_table.py
|
KunxiongWang/simonsc
|
48969ff81cc7c256d3bd51ce1446d1fb4ad5825b
|
[
"Apache-2.0"
] | 1
|
2020-12-31T06:09:56.000Z
|
2020-12-31T06:09:56.000Z
|
simonsc/object/wind_table.py
|
KunxiongWang/simonsc
|
48969ff81cc7c256d3bd51ce1446d1fb4ad5825b
|
[
"Apache-2.0"
] | 10
|
2020-10-16T07:36:04.000Z
|
2021-01-25T08:57:47.000Z
|
from sqlalchemy import Column, String, DECIMAL, DateTime, BIGINT, SMALLINT, INTEGER, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import LONGTEXT
wind_tables = ['AEQUFROPLEINFOREPPEREND', 'AINDEXCSI500WEIGHT', 'AINDEXDESCRIPTION', 'AINDEXEODPRICES', 'AINDEXFINANCIALDERIVATIVE', 'AINDEXHS300CLOSEWEIGHT', 'AINDEXHS300WEIGHT', 'AINDEXINDUSTRIESEODCITICS', 'AINDEXMEMBERS', 'AINDEXMEMBERSCITICS', 'AINDEXMEMBERSCITICS2', 'AINDEXMEMBERSCITICS2ZL', 'AINDEXMEMBERSCITICS3', 'AINDEXMEMBERSCITICSZL', 'ASAREPLANTRADE', 'ASHAREACCOUNTSPAYABLE', 'ASHAREADMINISTRATION', 'ASHAREANNFINANCIALINDICATOR', 'ASHAREAUDITOPINION', 'ASHAREBALANCESHEET', 'ASHAREBANKINDICATOR', 'ASHAREBEGUARANTEED', 'ASHARECALENDAR', 'ASHARECAPITALIZATION', 'ASHARECAPITALOPERATION', 'ASHARECASHFLOW', 'ASHARECIRCULATINGHOLDERS', 'ASHARECOCAPITALOPERATION', 'ASHARECOMPANYHOLDSHARES', 'ASHARECONCEPTUALPLATE', 'ASHARECREDITORRIGHTS', 'ASHARECUSTOMER', 'ASHAREDEFENDANT', 'ASHAREDESCRIPTION', 'ASHAREDIRECTOR', 'ASHAREDIVIDEND', 'ASHAREEARNINGEST', 'ASHAREEODPRICES', 'ASHAREEQUFROINFO', 'ASHAREEQUITYPLEDGEINFO', 'ASHAREEQUITYRELATIONSHIPS', 'ASHAREESOPDESCRIPTION', 'ASHAREESOPTRADINGINFO', 'ASHAREFINANCIALDERIVATIVE', 'ASHAREFINANCIALINDICATOR', 'ASHAREFLOATHOLDER', 'ASHAREFREEFLOAT', 'ASHAREGROUP', 'ASHAREGROUPINFORMATION', 'ASHAREGUARANTEERELATIONSHIP', 'ASHAREGUARANTEESTATISTICS', 'ASHAREHOLDER', 'ASHAREHOLDERNUMBER', 'ASHAREHOLDING', 'ASHAREIBROKERINDICATOR', 'ASHAREILLEGALITY', 'ASHAREINCDESCRIPTION', 'ASHAREINCEXECQTYPRI', 'ASHAREINCEXERCISEPCT', 'ASHAREINCEXERCISEPCTZL', 'ASHAREINCOME', 'ASHAREINCQUANTITYDETAILS', 'ASHAREINCQUANTITYPRICE', 'ASHAREINDUSRATING', 'ASHAREINDUSTRIESCLASSCITICS', 'ASHAREINDUSTRIESCLASSCITICSZL', 'ASHAREINDUSTRIESCODE', 'ASHAREINSIDEHOLDER', 'ASHAREINSIDERTRADE', 'ASHAREINSTHOLDERDERDATA', 'ASHAREINSURANCEINDICATOR', 'ASHAREINTENSITYTREND', 'ASHAREINTENSITYTRENDADJ', 'ASHAREINVESTMENTPEVC', 'ASHAREIPOPRICINGFORECAST', 'ASHARELONGLOAN', 'ASHAREMAJORHOLDERPLANHOLD', 'ASHAREMAJORHOLDERPLANHOLDZL', 'ASHAREMANAGEMENT', 'ASHAREMANAGEMENTHOLDREWARD', 'ASHAREMARGINSUBJECT', 'ASHAREMARGINTRADE', 'ASHAREMARGINTRADESUM', 'ASHAREMECHANISMOWNERSHIP', 'ASHAREMERGERSUBJECT', 'ASHAREMJRHOLDERTRADE', 'ASHAREPEVCINVESTMENT', 'ASHAREPLAINTIFF', 'ASHAREPLEDGEPROPORTION', 'ASHAREPLEDGETRADE', 'ASHAREPREVIOUSENNAME', 'ASHAREPRODUCT', 'ASHAREPROFITEXPRESS', 'ASHAREPROFITNOTICE', 'ASHAREPROSECUTION', 'ASHARERECEIVABLES', 'ASHAREREGINV', 'ASHARERELATEDPARTYDEBT', 'ASHARERIGHTISSUE', 'ASHARESELLSUBJECT', 'ASHAREST', 'ASHARESTAFF', 'ASHARESTAFFSTRUCTURE', 'ASHARESTIBHOLDERVOTE', 'ASHARESTOCKRATING', 'ASHARESUPERVISOR', 'ASHARESUPPLIER', 'ASHARETRADINGSUSPENSION', 'ASHARETYPECODE', 'CFUNDBANKACCOUNT', 'CFUNDCHANGEWINDCODE', 'CFUNDCODEANDSNAME', 'CFUNDCOMPANYPREVIOUSNAME', 'CFUNDFACTIONALSTYLE', 'CFUNDHOLDRESTRICTEDCIRCULATION', 'CFUNDINDEXMEMBERS', 'CFUNDINDEXTABLE', 'CFUNDINDUSTRIESCODE', 'CFUNDINTRODUCTION', 'CFUNDMANAGEMENT', 'CFUNDPCHREDM', 'CFUNDPORTFOLIOCHANGES', 'CFUNDPREVIOUSNAME', 'CFUNDRALATEDSECURITIESCODE', 'CFUNDRATESENSITIVE', 'CFUNDSTYLECOEFFICIENT', 'CFUNDSTYLETHRESHOLD', 'CFUNDTACODE', 'CFUNDTYPECODE', 'CFUNDWINDCUSTOMCODE', 'CFUNDWINDINDEXCOMPONENT', 'CFUNDWINDINDEXMEMBERS', 'CHANGEWINDCODE', 'CHINACLOSEDFUNDEODPRICE', 'CHINAFEEDERFUND', 'CHINAGRADINGFUND', 'CHINAMFMPERFORMANCE', 'CHINAMFPERFORMANCE', 'CHINAMUTUALFUNDASSETPORTFOLIO', 'CHINAMUTUALFUNDBENCHMARK', 'CHINAMUTUALFUNDBENCHMARKEOD', 'CHINAMUTUALFUNDBONDPORTFOLIO', 'CHINAMUTUALFUNDDESCRIPTION', 'CHINAMUTUALFUNDFLOATSHARE', 'CHINAMUTUALFUNDINDPORTFOLIO', 'CHINAMUTUALFUNDMANAGER', 'CHINAMUTUALFUNDNAV', 'CHINAMUTUALFUNDPCHREDM', 'CHINAMUTUALFUNDPOSESTIMATION', 'CHINAMUTUALFUNDREPNAVPER', 
'CHINAMUTUALFUNDSEATTRADING', 'CHINAMUTUALFUNDSECTOR', 'CHINAMUTUALFUNDSHARE', 'CHINAMUTUALFUNDSTOCKPORTFOLIO', 'CHINAMUTUALFUNDSUSPENDPCHREDM', 'CHINAMUTUALFUNDTRACKINGINDEX', 'CLOSEDFUNDPCHREDM', 'CMFAIPINFO', 'CMFCODEANDSNAME', 'CMFCONSEPTION', 'CMFDESCCHANGE', 'CMFFAIRVALUECHANGEPROFIT', 'CMFFIXEDINVESTMENTRATE', 'CMFHOLDER', 'CMFHOLDERSTRUCTURE', 'CMFHOLDINGRATIOANOMALY', 'CMFINDEXDESCRIPTION', 'CMFINDEXEOD', 'CMFINDUSTRYPLATE', 'CMFIOPVNAV', 'CMFNAVOPERATIONRECORD', 'CMFOTHERPORTFOLIO', 'CMFPREFERENTIALFEE', 'CMFPROPORTIONOFINVEOBJ', 'CMFRISKLEVEL', 'CMFSECCLASS', 'CMFSELLINGAGENTS', 'CMFSUBREDFEE', 'CMFTHEMECONCEPT', 'CMFTRADINGSUSPENSION', 'CMFUNDOPERATEPERIOD', 'CMMFPORTFOLIOPTM', 'CMMQUARTERLYDATA', 'CMONEYMARKETDAILYFINCOME', 'CMONEYMARKETFINCOME', 'CMONEYMARKETFSCARRYOVERM', 'CODEANDSNAME', 'COMPANYPREVIOUSNAME', 'COMPINTRODUCTION', 'COMPORGANIZATIONCODE', 'COUNTRYANDAREACODE', 'COUNTRYANDAREACODEZL', 'CPFUNDDESCRIPTION', 'CURRENCYCODE', 'ETFPCHREDM', 'FINANCIALQUALIFICATION', 'FINDEXPERFORMANCE', 'FUNDCREDITRECORD', 'GLOBALMARKETTRADINGTIME', 'GLOBALWORKINGDAY', 'INDEXCONTRASTSECTOR', 'LOFDESCRIPTION', 'LOFPCHREDM', 'RALATEDSECURITIESCODE', 'SHSCCHANNELHOLDINGS', 'SHSCDAILYSTATISTICS', 'WINDCUSTOMCODE', 'WIND_PDUPDATE_LOG']
Base = declarative_base()
metadata = Base.metadata
class AEQUFROPLEINFOREPPEREND(Base):
__tablename__ = 'AEQUFROPLEINFOREPPEREND'
CRNCY_CODE = Column(DECIMAL(20, 4), doc="质押冻结比例(%)")
F_NAV_ACCUMULATED = Column(String(200), doc="股东名称")
F_NAV_ADJFACTOR = Column(DECIMAL(20, 4), doc="持股数量(股)")
F_NAV_ADJUSTED = Column(String(400), doc="备注")
F_NAV_DIVACCUMULATED = Column(DECIMAL(20, 4), doc="质押或冻结数量(股)")
F_NAV_UNIT = Column(String(8), doc="公告日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PRICE_DATE = Column(String(8), doc="报告期")
S_FRO_SHARES = Column(DECIMAL(20, 4), doc="冻结股份数量")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_PLEDGE_SHARES = Column(DECIMAL(20, 4), doc="质押股份数量")
class AINDEXCSI500WEIGHT(Base):
__tablename__ = 'AINDEXCSI500WEIGHT'
CLOSEVALUE = Column(DECIMAL(20, 4), doc="收盘")
EXCHANGE = Column(String(20), doc="交易所")
FREE_SHR_RATIO = Column(DECIMAL(20, 4), doc="自由流通比例(%)(归档后)")
INDEXNAME = Column(String(40), doc="指数名称")
INDEXNAME_ENG = Column(String(100), doc="指数英文名称")
MV_CALCULATION = Column(DECIMAL(20, 2), doc="计算用市值")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPEN_ADJUSTED = Column(DECIMAL(20, 4), doc="调整后开盘参考价")
OPMODE = Column(String(1))
S_CON_WINDCODE = Column(String(40), doc="Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SHR_CALCULATION = Column(DECIMAL(20, 2), doc="计算用股本(股)")
TOT_MV = Column(DECIMAL(20, 2), doc="总市值")
TOT_SHR = Column(DECIMAL(20, 2), doc="总股本(股)")
TRADE_DT = Column(String(10), doc="生效日期")
WEIGHT = Column(DECIMAL(20, 4), doc="权重(%)")
WEIGHTFACTOR = Column(DECIMAL(20, 8), doc="权重因子")
class AINDEXDESCRIPTION(Base):
__tablename__ = 'AINDEXDESCRIPTION'
CHANGE_HISTORY = Column(String(100), doc="变更历史")
EXPIRE_DATE = Column(String(8), doc="终止发布日期")
INCOME_PROCESSING_METHOD = Column(String(20), doc="收益处理方式")
INDEX_INTRO = Column(LONGTEXT, doc="指数简介")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CODE = Column(String(40), doc="交易代码")
S_INFO_COMPNAME = Column(String(100), doc="指数名称")
S_INFO_EXCHMARKET = Column(String(40), doc="交易所")
S_INFO_INDEX_BASEPER = Column(String(8), doc="基期")
S_INFO_INDEX_BASEPT = Column(DECIMAL(20, 4), doc="基点")
S_INFO_INDEX_WEIGHTSRULE = Column(String(10), doc="加权方式")
S_INFO_INDEXCODE = Column(DECIMAL(9, 0), doc="指数类别代码")
S_INFO_INDEXSTYLE = Column(String(40), doc="指数风格")
S_INFO_INDEXTYPE = Column(String(40), doc="指数类别")
S_INFO_LISTDATE = Column(String(8), doc="发布日期")
S_INFO_NAME = Column(String(50), doc="证券简称")
S_INFO_PINYIN = Column(String(40), doc="简称拼音")
S_INFO_PUBLISHER = Column(String(100), doc="发布方")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
WEIGHT_TYPE = Column(DECIMAL(9, 0), doc="权重类型")
WEIGHT_TYPE_NAME = Column(String(100), doc="权重类型名称")
class AINDEXEODPRICES(Base):
__tablename__ = 'AINDEXEODPRICES'
CRNCY_CODE = Column(String(10), doc="货币代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_AMOUNT = Column(DECIMAL(20, 4), doc="成交金额(千元)")
S_DQ_CHANGE = Column(DECIMAL(20, 4), doc="涨跌(点)")
S_DQ_CLOSE = Column(DECIMAL(20, 4), doc="收盘价(点)")
S_DQ_HIGH = Column(DECIMAL(20, 4), doc="最高价(点)")
S_DQ_LOW = Column(DECIMAL(20, 4), doc="最低价(点)")
S_DQ_OPEN = Column(DECIMAL(20, 4), doc="开盘价(点)")
S_DQ_PCTCHANGE = Column(DECIMAL(20, 4), doc="涨跌幅(%)")
S_DQ_PRECLOSE = Column(DECIMAL(20, 4), doc="昨收盘价(点)")
S_DQ_VOLUME = Column(DECIMAL(20, 4), doc="成交量(手)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SEC_ID = Column(String(10), doc="证券ID")
TRADE_DT = Column(String(8), doc="交易日期")
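# A minimal query sketch (not part of the original module); the DSN, index
# code and dates below are hypothetical and only illustrate how these
# declarative models are typically used with a session.
def _eod_prices_demo():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('mysql+pymysql://user:password@host/wind')  # hypothetical DSN
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        rows = (session.query(AINDEXEODPRICES)
                .filter(AINDEXEODPRICES.S_INFO_WINDCODE == '000300.SH',
                        AINDEXEODPRICES.TRADE_DT >= '20200101')
                .order_by(AINDEXEODPRICES.TRADE_DT)
                .limit(5)
                .all())
        for row in rows:
            print(row.TRADE_DT, row.S_DQ_CLOSE)
    finally:
        session.close()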
class AINDEXFINANCIALDERIVATIVE(Base):
__tablename__ = 'AINDEXFINANCIALDERIVATIVE'
ACCOUNTS_GROWTH_RATE = Column(DECIMAL(20, 4), doc="应收账款比年初增速")
ACCT_PAYABLE = Column(DECIMAL(20, 4), doc="应付账款合计")
ACCT_RCV = Column(DECIMAL(20, 4), doc="应收账款合计")
ADV_FROM_CUST = Column(DECIMAL(20, 4), doc="预收账款合计")
ASSET_TURNOVER = Column(DECIMAL(20, 4), doc="资产周转率")
ASSETS_GROWTH_RATE = Column(DECIMAL(20, 4), doc="总资产比年初增速")
ASSETS_LIABILITIES = Column(DECIMAL(20, 4), doc="资产负债率")
BONDS_PAYABLE = Column(DECIMAL(20, 4), doc="应付债券合计")
CAP_RSRV = Column(DECIMAL(20, 4), doc="资本公积金合计")
CASH_CASH_EQU_END_PERIOD = Column(DECIMAL(20, 4), doc="期末现金合计")
CASH_PAID_INVEST = Column(DECIMAL(20, 4), doc="投资支付的现金合计")
CASH_PAID_INVEST_TOT = Column(DECIMAL(20, 4), doc="投资现金流出合计")
CASH_PAY_ACQ_CONST_FIOLTA = Column(DECIMAL(20, 4), doc="购建固定无形和长期资产支付的现金合计")
CASH_PAY_BEH_EMPL = Column(DECIMAL(20, 4), doc="支付给职工以及为职工支付的现金合计")
CASH_PAY_DIST_DPCP_INT_EXP = Column(DECIMAL(20, 4), doc="分配股利偿付利息支付的现金合计")
CASH_PAY_GOODS_PURCH_SERV_REC = Column(DECIMAL(20, 4), doc="购买商品支付的现金合计")
CASH_PREPAY_AMT_BORR = Column(DECIMAL(20, 4), doc="偿付债务支付的现金合计")
CASH_RECP_BORROW = Column(DECIMAL(20, 4), doc="取得借款收到的现金合计")
CASH_RECP_CAP_CONTRIB = Column(DECIMAL(20, 4), doc="吸收投资收到的现金合计")
CASHFLOW_INCOME_RATIO = Column(DECIMAL(20, 4), doc="现金流收入比")
CONST_IN_PROG = Column(DECIMAL(20, 4), doc="在建工程合计")
EMPL_BEN_PAYABLE = Column(DECIMAL(20, 4), doc="应付职工薪酬合计")
FIN_EXP_TOT = Column(DECIMAL(20, 4), doc="财务费用合计")
FINANCIAL_LEVERAGE = Column(DECIMAL(20, 4), doc="财务杠杆")
FIX_ASSETS = Column(DECIMAL(20, 4), doc="固定资产合计")
GERL_ADMIN_EXP_TOT = Column(DECIMAL(20, 4), doc="管理费用合计")
GROSS_MARGIN_INC_LESS_CHAIN = Column(DECIMAL(20, 4), doc="单季度:毛利率环比增减")
GROSS_MARGIN_INC_LESS_QUA = Column(DECIMAL(20, 4), doc="单季度:毛利率同比增减")
GROSS_PROFIT_MARGIN = Column(DECIMAL(20, 4), doc="毛利率")
GROSSPROFIT_MARGIN_INC_LESS = Column(DECIMAL(20, 4), doc="毛利率同比增减")
IMPAIR_LOSS_ASSETS_TOT = Column(DECIMAL(20, 4), doc="资产减值损失合计")
INC_TAX_TOT = Column(DECIMAL(20, 4), doc="所得税合计")
INGREDIENT_NUM = Column(DECIMAL(20, 0), doc="成分股数量")
INVEST_REAL_ESTATE = Column(DECIMAL(20, 4), doc="投资性房地产合计")
LONG_TERM_EQY_INVEST = Column(DECIMAL(20, 4), doc="长期股权投资合计")
LOSS_INGREDIENT_NUM = Column(DECIMAL(20, 0), doc="亏损成分股数量")
LT_BORROW = Column(DECIMAL(20, 4), doc="长期借款合计")
MONETARY_CAP = Column(DECIMAL(20, 4), doc="货币资金合计")
NET_AFTER_DED_NR_LP_CORRECT = Column(DECIMAL(20, 4), doc="扣非归属净利润合计")
NET_ASSET_TURNOVER = Column(DECIMAL(20, 4), doc="净资产周转率")
NET_ASSETS_GROWTH_RATE = Column(DECIMAL(20, 4), doc="净资产比年初增速")
NET_BUSINESS_CYCLE = Column(DECIMAL(20, 4), doc="净营业周期")
NET_CASH_FLOWS_FNC_TOT = Column(DECIMAL(20, 4), doc="筹资活动净流量合计")
NET_CASH_FLOWS_INV_TOT = Column(DECIMAL(20, 4), doc="投资活动净流量合计")
NET_CASH_RECP_DISP_FIOLTA = Column(DECIMAL(20, 4), doc="处置固定资产等收回的现金合计")
NET_CASHFLOW_PROFIT = Column(DECIMAL(20, 4), doc="现金流净利润比")
NET_GAIN_CHG_FV_TOT = Column(DECIMAL(20, 4), doc="公允价值变动净收益合计")
NET_INCR_CASH_CASH_EQU_TOT = Column(DECIMAL(20, 4), doc="现金及现金等价物净增加额合计")
NET_INCR_CASH_CASH_EQU_TTM = Column(DECIMAL(20, 4), doc="现金及现金等价物净增加额(TTM)")
NET_INVEST_INC_TOT = Column(DECIMAL(20, 4), doc="投资净收益合计")
NET_PRO_RATE_INC_LESS_CHAIN = Column(DECIMAL(20, 4), doc="单季度:净利润率环比增减")
NET_PRO_RATE_INC_LESS_QUA = Column(DECIMAL(20, 4), doc="单季度:净利润率同比增减")
NET_PRO_RATE_INC_LESS_TTM = Column(DECIMAL(20, 4), doc="净利率同比增减(TTM)")
NET_PRO_RATE_INCREASE_LESS = Column(DECIMAL(20, 4), doc="净利润率同比增减")
NET_PROFIT_GROWTH_RATE = Column(DECIMAL(20, 4), doc="净利润同比增速")
NET_PROFIT_GROWTH_RATE_CHAIN = Column(DECIMAL(20, 4), doc="单季度:净利润环比增速")
NET_PROFIT_GROWTH_RATE_QUA = Column(DECIMAL(20, 4), doc="单季度:净利润同比增速")
NET_PROFIT_GROWTH_RATE_TTM = Column(DECIMAL(20, 4), doc="净利润同比增速(TTM)")
NET_PROFIT_INCL_MIN_INT_INC = Column(DECIMAL(20, 4), doc="净利润(含少数股东权益)合计")
NET_PROFIT_RATE = Column(DECIMAL(20, 4), doc="净利润率")
NET_PROFIT_RATE_QUA = Column(DECIMAL(20, 4), doc="单季度:净利润率")
NET_PROFIT_RATE_TTM = Column(DECIMAL(20, 4), doc="净利润率(TTM)")
NET_PROFIT_TOT = Column(DECIMAL(20, 4), doc="净利润合计")
NET_PROFIT_TTM = Column(DECIMAL(20, 4), doc="净利润(TTM)")
NON_CUR_LIAB_DUE_WITHIN_1Y = Column(DECIMAL(20, 4), doc="一年内到期的非流动负债合计")
NON_OPER_EXP_TOT = Column(DECIMAL(20, 4), doc="营业外支出合计")
NON_OPER_REV_TOT = Column(DECIMAL(20, 4), doc="营业外收入合计")
NOTES_PAYABLE = Column(DECIMAL(20, 4), doc="应付票据合计")
NOTES_RCV = Column(DECIMAL(20, 4), doc="应收票据合计")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPER_COST_TOT = Column(DECIMAL(20, 4), doc="营业成本合计")
OPER_PROFIT_TOT = Column(DECIMAL(20, 4), doc="营业利润合计")
OPER_REV = Column(DECIMAL(20, 4), doc="营业收入合计")
OPER_REV_GROWTH_RATE = Column(DECIMAL(20, 4), doc="营业收入同比增速")
OPER_REV_GROWTH_RATE_CHAIN = Column(DECIMAL(20, 4), doc="单季度:营业收入环比增速")
OPER_REV_GROWTH_RATE_QUA = Column(DECIMAL(20, 4), doc="单季度:营业收入同比增速")
OPER_REV_GROWTH_RATE_TTM = Column(DECIMAL(20, 4), doc="营业收入同比增速(TTM)")
OPER_REV_TTM = Column(DECIMAL(20, 4), doc="营业收入(TTM)")
OPMODE = Column(String(1))
OWNERS_EQUITY = Column(DECIMAL(20, 4), doc="所有者权益合计")
PAID_UP_CAPITAL = Column(DECIMAL(20, 4), doc="实收资本合计")
PERIOD_EXPENSE_INC_LESS = Column(DECIMAL(20, 4), doc="期间费用率同比增减")
PERIOD_EXPENSE_INC_LESS_CHAIN = Column(DECIMAL(20, 4), doc="单季度:?期间费用率环比增减")
PERIOD_EXPENSE_INC_LESS_QUA = Column(DECIMAL(20, 4), doc="单季度:?期间费用率同比增减")
PERIOD_EXPENSE_RATE = Column(DECIMAL(20, 4), doc="期间费用率")
PERIOD_EXPENSE_RATE_QUA = Column(DECIMAL(20, 4), doc="单季度:期间费用率")
PREPAY = Column(DECIMAL(20, 4), doc="预付款项合计")
PROC_ISSUE_BONDS = Column(DECIMAL(20, 4), doc="发行债券收到的现金合计")
PROFIT_RATE_QUA = Column(DECIMAL(20, 4), doc="单季度:毛利率")
REPORT_PERIOD = Column(String(12), doc="报告期")
REPORT_TYPE_CODE = Column(DECIMAL(9, 0), doc="报表类型代码")
ROA = Column(DECIMAL(20, 4), doc="ROA")
ROA_INCREASE_LESS = Column(DECIMAL(20, 4), doc="ROA同比增减")
ROA_INCREASE_LESS_CHAIN = Column(DECIMAL(20, 4), doc="单季度:ROA环比增减")
ROA_INCREASE_LESS_QUA = Column(DECIMAL(20, 4), doc="单季度:ROA同比增减")
ROA_INCREASE_LESS_TTM = Column(DECIMAL(20, 4), doc="ROA同比增减(TTM)")
ROA_QUA = Column(DECIMAL(20, 4), doc="单季度:ROA")
ROA_TTM = Column(DECIMAL(20, 4), doc="ROA(TTM)")
ROE = Column(DECIMAL(20, 4), doc="ROE")
ROE_INCREASE_LESS = Column(DECIMAL(20, 4), doc="ROE同比增减")
ROE_INCREASE_LESS_CHAIN = Column(DECIMAL(20, 4), doc="单季度:ROE环比增减")
ROE_INCREASE_LESS_QUA = Column(DECIMAL(20, 4), doc="单季度:ROE同比增减")
ROE_INCREASE_LESS_TTM = Column(DECIMAL(20, 4), doc="ROE同比增减(TTM)")
ROE_QUA = Column(DECIMAL(20, 4), doc="单季度:ROE")
ROE_TTM = Column(DECIMAL(20, 4), doc="ROE(TTM)")
S_FA_ARTURNDAYS = Column(DECIMAL(20, 4), doc="应收账款周转天数")
S_FA_CURRENT = Column(DECIMAL(20, 4), doc="流动比率")
S_FA_EXTRAORDINARY = Column(DECIMAL(20, 4), doc="非经常性损益合计")
S_FA_INVTURNDAYS = Column(DECIMAL(20, 4), doc="存货周转天数")
S_FA_QUICK = Column(DECIMAL(20, 4), doc="速动比率")
S_FA_SALESCASHINTOOR = Column(DECIMAL(20, 4), doc="销售商品提供劳务收到的现金合计")
S_INFO_WINDCODE = Column(String(60), doc="指数Wind代码")
S_VAL_PCF_OCF = Column(DECIMAL(20, 4), doc="经营现金流合计")
S_VAL_PCF_OCF_CHAIN = Column(DECIMAL(20, 4), doc="单季度:经营现金流环比增速")
S_VAL_PCF_OCF_GROWTH_RATE = Column(DECIMAL(20, 4), doc="经营现金流同比增速")
S_VAL_PCF_OCF_GROWTH_RATE_TTM = Column(DECIMAL(20, 4), doc="经营现金流同比增速(TTM)")
S_VAL_PCF_OCF_QUA = Column(DECIMAL(20, 4), doc="单季度:经营现金流同比增速")
S_VAL_PCF_OCF_TTM = Column(DECIMAL(20, 4), doc="经营现金流(TTM)")
SALES_EXPENSE_RATE = Column(DECIMAL(20, 4), doc="销售费用率")
SALES_EXPENSE_RATE_QUA = Column(DECIMAL(20, 4), doc="单季度:销售费用率")
SELLING_DIST_EXP_TOT = Column(DECIMAL(20, 4), doc="销售费用合计")
ST_BORROW = Column(DECIMAL(20, 4), doc="短期借款合计")
STOCK_RATIO_GROWTH_RATE = Column(DECIMAL(20, 4), doc="存货比年初增速")
STOT_CASH_INFLOWS_FNC_TOT = Column(DECIMAL(20, 4), doc="筹资活动现金流入合计")
STOT_CASH_INFLOWS_INV_TOT = Column(DECIMAL(20, 4), doc="投资活动现金流入合计")
STOT_CASH_INFLOWS_OPER_TOT = Column(DECIMAL(20, 4), doc="经营活动现金流入合计")
STOT_CASH_OUTFLOWS_FNC_TOT = Column(DECIMAL(20, 4), doc="筹资活动现金流出合计")
STOT_CASH_OUTFLOWS_OPER_TOT = Column(DECIMAL(20, 4), doc="经营活动现金流出合计")
TOT_CUR_ASSETS = Column(DECIMAL(20, 4), doc="流动资产合计")
TOT_CUR_LIAB = Column(DECIMAL(20, 4), doc="流动负债合计")
TOT_NON_CUR_ASSETS = Column(DECIMAL(20, 4), doc="非流动资产合计")
TOT_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="非流动负债合计")
TOT_PROFIT = Column(DECIMAL(20, 4), doc="利润总额合计")
TOTAL_ACCOUNTS_RECEIVABLE = Column(DECIMAL(20, 4), doc="应收账款合计")
TOTAL_ASSETS = Column(DECIMAL(20, 4), doc="总资产合计")
TOTAL_INVENTORY = Column(DECIMAL(20, 4), doc="存货合计")
TOTAL_NET_ASSETS = Column(DECIMAL(20, 4), doc="净资产合计")
UNDISTRIBUTED_PROFIT = Column(DECIMAL(20, 4), doc="未分配利润合计")
class AINDEXHS300CLOSEWEIGHT(Base):
__tablename__ = 'AINDEXHS300CLOSEWEIGHT'
I_WEIGHT = Column(DECIMAL(20, 4), doc="权重")
I_WEIGHT_11 = Column(DECIMAL(20, 2), doc="总股本(股)")
I_WEIGHT_12 = Column(String(2), doc="自由流通比例(%)(归档后)")
I_WEIGHT_14 = Column(DECIMAL(20, 8), doc="权重因子")
I_WEIGHT_15 = Column(DECIMAL(20, 4), doc="收盘")
I_WEIGHT_16 = Column(String(2), doc="调整后开盘参考价")
I_WEIGHT_17 = Column(DECIMAL(20, 2), doc="总市值")
I_WEIGHT_18 = Column(DECIMAL(20, 2), doc="计算用市值")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_WINDCODE = Column(String(40), doc="成份股Wind代码")
S_IN_INDEX = Column(DECIMAL(20, 2), doc="计算用股本(股)")
S_INFO_WINDCODE = Column(String(40), doc="指数Wind代码")
TRADE_DT = Column(String(10), doc="交易日期")
class AINDEXHS300WEIGHT(Base):
__tablename__ = 'AINDEXHS300WEIGHT'
I_WEIGHT = Column(DECIMAL(20, 4), doc="权重")
I_WEIGHT_11 = Column(DECIMAL(20, 2), doc="总股本(股)")
I_WEIGHT_12 = Column(DECIMAL(20, 4), doc="自由流通比例(%)(归档后)")
I_WEIGHT_14 = Column(DECIMAL(20, 8), doc="权重因子")
I_WEIGHT_15 = Column(DECIMAL(20, 4), doc="收盘")
I_WEIGHT_16 = Column(DECIMAL(20, 4), doc="调整后开盘参考价")
I_WEIGHT_17 = Column(DECIMAL(20, 2), doc="总市值")
I_WEIGHT_18 = Column(DECIMAL(20, 2), doc="计算用市值")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_WINDCODE = Column(String(60), doc="成份股Wind代码")
S_IN_INDEX = Column(DECIMAL(20, 2), doc="计算用股本(股)")
S_INFO_WINDCODE = Column(String(60), doc="指数Wind代码")
TRADE_DT = Column(String(12), doc="交易日期")
class AINDEXINDUSTRIESEODCITICS(Base):
__tablename__ = 'AINDEXINDUSTRIESEODCITICS'
CRNCY_CODE = Column(String(10), doc="货币代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_AMOUNT = Column(DECIMAL(20, 4), doc="成交金额(千元)")
S_DQ_CHANGE = Column(DECIMAL(20, 4), doc="涨跌(点)")
S_DQ_CLOSE = Column(DECIMAL(20, 4), doc="收盘价(点)")
S_DQ_HIGH = Column(DECIMAL(20, 4), doc="最高价(点)")
S_DQ_LOW = Column(DECIMAL(20, 4), doc="最低价(点)")
S_DQ_OPEN = Column(DECIMAL(20, 4), doc="开盘价(点)")
S_DQ_PCTCHANGE = Column(DECIMAL(20, 4), doc="涨跌幅(%)")
S_DQ_PRECLOSE = Column(DECIMAL(20, 4), doc="昨收盘价(点)")
S_DQ_VOLUME = Column(DECIMAL(20, 4), doc="成交量(手)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_DT = Column(String(8), doc="交易日期")
class AINDEXMEMBERS(Base):
__tablename__ = 'AINDEXMEMBERS'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_INDATE = Column(String(8), doc="纳入日期")
S_CON_OUTDATE = Column(String(8), doc="剔除日期")
S_CON_WINDCODE = Column(String(40), doc="成份股Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="指数Wind代码")
class AINDEXMEMBERSCITICS(Base):
__tablename__ = 'AINDEXMEMBERSCITICS'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_INDATE = Column(String(8), doc="纳入日期")
S_CON_OUTDATE = Column(String(8), doc="剔除日期")
S_CON_WINDCODE = Column(String(40), doc="成份股Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="指数Wind代码")
class AINDEXMEMBERSCITICS2(Base):
__tablename__ = 'AINDEXMEMBERSCITICS2'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_INDATE = Column(String(8), doc="纳入日期")
S_CON_OUTDATE = Column(String(8), doc="剔除日期")
S_CON_WINDCODE = Column(String(40), doc="Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class AINDEXMEMBERSCITICS2ZL(Base):
__tablename__ = 'AINDEXMEMBERSCITICS2ZL'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_INDATE = Column(String(8), doc="纳入日期")
S_CON_OUTDATE = Column(String(8), doc="剔除日期")
S_CON_WINDCODE = Column(String(40), doc="Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class AINDEXMEMBERSCITICS3(Base):
__tablename__ = 'AINDEXMEMBERSCITICS3'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_INDATE = Column(String(8), doc="纳入日期")
S_CON_OUTDATE = Column(String(8), doc="剔除日期")
S_CON_WINDCODE = Column(String(40), doc="Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class AINDEXMEMBERSCITICSZL(Base):
__tablename__ = 'AINDEXMEMBERSCITICSZL'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_INDATE = Column(String(8), doc="纳入日期")
S_CON_OUTDATE = Column(String(8), doc="剔除日期")
S_CON_WINDCODE = Column(String(40), doc="成份股Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="指数Wind代码")
class ASAREPLANTRADE(Base):
__tablename__ = 'ASAREPLANTRADE'
ANN_DT = Column(String(8), doc="首次披露公告日")
ANN_DT_NEW = Column(String(8), doc="最新公告日")
CHANGE_END_DATE = Column(String(8), doc="变动截止日期")
CHANGE_START_DATE = Column(String(8), doc="变动起始日期")
HOLD_NUMBER = Column(DECIMAL(20, 4), doc="持有证券数量(股/张)")
HOLD_PROPORTION = Column(DECIMAL(20, 4), doc="持股数量占比(%)")
HOLD_RESTRICTED_STOCK = Column(DECIMAL(20, 4), doc="持有限售股数量(股)")
HOLD_UNLIMITED_SALE_SHARES = Column(DECIMAL(20, 4), doc="持有无限售股数量(股)")
HOLDER_ID = Column(String(10), doc="持有方id")
HOLDER_NAME = Column(String(100), doc="持有方名称")
HOLDER_STATUS = Column(String(80), doc="股东身份类别")
HOLDER_TYPE = Column(String(1), doc="股东类型")
IS_ADJUSTMENT = Column(DECIMAL(1, 0), doc="方案是否有过调整")
IS_CHANGE_CONTROL = Column(DECIMAL(1, 0), doc="是否导致公司控制权变更")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PLAN_MAX_HOLD_RATIO = Column(DECIMAL(20, 4), doc="拟最大变动数量占持有公司股份的比例(%)")
PLAN_TRANSACT_MAX = Column(DECIMAL(20, 4), doc="拟变动金额上限(元)")
PLAN_TRANSACT_MAX_NUM = Column(DECIMAL(20, 4), doc="拟变动数量上限(股/张)")
PLAN_TRANSACT_MAX_RATIO = Column(DECIMAL(20, 4), doc="拟变动数量上限占比(%)")
PLAN_TRANSACT_MIN = Column(DECIMAL(20, 4), doc="拟变动金额下限(元)")
PLAN_TRANSACT_MIN_NUM = Column(DECIMAL(20, 4), doc="拟变动数量下限(股/张)")
PLAN_TRANSACT_MIN_RATIO = Column(DECIMAL(20, 4), doc="拟变动数量下限占比(%)")
PROGRAM_ADJUSTMENT_MEMO = Column(String(1000), doc="方案调整说明")
PROGRAMME_PROGRESS = Column(DECIMAL(9, 0), doc="方案进度代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SPECIAL_CHANGES_MEMO = Column(String(1000), doc="特殊变动说明")
TOT_ACTUAL_TRANSACT_NUM = Column(DECIMAL(20, 4), doc="实际累计变动证券数量(股/张)")
TOTAL_CAPITAL_STOCK = Column(DECIMAL(20, 4), doc="公司总股本(股)")
TRANSACT_OBJECTIVE = Column(String(400), doc="变动目的")
TRANSACT_PERIOD_DESCRIPTION = Column(String(100), doc="变动期间说明")
TRANSACT_SOURCE_FUNDS = Column(String(100), doc="变动资金来源")
TRANSACT_STOCK_SOURCE = Column(String(100), doc="变动股份来源")
TRANSACT_TYPE = Column(String(4), doc="变动方向")
TRANSACTION_MODE = Column(String(100), doc="交易方式")
VARIABLE_PRICE_MEMO = Column(String(100), doc="变动价格说明")
class ASHAREACCOUNTSPAYABLE(Base):
__tablename__ = 'ASHAREACCOUNTSPAYABLE'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="金额")
S_INFO_COMP_NAME = Column(String(100), doc="上游公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="上游公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="上游公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
S_INFO_DISCLOSER = Column(String(100), doc="披露公司ID")
class ASHAREADMINISTRATION(Base):
__tablename__ = 'ASHAREADMINISTRATION'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
S_INFO_MANAGER_NAME = Column(String(80), doc="姓名")
S_INFO_MANAGER_POST = Column(String(40), doc="职务")
S_INFO_MANAGER_STARTDATE = Column(String(8), doc="任职日期")
S_INFO_MANID = Column(String(10), doc="人物id")
class ASHAREANNFINANCIALINDICATOR(Base):
__tablename__ = 'ASHAREANNFINANCIALINDICATOR'
ANN_DT = Column(String(8), doc="公告日期")
CONTRIBUTIONPS = Column(DECIMAL(20, 4), doc="每股社会贡献值(元)")
CRNCY_CODE = Column(String(10), doc="货币代码")
GROWTH_BPS_SH = Column(DECIMAL(22, 4), doc="比年初增长率.归属于母公司股东的每股净资产(%)")
IFLISTED_DATA = Column(DECIMAL(1, 0), doc="是否上市后数据")
MEMO = Column(String(100), doc="备注")
NET_PROFIT = Column(DECIMAL(20, 4), doc="国际会计准则净利润(元)")
NET_PROFIT_YOY = Column(DECIMAL(20, 4), doc="同比增长率.净利润(%)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RD_EXPENSE = Column(DECIMAL(20, 4), doc="研发费用(元)")
REPORT_PERIOD = Column(String(8), doc="报告期")
ROE_DILUTED = Column(DECIMAL(20, 4), doc="净资产收益率-摊薄(%)")
ROE_EX = Column(DECIMAL(20, 4), doc="净资产收益率-扣除(%)")
ROE_EXWEIGHTED = Column(DECIMAL(20, 4), doc="净资产收益率-扣除/加权(%)")
ROE_WEIGHTED = Column(DECIMAL(20, 4), doc="净资产收益率-加权(%)")
S_FA_ARTURN = Column(DECIMAL(20, 4), doc="应收账款周转率(%)")
S_FA_BPS = Column(DECIMAL(22, 4), doc="每股净资产(元)")
S_FA_BPS_ADJUST = Column(DECIMAL(20, 4), doc="每股净资产-调整(元)")
S_FA_BPS_SH = Column(DECIMAL(20, 4), doc="归属于母公司股东的每股净资产(元)")
S_FA_CURRENT = Column(DECIMAL(20, 4), doc="流动比(%)")
S_FA_DEDUCTEDPROFIT = Column(DECIMAL(20, 4), doc="扣除非经常性损益后的净利润(扣除少数股东损益)")
S_FA_DEDUCTEDPROFIT_YOY = Column(DECIMAL(22, 4), doc="同比增长率.扣除非经常性损益后的净利润(扣除少数股东损益)(%)")
S_FA_EPS_BASIC = Column(DECIMAL(20, 4), doc="每股收益-基本")
S_FA_EPS_DILUTED = Column(DECIMAL(20, 4), doc="每股收益-摊薄(元)")
S_FA_EPS_DILUTED2 = Column(DECIMAL(20, 6), doc="每股收益-稀释(元)")
S_FA_EPS_EX = Column(DECIMAL(20, 4), doc="每股收益-扣除(元)")
S_FA_EPS_EXBASIC = Column(DECIMAL(20, 4), doc="每股收益-扣除/基本")
S_FA_EPS_EXDILUTED = Column(DECIMAL(20, 4), doc="每股收益-扣除/稀释(元)")
S_FA_EXTRAORDINARY = Column(DECIMAL(22, 4), doc="非经常性损益(元)")
S_FA_INVTURN = Column(DECIMAL(20, 4), doc="存货周转率(%)")
S_FA_OCFPS = Column(DECIMAL(20, 4), doc="每股经营活动产生的现金流量净额(元)")
S_FA_QUICK = Column(DECIMAL(20, 4), doc="速动比(%)")
S_FA_YOYEBT = Column(DECIMAL(20, 4), doc="同比增长率.利润总额(%)")
S_FA_YOYEPS_BASIC = Column(DECIMAL(22, 4), doc="同比增长率.基本每股收益(%)")
S_FA_YOYEPS_DILUTED = Column(DECIMAL(22, 4), doc="同比增长率.稀释每股收益(%)")
S_FA_YOYEQUITY = Column(DECIMAL(22, 4), doc="比年初增长率.归属母公司的股东权益(%)")
S_FA_YOYOCFPS = Column(DECIMAL(22, 4), doc="同比增长率.每股经营活动产生的现金流量净额(%)")
S_FA_YOYOP = Column(DECIMAL(20, 4), doc="同比增长率.营业利润(%)")
S_FT_DEBTTOASSETS = Column(DECIMAL(20, 4), doc="资产负债率(%)")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_DIV = Column(String(40), doc="分红方案")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
STATEMENT_TYPE = Column(DECIMAL(9, 0), doc="报表类型代码")
YOY_NET_CASH_FLOWS = Column(DECIMAL(22, 4), doc="同比增长率.经营活动产生的现金流量净额(%)")
YOY_ROE_DILUTED = Column(DECIMAL(22, 4), doc="同比增长率.净资产收益率(摊薄)(%)")
class ASHAREAUDITOPINION(Base):
__tablename__ = 'ASHAREAUDITOPINION'
ANN_DT = Column(String(8), doc="公告日期")
ANN_DT1 = Column(String(8), doc="内控审计报告公告日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_AUDIT_FEE_MEMO = Column(String(1000), doc="审计费用说明")
S_AUDIT_RESULT_MEMO = Column(LONGTEXT, doc="审计结果说明")
S_IN_CONTROL_ACCOUNTANT = Column(String(100), doc="内控签字会计师")
S_IN_CONTROL_ACCOUNTING_FIRM = Column(String(10), doc="内控会计师事务所ID")
S_IN_CONTROL_AUDIT = Column(LONGTEXT, doc="内控审计结果说明")
S_IN_CONTROL_AUDIT_OPINION = Column(DECIMAL(9, 0), doc="内控审计意见类别代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_PAY_AUDIT_EXPENSES = Column(DECIMAL(20, 4), doc="当期实付审计费用(总额)(元)")
S_STMNOTE_AUDIT_AGENCY = Column(String(100), doc="会计师事务所")
S_STMNOTE_AUDIT_CATEGORY = Column(DECIMAL(9, 0), doc="审计结果类别代码")
S_STMNOTE_AUDIT_CPA = Column(String(100), doc="签字会计师")
class ASHAREBALANCESHEET(Base):
__tablename__ = 'ASHAREBALANCESHEET'
ACC_EXP = Column(DECIMAL(20, 4), doc="预提费用")
ACCOUNTS_PAYABLE = Column(DECIMAL(20, 4), doc="应付票据及应付账款")
ACCOUNTS_RECEIVABLE = Column(DECIMAL(20, 4), doc="应收款项")
ACCOUNTS_RECEIVABLE_BILL = Column(DECIMAL(20, 4), doc="应收票据及应收账款")
ACCT_PAYABLE = Column(DECIMAL(20, 4), doc="应付账款")
ACCT_RCV = Column(DECIMAL(20, 4), doc="应收账款")
ACTING_TRADING_SEC = Column(DECIMAL(20, 4), doc="代理买卖证券款")
ACTING_UW_SEC = Column(DECIMAL(20, 4), doc="代理承销证券款")
ACTUAL_ANN_DT = Column(String(8), doc="实际公告日期")
ADV_FROM_CUST = Column(DECIMAL(20, 4), doc="预收款项")
AGENCY_BUS_ASSETS = Column(DECIMAL(20, 4), doc="代理业务资产")
AGENCY_BUS_LIAB = Column(DECIMAL(20, 4), doc="代理业务负债")
ANN_DT = Column(String(8), doc="公告日期")
ASSET_DEP_OTH_BANKS_FIN_INST = Column(DECIMAL(20, 4), doc="存放同业和其它金融机构款项")
BONDS_PAYABLE = Column(DECIMAL(20, 4), doc="应付债券")
BORROW_CENTRAL_BANK = Column(DECIMAL(20, 4), doc="向中央银行借款")
CAP_MRGN_PAID = Column(DECIMAL(20, 4), doc="存出资本保证金")
CAP_RSRV = Column(DECIMAL(20, 4), doc="资本公积金")
CAP_STK = Column(DECIMAL(20, 4), doc="股本")
CASH_DEPOSITS_CENTRAL_BANK = Column(DECIMAL(20, 4), doc="现金及存放中央银行款项")
CLAIMS_PAYABLE = Column(DECIMAL(20, 4), doc="应付赔付款")
CLIENTS_CAP_DEPOSIT = Column(DECIMAL(20, 4), doc="客户资金存款")
CLIENTS_RSRV_SETTLE = Column(DECIMAL(20, 4), doc="客户备付金")
CNVD_DIFF_FOREIGN_CURR_STAT = Column(DECIMAL(20, 4), doc="外币报表折算差额")
COMP_TYPE_CODE = Column(String(2), doc="公司类型代码")
CONST_IN_PROG = Column(DECIMAL(20, 4), doc="在建工程")
CONST_IN_PROG_TOT = Column(DECIMAL(20, 4), doc="在建工程(合计)(元)")
CONSUMPTIVE_BIO_ASSETS = Column(DECIMAL(20, 4), doc="消耗性生物资产")
CONTRACT_LIABILITIES = Column(DECIMAL(20, 4), doc="合同负债")
CONTRACTUAL_ASSETS = Column(DECIMAL(20, 4), doc="合同资产")
CRNCY_CODE = Column(String(10), doc="货币代码")
CUST_BANK_DEP = Column(DECIMAL(20, 4), doc="吸收存款")
DEBT_INVESTMENT = Column(DECIMAL(20, 4), doc="债权投资(元)")
DEFERRED_EXP = Column(DECIMAL(20, 4), doc="待摊费用")
DEFERRED_INC = Column(DECIMAL(20, 4), doc="递延收益")
DEFERRED_INC_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="递延收益-非流动负债")
DEFERRED_TAX_ASSETS = Column(DECIMAL(20, 4), doc="递延所得税资产")
DEFERRED_TAX_LIAB = Column(DECIMAL(20, 4), doc="递延所得税负债")
DEPOSIT_RECEIVED = Column(DECIMAL(20, 4), doc="存入保证金")
DEPOSIT_RECEIVED_IB_DEPOSITS = Column(DECIMAL(20, 4), doc="吸收存款及同业存放")
DERIVATIVE_FIN_ASSETS = Column(DECIMAL(20, 4), doc="衍生金融资产")
DERIVATIVE_FIN_LIAB = Column(DECIMAL(20, 4), doc="衍生金融负债")
DVD_PAYABLE = Column(DECIMAL(20, 4), doc="应付股利")
DVD_PAYABLE_INSURED = Column(DECIMAL(20, 4), doc="应付保单红利")
DVD_RCV = Column(DECIMAL(20, 4), doc="应收股利")
EMPL_BEN_PAYABLE = Column(DECIMAL(20, 4), doc="应付职工薪酬")
FIN_ASSETS_AVAIL_FOR_SALE = Column(DECIMAL(20, 4), doc="可供出售金融资产")
FIN_ASSETS_COST_SHARING = Column(DECIMAL(20, 4), doc="以摊余成本计量的金融资产")
FIN_ASSETS_FAIR_VALUE = Column(DECIMAL(20, 4), doc="以公允价值计量且其变动计入其他综合收益的金融资产")
FIX_ASSETS = Column(DECIMAL(20, 4), doc="固定资产")
FIX_ASSETS_DISP = Column(DECIMAL(20, 4), doc="固定资产清理")
FUND_SALES_FIN_ASSETS_RP = Column(DECIMAL(20, 4), doc="卖出回购金融资产款")
GOODWILL = Column(DECIMAL(20, 4), doc="商誉")
HANDLING_CHARGES_COMM_PAYABLE = Column(DECIMAL(20, 4), doc="应付手续费及佣金")
HELD_TO_MTY_INVEST = Column(DECIMAL(20, 4), doc="持有至到期投资")
HFS_ASSETS = Column(DECIMAL(20, 4), doc="持有待售的资产")
HFS_SALES = Column(DECIMAL(20, 4), doc="持有待售的负债")
INCL_PLEDGE_LOAN = Column(DECIMAL(20, 4), doc="其中:质押借款")
INCL_SEAT_FEES_EXCHANGE = Column(DECIMAL(20, 4), doc="其中:交易席位费")
INDEPENDENT_ACCT_ASSETS = Column(DECIMAL(20, 4), doc="独立账户资产")
INDEPENDENT_ACCT_LIAB = Column(DECIMAL(20, 4), doc="独立账户负债")
INSURED_DEPOSIT_INVEST = Column(DECIMAL(20, 4), doc="保户储金及投资款")
INSURED_PLEDGE_LOAN = Column(DECIMAL(20, 4), doc="保户质押贷款")
INT_PAYABLE = Column(DECIMAL(20, 4), doc="应付利息")
INT_RCV = Column(DECIMAL(20, 4), doc="应收利息")
INTANG_ASSETS = Column(DECIMAL(20, 4), doc="无形资产")
INVENTORIES = Column(DECIMAL(20, 4), doc="存货")
INVEST_REAL_ESTATE = Column(DECIMAL(20, 4), doc="投资性房地产")
LEASE_LIAB = Column(DECIMAL(20, 4), doc="租赁负债")
LENDING_FUNDS = Column(DECIMAL(20, 4), doc="融出资金")
LESS_TSY_STK = Column(DECIMAL(20, 4), doc="减:库存股")
LIAB_DEP_OTH_BANKS_FIN_INST = Column(DECIMAL(20, 4), doc="同业和其它金融机构存放款项")
LIFE_INSUR_RSRV = Column(DECIMAL(20, 4), doc="寿险责任准备金")
LOANS_AND_ADV_GRANTED = Column(DECIMAL(20, 4), doc="发放贷款及垫款")
LOANS_OTH_BANKS = Column(DECIMAL(20, 4), doc="拆入资金")
LOANS_TO_OTH_BANKS = Column(DECIMAL(20, 4), doc="拆出资金")
LONG_TERM_DEFERRED_EXP = Column(DECIMAL(20, 4), doc="长期待摊费用")
LONG_TERM_EQY_INVEST = Column(DECIMAL(20, 4), doc="长期股权投资")
LONG_TERM_REC = Column(DECIMAL(20, 4), doc="长期应收款")
LT_BORROW = Column(DECIMAL(20, 4), doc="长期借款")
LT_HEALTH_INSUR_V = Column(DECIMAL(20, 4), doc="长期健康险责任准备金")
LT_PAYABLE = Column(DECIMAL(20, 4), doc="长期应付款")
LT_PAYABLE_TOT = Column(DECIMAL(20, 4), doc="长期应付款(合计)(元)")
LT_PAYROLL_PAYABLE = Column(DECIMAL(20, 4), doc="长期应付职工薪酬")
MINORITY_INT = Column(DECIMAL(20, 4), doc="少数股东权益")
MONETARY_CAP = Column(DECIMAL(20, 4), doc="货币资金")
MRGN_PAID = Column(DECIMAL(20, 4), doc="存出保证金")
NON_CUR_ASSETS_DUE_WITHIN_1Y = Column(DECIMAL(20, 4), doc="一年内到期的非流动资产")
NON_CUR_LIAB_DUE_WITHIN_1Y = Column(DECIMAL(20, 4), doc="一年内到期的非流动负债")
NOTES_PAYABLE = Column(DECIMAL(20, 4), doc="应付票据")
NOTES_RCV = Column(DECIMAL(20, 4), doc="应收票据")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OIL_AND_NATURAL_GAS_ASSETS = Column(DECIMAL(20, 4), doc="油气资产")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTH_ASSETS = Column(DECIMAL(20, 4), doc="其他资产")
OTH_CUR_ASSETS = Column(DECIMAL(20, 4), doc="其他流动资产")
OTH_CUR_LIAB = Column(DECIMAL(20, 4), doc="其他流动负债")
OTH_LIAB = Column(DECIMAL(20, 4), doc="其他负债")
OTH_NON_CUR_ASSETS = Column(DECIMAL(20, 4), doc="其他非流动资产")
OTH_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="其他非流动负债")
OTH_PAYABLE = Column(DECIMAL(20, 4), doc="其他应付款")
OTH_PAYABLE_TOT = Column(DECIMAL(20, 4), doc="其他应付款(合计)(元)")
OTH_RCV = Column(DECIMAL(20, 4), doc="其他应收款")
OTH_RCV_TOT = Column(DECIMAL(20, 4), doc="其他应收款(合计)(元)")
OTHER_COMP_INCOME = Column(DECIMAL(20, 4), doc="其他综合收益")
OTHER_DEBT_INVESTMENT = Column(DECIMAL(20, 4), doc="其他债权投资(元)")
OTHER_EQUITY_INVESTMENT = Column(DECIMAL(20, 4), doc="其他权益工具投资(元)")
OTHER_EQUITY_TOOLS = Column(DECIMAL(20, 4), doc="其他权益工具")
OTHER_EQUITY_TOOLS_P_SHR = Column(DECIMAL(20, 4), doc="其他权益工具:优先股")
OTHER_ILLIQUIDFINANCIAL_ASSETS = Column(DECIMAL(20, 4), doc="其他非流动金融资产(元)")
OTHER_SUSTAINABLE_BOND = Column(DECIMAL(20, 4), doc="其他权益工具:永续债(元)")
OUT_LOSS_RSRV = Column(DECIMAL(20, 4), doc="未决赔款准备金")
PAYABLE_TO_REINSURER = Column(DECIMAL(20, 4), doc="应付分保账款")
PAYABLES = Column(DECIMAL(20, 4), doc="应付款项")
PRECIOUS_METALS = Column(DECIMAL(20, 4), doc="贵金属")
PREM_RCV = Column(DECIMAL(20, 4), doc="应收保费")
PREM_RECEIVED_ADV = Column(DECIMAL(20, 4), doc="预收保费")
PREPAY = Column(DECIMAL(20, 4), doc="预付款项")
PRODUCTIVE_BIO_ASSETS = Column(DECIMAL(20, 4), doc="生产性生物资产")
PROJ_MATL = Column(DECIMAL(20, 4), doc="工程物资")
PROV_NOM_RISKS = Column(DECIMAL(20, 4), doc="一般风险准备")
PROVISIONS = Column(DECIMAL(20, 4), doc="预计负债")
R_AND_D_COSTS = Column(DECIMAL(20, 4), doc="开发支出")
RCV_CEDED_CLAIM_RSRV = Column(DECIMAL(20, 4), doc="应收分保未决赔款准备金")
RCV_CEDED_LIFE_INSUR_RSRV = Column(DECIMAL(20, 4), doc="应收分保寿险责任准备金")
RCV_CEDED_LT_HEALTH_INSUR_RSRV = Column(DECIMAL(20, 4), doc="应收分保长期健康险责任准备金")
RCV_CEDED_UNEARNED_PREM_RSRV = Column(DECIMAL(20, 4), doc="应收分保未到期责任准备金")
RCV_FROM_CEDED_INSUR_CONT_RSRV = Column(DECIMAL(20, 4), doc="应收分保合同准备金")
RCV_FROM_REINSURER = Column(DECIMAL(20, 4), doc="应收分保账款")
RCV_INVEST = Column(DECIMAL(20, 4), doc="应收款项类投资")
RECEIVABLES_FINANCING = Column(DECIMAL(20, 4), doc="应收款项融资")
RED_MONETARY_CAP_FOR_SALE = Column(DECIMAL(20, 4), doc="买入返售金融资产")
REPORT_PERIOD = Column(String(8), doc="报告期")
RIGHT_USE_ASSETS = Column(DECIMAL(20, 4), doc="使用权资产")
RSRV_INSUR_CONT = Column(DECIMAL(20, 4), doc="保险合同准备金")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SETTLE_RSRV = Column(DECIMAL(20, 4), doc="结算备付金")
SPE_BAL_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="资产差额(特殊报表科目)")
SPE_BAL_LIAB_DIFF = Column(DECIMAL(20, 4), doc="负债差额(特殊报表科目)")
SPE_BAL_LIAB_EQY_DIFF = Column(DECIMAL(20, 4), doc="负债及股东权益差额(特殊报表项目)")
SPE_BAL_SHRHLDR_EQY_DIFF = Column(DECIMAL(20, 4), doc="股东权益差额(特殊报表科目)")
SPE_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="流动资产差额(特殊报表科目)")
SPE_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="流动负债差额(特殊报表科目)")
SPE_NON_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="非流动资产差额(特殊报表科目)")
SPE_NON_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="非流动负债差额(特殊报表科目)")
SPECIAL_RSRV = Column(DECIMAL(20, 4), doc="专项储备")
SPECIFIC_ITEM_PAYABLE = Column(DECIMAL(20, 4), doc="专项应付款")
ST_BONDS_PAYABLE = Column(DECIMAL(20, 4), doc="应付短期债券")
ST_BORROW = Column(DECIMAL(20, 4), doc="短期借款")
ST_FINANCING_PAYABLE = Column(DECIMAL(20, 4), doc="应付短期融资款")
STATEMENT_TYPE = Column(String(10), doc="报表类型")
STM_BS_TOT = Column(DECIMAL(20, 4), doc="固定资产(合计)(元)")
SUBR_REC = Column(DECIMAL(20, 4), doc="应收代位追偿款")
SURPLUS_RSRV = Column(DECIMAL(20, 4), doc="盈余公积金")
TAXES_SURCHARGES_PAYABLE = Column(DECIMAL(20, 4), doc="应交税费")
TIME_DEPOSITS = Column(DECIMAL(20, 4), doc="定期存款")
TOT_ASSETS = Column(DECIMAL(20, 4), doc="资产总计")
TOT_BAL_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="资产差额(合计平衡项目)")
TOT_BAL_LIAB_DIFF = Column(DECIMAL(20, 4), doc="负债差额(合计平衡项目)")
TOT_BAL_LIAB_EQY_DIFF = Column(DECIMAL(20, 4), doc="负债及股东权益差额(合计平衡项目)")
TOT_BAL_SHRHLDR_EQY_DIFF = Column(DECIMAL(20, 4), doc="股东权益差额(合计平衡项目)")
TOT_CUR_ASSETS = Column(DECIMAL(20, 4), doc="流动资产合计")
TOT_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="流动资产差额(合计平衡项目)")
TOT_CUR_LIAB = Column(DECIMAL(20, 4), doc="流动负债合计")
TOT_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="流动负债差额(合计平衡项目)")
TOT_LIAB = Column(DECIMAL(20, 4), doc="负债合计")
TOT_LIAB_SHRHLDR_EQY = Column(DECIMAL(20, 4), doc="负债及股东权益总计")
TOT_NON_CUR_ASSETS = Column(DECIMAL(20, 4), doc="非流动资产合计")
TOT_NON_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="非流动资产差额(合计平衡项目)")
TOT_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="非流动负债合计")
TOT_NON_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="非流动负债差额(合计平衡项目)")
TOT_SHR = Column(DECIMAL(20, 4), doc="期末总股本")
TOT_SHRHLDR_EQY_EXCL_MIN_INT = Column(DECIMAL(20, 4), doc="股东权益合计(不含少数股东权益)")
TOT_SHRHLDR_EQY_INCL_MIN_INT = Column(DECIMAL(20, 4), doc="股东权益合计(含少数股东权益)")
TRADABLE_FIN_ASSETS = Column(DECIMAL(20, 4), doc="交易性金融资产")
TRADABLE_FIN_LIAB = Column(DECIMAL(20, 4), doc="交易性金融负债")
UNCONFIRMED_INVEST_LOSS = Column(DECIMAL(20, 4), doc="未确认的投资损失")
UNDISTRIBUTED_PROFIT = Column(DECIMAL(20, 4), doc="未分配利润")
UNEARNED_PREM_RSRV = Column(DECIMAL(20, 4), doc="未到期责任准备金")
WIND_CODE = Column(String(40), doc="Wind代码")
class ASHAREBANKINDICATOR(Base):
__tablename__ = 'ASHAREBANKINDICATOR'
ANN_DT = Column(String(8))
BAD_LOAD_FIVE_CLASS = Column(DECIMAL(20, 4))
CAPI_ADE_RATIO = Column(DECIMAL(20, 4))
CASH_ON_HAND = Column(DECIMAL(20, 4))
CASH_RESERVE_RATIO_CNY = Column(DECIMAL(20, 4))
CASH_RESERVE_RATIO_FC = Column(DECIMAL(20, 4))
CORE_CAPI_ADE_RATIO = Column(DECIMAL(20, 4))
CORE_CAPI_NET_AMOUNT = Column(DECIMAL(20, 4))
COST_INCOME_RATIO = Column(DECIMAL(20, 4))
CRNCY_CODE = Column(String(10))
IBUSINESS_LOAN_RATIO = Column(DECIMAL(20, 4))
INTERECT_COLLECTION_RATIO = Column(DECIMAL(20, 4))
INTEREST_BEARING_ASSET = Column(DECIMAL(20, 4))
INTEREST_BEARING_ASSET_COMP = Column(DECIMAL(20, 4))
INTEREST_BEARING_ASSET_IFPUB = Column(DECIMAL(1, 0))
INTEREST_BEARING_LIA = Column(DECIMAL(20, 4))
INTEREST_BEARING_LIA_COMP = Column(DECIMAL(20, 4))
INTEREST_BEARING_LIA_IFPUB = Column(DECIMAL(1, 0))
LARGEST_CUSTOMER_LOAN = Column(DECIMAL(20, 4))
LEND_TO_BANKS_RATIO = Column(DECIMAL(20, 4))
LOAN_DEPO_RATIO = Column(DECIMAL(20, 4))
LOAN_DEPO_RATIO_NORMB = Column(DECIMAL(20, 4))
LOAN_DEPO_RATIO_RMB = Column(DECIMAL(20, 4))
LOAN_FROM_BANKS_RATIO = Column(DECIMAL(20, 4))
LOAN_LOSS_PROVISION = Column(DECIMAL(20, 4))
LONGTERM_LOANS_RATIO_CNY = Column(DECIMAL(20, 4))
LONGTERM_LOANS_RATIO_FC = Column(DECIMAL(20, 4))
MARKET_RISK_CAPITAL = Column(DECIMAL(20, 4))
NET_CAPITAL = Column(DECIMAL(20, 4))
NET_INTEREST_MARGIN = Column(DECIMAL(20, 4))
NET_INTEREST_MARGIN_IFPUB = Column(DECIMAL(1, 0))
NET_INTEREST_MARGIN_IS_ANN = Column(DECIMAL(20, 4))
NET_INTEREST_SPREAD = Column(DECIMAL(20, 4))
NET_INTEREST_SPREAD_IS_ANN = Column(DECIMAL(20, 4))
NON_INTEREST_INCOME = Column(DECIMAL(20, 4))
NON_INTEREST_MARGIN = Column(DECIMAL(20, 4))
NONEANING_ASSET = Column(DECIMAL(20, 4))
NONEANING_LIA = Column(DECIMAL(20, 4))
NPL_PROVISION_COVERAGE = Column(DECIMAL(20, 4))
NPL_RATIO = Column(DECIMAL(20, 4))
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OVERDUE_LOAN = Column(DECIMAL(20, 4))
OVERSEAS_FUNDS_APP_RATIO = Column(DECIMAL(20, 4))
REPORT_PERIOD = Column(String(8))
RISK_WEIGHT_ASSET = Column(DECIMAL(20, 4))
S_INFO_WINDCODE = Column(String(40))
ST_ASSET_LIQ_RATIO_NORMB = Column(DECIMAL(20, 4))
ST_ASSET_LIQ_RATIO_RMB = Column(DECIMAL(20, 4))
STATEMENT_TYPE = Column(String(10))
TOP_TEN_CUSTOMER_LOAN = Column(DECIMAL(20, 4))
TOTAL_DEPOSIT = Column(DECIMAL(20, 4))
TOTAL_INTEREST_EXP = Column(DECIMAL(20, 4))
TOTAL_INTEREST_INCOME = Column(DECIMAL(20, 4))
TOTAL_LOAN = Column(DECIMAL(20, 4))
class ASHAREBEGUARANTEED(Base):
__tablename__ = 'ASHAREBEGUARANTEED'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="金额")
S_INFO_COMP_NAME = Column(String(100), doc="担保公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="担保公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="担保公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHARECALENDAR(Base):
__tablename__ = 'ASHARECALENDAR'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_EXCHMARKET = Column(String(40), doc="交易所英文简称")
TRADE_DAYS = Column(String(8), doc="交易日")
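def _example_trade_days(session, exchange="SSE"):
    # Minimal usage sketch (assumptions: `session` is a configured SQLAlchemy
    # Session, and "SSE" is an illustrative exchange code, not verified against
    # the live data). Returns one exchange's trading days in ascending order.
    query = (
        session.query(ASHARECALENDAR.TRADE_DAYS)
        .filter(ASHARECALENDAR.S_INFO_EXCHMARKET == exchange)
        .order_by(ASHARECALENDAR.TRADE_DAYS)
    )
    return [day for (day,) in query]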
class ASHARECAPITALIZATION(Base):
__tablename__ = 'ASHARECAPITALIZATION'
ANN_DT = Column(String(8), doc="公告日期")
CHANGE_DT = Column(String(8), doc="变动日期")
CHANGE_DT1 = Column(String(8), doc="变动日期1")
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
FLOAT_A_SHR = Column(DECIMAL(20, 4), doc="流通A股(万股)")
FLOAT_B_SHR = Column(DECIMAL(20, 4), doc="流通B股(万股)")
FLOAT_H_SHR = Column(DECIMAL(20, 4), doc="流通H股(万股)")
FLOAT_OVERSEAS_SHR = Column(DECIMAL(20, 4), doc="境外流通股(万股)")
FLOAT_SHR = Column(DECIMAL(20, 4), doc="流通股(万股)")
IS_VALID = Column(DECIMAL(5, 0), doc="是否有效")
NON_TRADABLE_SHR = Column(DECIMAL(20, 4), doc="非流通股")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTHER_RESTRICTED_SHR = Column(DECIMAL(20, 4), doc="其他限售股")
RESTRICTED_A_SHR = Column(DECIMAL(20, 4), doc="限售A股(万股)")
RESTRICTED_B_SHR = Column(DECIMAL(20, 4), doc="限售B股(万股)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_SHARE_CHANGEREASON = Column(String(30), doc="股本变动原因")
S_SHARE_H = Column(DECIMAL(20, 4), doc="香港上市股")
S_SHARE_NONTRADABLE = Column(DECIMAL(20, 4), doc="股改前非流通股")
S_SHARE_NTRD_DOMESINITOR = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:境内发起人股)")
S_SHARE_NTRD_FUNDBAL = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:基金持股)")
S_SHARE_NTRD_GENJURIS = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:一般法人股)")
S_SHARE_NTRD_INSDEREMP = Column(DECIMAL(20, 4), doc="内部职工股(万股)")
S_SHARE_NTRD_IPOINIP = Column(DECIMAL(20, 4), doc="非流通股(自然人股)")
S_SHARE_NTRD_IPOJURIS = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:募集法人股)")
S_SHARE_NTRD_NET = Column(DECIMAL(20, 4), doc="NET股(万股)")
S_SHARE_NTRD_NONLSTFRGN = Column(DECIMAL(20, 4), doc="非流通股(非上市外资股)")
S_SHARE_NTRD_PRFSHARE = Column(DECIMAL(20, 4), doc="优先股(万股)")
S_SHARE_NTRD_SNORMNGER = Column(DECIMAL(20, 4), doc="流通股(高管持股)")
S_SHARE_NTRD_STAQ = Column(DECIMAL(20, 4), doc="STAQ股(万股)")
S_SHARE_NTRD_STATE = Column(DECIMAL(20, 4), doc="非流通股(国家股)")
S_SHARE_NTRD_STATE_PCT = Column(DECIMAL(20, 4), doc="非流通股(国有股)")
S_SHARE_NTRD_STATJUR = Column(DECIMAL(20, 4), doc="非流通股(国有法人股)")
S_SHARE_NTRD_STRTINVESTOR = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:战略投资者持股)")
S_SHARE_NTRD_SUBDOMESJUR = Column(DECIMAL(20, 4), doc="非流通股(境内法人股)")
S_SHARE_NTRD_TRFNSHARE = Column(DECIMAL(20, 4), doc="转配股(万股)")
S_SHARE_OTCA = Column(DECIMAL(20, 4), doc="三板A股")
S_SHARE_OTCB = Column(DECIMAL(20, 4), doc="三板B股")
S_SHARE_RTD_DOMESJUR = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股:境内法人持股)")
S_SHARE_RTD_DOMESNP = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股:境内自然人持股)")
S_SHARE_RTD_FRGNJUR = Column(DECIMAL(20, 4), doc="限售A股(境外法人持股)")
S_SHARE_RTD_FRGNNP = Column(DECIMAL(20, 4), doc="限售A股(境外自然人持股)")
S_SHARE_RTD_INST = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股:机构配售股)")
S_SHARE_RTD_SENMANAGER = Column(DECIMAL(20, 4), doc="限售股份(高管持股)(万股)")
S_SHARE_RTD_STATE = Column(DECIMAL(20, 4), doc="限售A股(国家持股)")
S_SHARE_RTD_STATEJUR = Column(DECIMAL(20, 4), doc="限售A股(国有法人持股)")
S_SHARE_RTD_SUBFRGN = Column(DECIMAL(20, 4), doc="限售A股(外资持股)")
S_SHARE_RTD_SUBOTHERDOMES = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股)")
S_SHARE_TOTALA = Column(DECIMAL(20, 4), doc="A股合计")
S_SHARE_TOTALB = Column(DECIMAL(20, 4), doc="B股合计")
S_SHARE_TOTALOTC = Column(DECIMAL(20, 4), doc="三板合计")
S_SHARE_TOTALRESTRICTED = Column(DECIMAL(20, 4), doc="限售股合计")
S_SHARE_TOTALTRADABLE = Column(DECIMAL(20, 4), doc="流通股合计")
TOT_SHR = Column(DECIMAL(20, 4), doc="总股本(万股)")
WIND_CODE = Column(String(40), doc="Wind代码")
class ASHARECAPITALOPERATION(Base):
__tablename__ = 'ASHARECAPITALOPERATION'
ANN_DT = Column(String(8))
CRNCY_CODE = Column(String(3))
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CAPITALOPERAT_COMPWINDCODE = Column(String(40))
S_CAPITALOPERATION_AMOUNT = Column(DECIMAL(20, 4))
S_CAPITALOPERATION_COMPANYNAME = Column(String(100))
S_CAPITALOPERATION_ENDDATE = Column(String(8))
S_CAPITALOPERATION_SHARE = Column(DECIMAL(20, 4))
S_INFO_WINDCODE = Column(String(40))
class ASHARECASHFLOW(Base):
__tablename__ = 'ASHARECASHFLOW'
ACTUAL_ANN_DT = Column(String(8), doc="实际公告日期")
AMORT_INTANG_ASSETS = Column(DECIMAL(20, 4), doc="无形资产摊销")
AMORT_LT_DEFERRED_EXP = Column(DECIMAL(20, 4), doc="长期待摊费用摊销")
ANN_DT = Column(String(8), doc="公告日期")
CASH_CASH_EQU_BEG_PERIOD = Column(DECIMAL(20, 4), doc="期初现金及现金等价物余额")
CASH_CASH_EQU_END_PERIOD = Column(DECIMAL(20, 4), doc="期末现金及现金等价物余额")
CASH_PAID_INVEST = Column(DECIMAL(20, 4), doc="投资支付的现金")
CASH_PAY_ACQ_CONST_FIOLTA = Column(DECIMAL(20, 4), doc="购建固定资产、无形资产和其他长期资产支付的现金")
CASH_PAY_BEH_EMPL = Column(DECIMAL(20, 4), doc="支付给职工以及为职工支付的现金")
CASH_PAY_CLAIMS_ORIG_INCO = Column(DECIMAL(20, 4), doc="支付原保险合同赔付款项的现金")
CASH_PAY_DIST_DPCP_INT_EXP = Column(DECIMAL(20, 4), doc="分配股利、利润或偿付利息支付的现金")
CASH_PAY_GOODS_PURCH_SERV_REC = Column(DECIMAL(20, 4), doc="购买商品、接受劳务支付的现金")
CASH_PREPAY_AMT_BORR = Column(DECIMAL(20, 4), doc="偿还债务支付的现金")
CASH_RECP_BORROW = Column(DECIMAL(20, 4), doc="取得借款收到的现金")
CASH_RECP_CAP_CONTRIB = Column(DECIMAL(20, 4), doc="吸收投资收到的现金")
CASH_RECP_DISP_WITHDRWL_INVEST = Column(DECIMAL(20, 4), doc="收回投资收到的现金")
CASH_RECP_PREM_ORIG_INCO = Column(DECIMAL(20, 4), doc="收到原保险合同保费取得的现金")
CASH_RECP_RETURN_INVEST = Column(DECIMAL(20, 4), doc="取得投资收益收到的现金")
CASH_RECP_SG_AND_RS = Column(DECIMAL(20, 4), doc="销售商品、提供劳务收到的现金")
COMM_INSUR_PLCY_PAID = Column(DECIMAL(20, 4), doc="支付保单红利的现金")
COMP_TYPE_CODE = Column(String(2), doc="公司类型代码")
CONV_CORP_BONDS_DUE_WITHIN_1Y = Column(DECIMAL(20, 4), doc="一年内到期的可转换公司债券")
CONV_DEBT_INTO_CAP = Column(DECIMAL(20, 4), doc="债务转为资本")
CREDIT_IMPAIRMENT_LOSS = Column(DECIMAL(20, 4), doc="信用减值损失")
CRNCY_CODE = Column(String(10), doc="货币代码")
DECR_DEFERRED_EXP = Column(DECIMAL(20, 4), doc="待摊费用减少")
DECR_DEFERRED_INC_TAX_ASSETS = Column(DECIMAL(20, 4), doc="递延所得税资产减少")
DECR_INVENTORIES = Column(DECIMAL(20, 4), doc="存货的减少")
DECR_OPER_PAYABLE = Column(DECIMAL(20, 4), doc="经营性应收项目的减少")
DEPR_FA_COGA_DPBA = Column(DECIMAL(20, 4), doc="固定资产折旧、油气资产折耗、生产性生物资产折旧")
EFF_FX_FLU_CASH = Column(DECIMAL(20, 4), doc="汇率变动对现金的影响")
END_BAL_CASH = Column(DECIMAL(20, 4), doc="现金的期末余额")
FA_FNC_LEASES = Column(DECIMAL(20, 4), doc="融资租入固定资产")
FIN_EXP = Column(DECIMAL(20, 4), doc="财务费用")
FREE_CASH_FLOW = Column(DECIMAL(20, 4), doc="企业自由现金流量(FCFF)")
HANDLING_CHRG_PAID = Column(DECIMAL(20, 4), doc="支付手续费的现金")
IM_NET_CASH_FLOWS_OPER_ACT = Column(DECIMAL(20, 4), doc="间接法-经营活动产生的现金流量净额")
IM_NET_INCR_CASH_CASH_EQU = Column(DECIMAL(20, 4), doc="间接法-现金及现金等价物净增加额")
INCL_CASH_REC_SAIMS = Column(DECIMAL(20, 4), doc="其中:子公司吸收少数股东投资收到的现金")
INCL_DVD_PROFIT_PAID_SC_MS = Column(DECIMAL(20, 4), doc="其中:子公司支付给少数股东的股利、利润")
INCR_ACC_EXP = Column(DECIMAL(20, 4), doc="预提费用增加")
INCR_DEFERRED_INC_TAX_LIAB = Column(DECIMAL(20, 4), doc="递延所得税负债增加")
INCR_OPER_PAYABLE = Column(DECIMAL(20, 4), doc="经营性应付项目的增加")
INVEST_LOSS = Column(DECIMAL(20, 4), doc="投资损失")
IS_CALCULATION = Column(DECIMAL(5, 0), doc="是否计算报表")
LESS_BEG_BAL_CASH = Column(DECIMAL(20, 4), doc="减:现金的期初余额")
LESS_BEG_BAL_CASH_EQU = Column(DECIMAL(20, 4), doc="减:现金等价物的期初余额")
LOSS_DISP_FIOLTA = Column(DECIMAL(20, 4), doc="处置固定、无形资产和其他长期资产的损失")
LOSS_FV_CHG = Column(DECIMAL(20, 4), doc="公允价值变动损失")
LOSS_SCR_FA = Column(DECIMAL(20, 4), doc="固定资产报废损失")
NET_CASH_FLOWS_FNC_ACT = Column(DECIMAL(20, 4), doc="筹资活动产生的现金流量净额")
NET_CASH_FLOWS_INV_ACT = Column(DECIMAL(20, 4), doc="投资活动产生的现金流量净额")
NET_CASH_FLOWS_OPER_ACT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额")
NET_CASH_PAY_AQUIS_SOBU = Column(DECIMAL(20, 4), doc="取得子公司及其他营业单位支付的现金净额")
NET_CASH_RECEIVED_REINSU_BUS = Column(DECIMAL(20, 4), doc="收到再保业务现金净额")
NET_CASH_RECP_DISP_FIOLTA = Column(DECIMAL(20, 4), doc="处置固定资产、无形资产和其他长期资产收回的现金净额")
NET_CASH_RECP_DISP_SOBU = Column(DECIMAL(20, 4), doc="处置子公司及其他营业单位收到的现金净额")
NET_INCR_CASH_CASH_EQU = Column(DECIMAL(20, 4), doc="现金及现金等价物净增加额")
NET_INCR_CLIENTS_LOAN_ADV = Column(DECIMAL(20, 4), doc="客户贷款及垫款净增加额")
NET_INCR_DEP_CBOB = Column(DECIMAL(20, 4), doc="存放央行和同业款项净增加额")
NET_INCR_DEP_COB = Column(DECIMAL(20, 4), doc="客户存款和同业存放款项净增加额")
NET_INCR_DISP_FAAS = Column(DECIMAL(20, 4), doc="处置可供出售金融资产净增加额")
NET_INCR_DISP_TFA = Column(DECIMAL(20, 4), doc="处置交易性金融资产净增加额")
NET_INCR_FUND_BORR_OFI = Column(DECIMAL(20, 4), doc="向其他金融机构拆入资金净增加额")
NET_INCR_INSURED_DEP = Column(DECIMAL(20, 4), doc="保户储金净增加额")
NET_INCR_INT_HANDLING_CHRG = Column(DECIMAL(20, 4), doc="收取利息和手续费净增加额")
NET_INCR_LOANS_CENTRAL_BANK = Column(DECIMAL(20, 4), doc="向中央银行借款净增加额")
NET_INCR_LOANS_OTHER_BANK = Column(DECIMAL(20, 4), doc="拆入资金净增加额")
NET_INCR_PLEDGE_LOAN = Column(DECIMAL(20, 4), doc="质押贷款净增加额")
NET_INCR_REPURCH_BUS_FUND = Column(DECIMAL(20, 4), doc="回购业务资金净增加额")
NET_PROFIT = Column(DECIMAL(20, 4), doc="净利润")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTHER_CASH_PAY_RAL_FNC_ACT = Column(DECIMAL(20, 4), doc="支付其他与筹资活动有关的现金")
OTHER_CASH_PAY_RAL_INV_ACT = Column(DECIMAL(20, 4), doc="支付其他与投资活动有关的现金")
OTHER_CASH_PAY_RAL_OPER_ACT = Column(DECIMAL(20, 4), doc="支付其他与经营活动有关的现金")
OTHER_CASH_RECP_RAL_FNC_ACT = Column(DECIMAL(20, 4), doc="收到其他与筹资活动有关的现金")
OTHER_CASH_RECP_RAL_INV_ACT = Column(DECIMAL(20, 4), doc="收到其他与投资活动有关的现金")
OTHER_CASH_RECP_RAL_OPER_ACT = Column(DECIMAL(20, 4), doc="收到其他与经营活动有关的现金")
OTHER_IMPAIR_LOSS_ASSETS = Column(DECIMAL(20, 4), doc="其他资产减值损失")
OTHERS = Column(DECIMAL(20, 4), doc="其他")
PAY_ALL_TYP_TAX = Column(DECIMAL(20, 4), doc="支付的各项税费")
PLUS_END_BAL_CASH_EQU = Column(DECIMAL(20, 4), doc="加:现金等价物的期末余额")
PLUS_PROV_DEPR_ASSETS = Column(DECIMAL(20, 4), doc="加:资产减值准备")
PROC_ISSUE_BONDS = Column(DECIMAL(20, 4), doc="发行债券收到的现金")
RECP_TAX_RENDS = Column(DECIMAL(20, 4), doc="收到的税费返还")
REPORT_PERIOD = Column(String(8), doc="报告期")
RIGHT_USE_ASSETS_DEP = Column(DECIMAL(20, 4), doc="使用权资产折旧")
S_DISMANTLE_CAPITAL_ADD_NET = Column(DECIMAL(20, 4), doc="拆出资金净增加额")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SECURITIE_NETCASH_RECEIVED = Column(DECIMAL(20, 4), doc="代理买卖证券收到的现金净额(元)")
SPE_BAL_CASH_INFLOWS_FNC = Column(DECIMAL(20, 4), doc="筹资活动现金流入差额(特殊报表科目)")
SPE_BAL_CASH_INFLOWS_INV = Column(DECIMAL(20, 4), doc="投资活动现金流入差额(特殊报表科目)")
SPE_BAL_CASH_INFLOWS_OPER = Column(DECIMAL(20, 4), doc="经营活动现金流入差额(特殊报表科目)")
SPE_BAL_CASH_OUTFLOWS_FNC = Column(DECIMAL(20, 4), doc="筹资活动现金流出差额(特殊报表科目)")
SPE_BAL_CASH_OUTFLOWS_INV = Column(DECIMAL(20, 4), doc="投资活动现金流出差额(特殊报表科目)")
SPE_BAL_CASH_OUTFLOWS_OPER = Column(DECIMAL(20, 4), doc="经营活动现金流出差额(特殊报表科目)")
SPE_BAL_NETCASH_EQU_UNDIR = Column(DECIMAL(20, 4), doc="间接法-经营活动现金流量净额差额(特殊报表科目)")
SPE_BAL_NETCASH_INC = Column(DECIMAL(20, 4), doc="现金净增加额差额(特殊报表科目)")
SPE_BAL_NETCASH_INC_UNDIR = Column(DECIMAL(20, 4), doc="间接法-现金净增加额差额(特殊报表科目)")
STATEMENT_TYPE = Column(String(10), doc="报表类型")
STOT_CASH_INFLOWS_FNC_ACT = Column(DECIMAL(20, 4), doc="筹资活动现金流入小计")
STOT_CASH_INFLOWS_INV_ACT = Column(DECIMAL(20, 4), doc="投资活动现金流入小计")
STOT_CASH_INFLOWS_OPER_ACT = Column(DECIMAL(20, 4), doc="经营活动现金流入小计")
STOT_CASH_OUTFLOWS_FNC_ACT = Column(DECIMAL(20, 4), doc="筹资活动现金流出小计")
STOT_CASH_OUTFLOWS_INV_ACT = Column(DECIMAL(20, 4), doc="投资活动现金流出小计")
STOT_CASH_OUTFLOWS_OPER_ACT = Column(DECIMAL(20, 4), doc="经营活动现金流出小计")
TOT_BAL_CASH_INFLOWS_FNC = Column(DECIMAL(20, 4), doc="筹资活动现金流入差额(合计平衡项目)")
TOT_BAL_CASH_INFLOWS_INV = Column(DECIMAL(20, 4), doc="投资活动现金流入差额(合计平衡项目)")
TOT_BAL_CASH_INFLOWS_OPER = Column(DECIMAL(20, 4), doc="经营活动现金流入差额(合计平衡项目)")
TOT_BAL_CASH_OUTFLOWS_FNC = Column(DECIMAL(20, 4), doc="筹资活动现金流出差额(合计平衡项目)")
TOT_BAL_CASH_OUTFLOWS_INV = Column(DECIMAL(20, 4), doc="投资活动现金流出差额(合计平衡项目)")
TOT_BAL_CASH_OUTFLOWS_OPER = Column(DECIMAL(20, 4), doc="经营活动现金流出差额(合计平衡项目)")
TOT_BAL_NETCASH_EQU_UNDIR = Column(DECIMAL(20, 4), doc="间接法-经营活动现金流量净额差额(合计平衡项目)")
TOT_BAL_NETCASH_INC = Column(DECIMAL(20, 4), doc="现金净增加额差额(合计平衡项目)")
TOT_BAL_NETCASH_INC_UNDIR = Column(DECIMAL(20, 4), doc="间接法-现金净增加额差额(合计平衡项目)")
TOT_BAL_NETCASH_OUTFLOWS_FNC = Column(DECIMAL(20, 4), doc="筹资活动产生的现金流量净额差额(合计平衡项目)")
TOT_BAL_NETCASH_OUTFLOWS_INV = Column(DECIMAL(20, 4), doc="投资活动产生的现金流量净额差额(合计平衡项目)")
TOT_BAL_NETCASH_OUTFLOWS_OPER = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额差额(合计平衡项目)")
UNCONFIRMED_INVEST_LOSS = Column(DECIMAL(20, 4), doc="未确认投资损失")
WIND_CODE = Column(String(40), doc="Wind代码")
class ASHARECIRCULATINGHOLDERS(Base):
__tablename__ = 'ASHARECIRCULATINGHOLDERS'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_HOLDER_ENDDATE = Column(String(10), doc="报告期")
S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="持股比例")
S_INFO_COMP_NAME = Column(String(100), doc="股东公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="股东公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="股东公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHARECOCAPITALOPERATION(Base):
__tablename__ = 'ASHARECOCAPITALOPERATION'
ANN_DATE = Column(String(8), doc="公告日期")
OBJECT_ID = Column(String(38), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_EVENT_CATEGORYCODE = Column(DECIMAL(9, 0), doc="事件类型代码")
S_EVENT_DESCRIPTION = Column(String(1000), doc="事件说明")
S_EVENT_ID = Column(String(40), doc="事件ID")
S_FINANCING_AMOUNT = Column(DECIMAL(20, 4), doc="融资金额(人民币)")
S_FINANCING_AMOUNT_US = Column(DECIMAL(20, 4), doc="融资金额(美元)")
S_FINANCING_PROCESS = Column(DECIMAL(9, 0), doc="融资进程")
S_FINANCING_RATE = Column(DECIMAL(20, 4), doc="融资费率")
S_FINANCING_RT = Column(DECIMAL(20, 4), doc="融资利率")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_PB = Column(DECIMAL(20, 4), doc="市净率(PB)")
S_PE = Column(DECIMAL(20, 4), doc="市盈率(PE)")
S_PS = Column(DECIMAL(20, 4), doc="市销率(PS)")
S_VALUATION_AMOUNT = Column(DECIMAL(20, 4), doc="估值金额(人民币)")
S_VALUATION_AMOUNT_US = Column(DECIMAL(20, 4), doc="估值金额(美元)")
class ASHARECOMPANYHOLDSHARES(Base):
__tablename__ = 'ASHARECOMPANYHOLDSHARES'
ANN_DT = Column(String(8))
CAPITALCRNCY_CODE = Column(String(10))
ENDDATE = Column(String(8))
IS_CONSOLIDATE = Column(DECIMAL(5, 4))
NOTCONSOLIDATE_REASON = Column(String(500))
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPERATIONCRNCY_CODE = Column(String(10))
OPMODE = Column(String(1))
RELATIONS_CODE = Column(String(40))
S_CAPITALOPERATION_AMOUNT = Column(DECIMAL(20, 4))
S_CAPITALOPERATION_COMAINBUS = Column(String(100))
S_CAPITALOPERATION_COMPANYID = Column(String(10))
S_CAPITALOPERATION_COMPANYNAME = Column(String(100))
S_CAPITALOPERATION_COREGCAP = Column(DECIMAL(20, 4))
S_CAPITALOPERATION_PCT = Column(DECIMAL(20, 4))
S_INFO_WINDCODE = Column(String(40))
VOTING_RIGHTS = Column(DECIMAL(20, 4))
class ASHARECONCEPTUALPLATE(Base):
__tablename__ = 'ASHARECONCEPTUALPLATE'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMP_NAME = Column(String(100), doc="公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHARECREDITORRIGHTS(Base):
__tablename__ = 'ASHARECREDITORRIGHTS'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="金额")
S_INFO_COMP_NAME = Column(String(100), doc="债务公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="债务公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="债务公司公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHARECUSTOMER(Base):
__tablename__ = 'ASHARECUSTOMER'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="金额")
S_INFO_COMP_NAME = Column(String(100), doc="下游公司名称")
S_INFO_COMP_SNAME = Column(String(100), doc="下游公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="下游公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
S_INFO_DISCLOSER = Column(String(100), doc="披露公司ID")
class ASHAREDEFENDANT(Base):
__tablename__ = 'ASHAREDEFENDANT'
ANN_DATE = Column(String(8), doc="公告日期")
LITIGATION_EVENTS_ID = Column(String(40), doc="诉讼事件ID")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CASE_TYPE = Column(String(10), doc="案件类型")
S_INFO_COMP_NAME = Column(String(100), doc="诉讼公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="诉讼公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="诉讼公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHAREDESCRIPTION(Base):
__tablename__ = 'ASHAREDESCRIPTION'
CRNCY_CODE = Column(String(10), doc="货币代码")
IS_SHSC = Column(DECIMAL(5, 0), doc="是否在沪股通或深港通范围内")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CODE = Column(String(40), doc="交易代码")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPNAME = Column(String(100), doc="公司中文名称")
S_INFO_COMPNAMEENG = Column(String(100), doc="公司英文名称")
S_INFO_DELISTDATE = Column(String(8), doc="退市日期")
S_INFO_EXCHMARKET = Column(String(40), doc="交易所")
S_INFO_ISINCODE = Column(String(40), doc="ISIN代码")
S_INFO_LISTBOARD = Column(String(10), doc="上市板类型")
S_INFO_LISTBOARDNAME = Column(String(10), doc="上市板")
S_INFO_LISTDATE = Column(String(8), doc="上市日期")
S_INFO_NAME = Column(String(50), doc="证券简称")
S_INFO_PINYIN = Column(String(40), doc="简称拼音")
S_INFO_SEDOLCODE = Column(String(40), doc="SEDOL代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
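def _example_active_stocks(session):
    # Minimal lookup sketch (assumption: `session` is a configured SQLAlchemy
    # Session). Lists code/name pairs for stocks that have listed and not yet
    # delisted, i.e. S_INFO_DELISTDATE is still NULL.
    return (
        session.query(ASHAREDESCRIPTION.S_INFO_WINDCODE,
                      ASHAREDESCRIPTION.S_INFO_NAME)
        .filter(ASHAREDESCRIPTION.S_INFO_LISTDATE.isnot(None),
                ASHAREDESCRIPTION.S_INFO_DELISTDATE.is_(None))
        .all()
    )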
class ASHAREDIRECTOR(Base):
__tablename__ = 'ASHAREDIRECTOR'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
S_INFO_MANAGER_NAME = Column(String(80), doc="姓名")
S_INFO_MANAGER_POST = Column(String(40), doc="职务")
S_INFO_MANAGER_STARTDATE = Column(String(8), doc="任职日期")
S_INFO_MANID = Column(String(10), doc="人物id")
class ASHAREDIVIDEND(Base):
__tablename__ = 'ASHAREDIVIDEND'
ANN_DT = Column(String(8), doc="最新公告日期")
CASH_DVD_PER_SH_AFTER_TAX = Column(DECIMAL(24, 8), doc="每股派息(税后)(元)")
CASH_DVD_PER_SH_PRE_TAX = Column(DECIMAL(24, 8), doc="每股派息(税前)(元)")
CRNCY_CODE = Column(String(10), doc="货币代码")
DVD_ANN_DT = Column(String(8), doc="分红实施公告日")
DVD_PAYOUT_DT = Column(String(8), doc="派息日")
EQY_RECORD_DT = Column(String(8), doc="股权登记日")
EX_DT = Column(String(8), doc="除权除息日")
IS_CHANGED = Column(DECIMAL(5, 0), doc="方案是否变更")
IS_TRANSFER = Column(DECIMAL(1, 0), doc="是否不分转")
LISTING_DT_OF_DVD_SHR = Column(String(8), doc="红股上市日")
MEMO = Column(String(200), doc="备注")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="分红年度")
S_DIV_BASEDATE = Column(String(8), doc="基准日期")
S_DIV_BASESHARE = Column(DECIMAL(20, 4), doc="基准股本(万股)")
S_DIV_BONUSRATE = Column(DECIMAL(20, 8), doc="每股送股比例")
S_DIV_CHANGE = Column(String(500), doc="方案变更说明")
S_DIV_CONVERSEDRATE = Column(DECIMAL(20, 8), doc="每股转增比例")
S_DIV_OBJECT = Column(String(100), doc="分红对象")
S_DIV_PREANNDT = Column(String(8), doc="预案预披露公告日")
S_DIV_PRELANDATE = Column(String(8), doc="预案公告日")
S_DIV_PROGRESS = Column(String(10), doc="方案进度")
S_DIV_SMTGDATE = Column(String(8), doc="股东大会公告日")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
STK_DVD_PER_SH = Column(DECIMAL(20, 8), doc="每股送转")
WIND_CODE = Column(String(40), doc="Wind代码")
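def _example_cash_dividends(session, windcode):
    # Minimal usage sketch (assumptions: `session` is a configured SQLAlchemy
    # Session; a `windcode` such as "600000.SH" is illustrative). Returns
    # per-share pre-tax cash dividends keyed by ex-date for one stock,
    # skipping plans where no ex-date has been set yet.
    return (
        session.query(ASHAREDIVIDEND.EX_DT,
                      ASHAREDIVIDEND.CASH_DVD_PER_SH_PRE_TAX)
        .filter(ASHAREDIVIDEND.S_INFO_WINDCODE == windcode,
                ASHAREDIVIDEND.EX_DT.isnot(None))
        .order_by(ASHAREDIVIDEND.EX_DT)
        .all()
    )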
class ASHAREEARNINGEST(Base):
__tablename__ = 'ASHAREEARNINGEST'
ANALYST_ID = Column(String(200), doc="分析师id")
ANALYST_NAME = Column(String(20), doc="分析师名称")
ANN_DT = Column(String(8), doc="公告日期(内部)")
COLLECT_TIME = Column(DateTime, doc="收录时间")
EST_BASE_CAP = Column(DECIMAL(20, 4), doc="预测基准股本(万股)")
EST_BASE_CAP_DIF_CODE = Column(DECIMAL(9, 0), doc="预测基准股本差异原因代码")
EST_DT = Column(String(8), doc="预测日期")
EST_EBIT = Column(DECIMAL(20, 4), doc="预测息税前利润(万元)")
EST_EBITDA = Column(DECIMAL(20, 4), doc="预测息税折旧摊销前利润(万元)")
EST_EPS_DILUTED = Column(DECIMAL(20, 4), doc="预测每股收益(摊薄)(元)")
EST_MAIN_BUS_INC = Column(DECIMAL(20, 4), doc="预测主营业务收入(万元)")
EST_NET_PROFIT = Column(DECIMAL(20, 4), doc="预测净利润(万元)")
FIRST_OPTIME = Column(DateTime, doc="首次入库时间")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_NAME = Column(String(1024), doc="报告标题")
REPORT_SUMMARY = Column(LONGTEXT, doc="报告摘要")
REPORT_TYPECODE = Column(DECIMAL(9, 0), doc="报告类型")
REPORTING_PERIOD = Column(String(8), doc="预测报告期")
RESEARCH_INST_NAME = Column(String(20), doc="研究机构名称")
S_EST_BPS = Column(DECIMAL(20, 4), doc="预测每股净资产")
S_EST_CPS = Column(DECIMAL(20, 4), doc="预测每股现金流")
S_EST_DIVIDENDYIELD = Column(DECIMAL(20, 4), doc="预测股息率")
S_EST_DPS = Column(DECIMAL(20, 4), doc="预测每股股利")
S_EST_EBT = Column(DECIMAL(20, 4), doc="预测利润总额(万元)")
S_EST_ENDDATE = Column(String(8), doc="预测有效截止")
S_EST_EPSBASIC = Column(DECIMAL(20, 4), doc="预测每股收益(基本)(元)")
S_EST_EPSCAL = Column(DECIMAL(20, 4), doc="预测每股收益(换算)")
S_EST_EPSDILUTED = Column(DECIMAL(20, 4), doc="预测每股收益(稀释)(元)")
S_EST_EPSRATE = Column(DECIMAL(20, 4), doc="预测EPS调整比率")
S_EST_EVEBITDA = Column(DECIMAL(20, 4), doc="预测EV/EBITDA")
S_EST_NPCAL = Column(DECIMAL(20, 4), doc="预测净利润(换算)(万元)")
S_EST_NPRATE = Column(DECIMAL(20, 4), doc="预测净利润调整比率")
S_EST_OC = Column(DECIMAL(20, 4), doc="预测营业成本及附加(万元)")
S_EST_OPE = Column(DECIMAL(10, 4), doc="预测主营业务利润率")
S_EST_OPROFIT = Column(DECIMAL(20, 4), doc="预测营业利润(万元)")
S_EST_PB = Column(DECIMAL(20, 4), doc="预测市净率")
S_EST_PE = Column(DECIMAL(20, 4), doc="预测市盈率")
S_EST_ROA = Column(DECIMAL(20, 4), doc="预测总资产收益率")
S_EST_ROE = Column(DECIMAL(20, 4), doc="预测净资产收益率")
S_EST_VALUE_CALCULATION = Column(DECIMAL(5, 0), doc="综合值计算标记")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
WIND_CODE = Column(String(40), doc="Wind代码")
class ASHAREEODPRICES(Base):
__tablename__ = 'ASHAREEODPRICES'
CRNCY_CODE = Column(String(10), doc="货币代码")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_ADJCLOSE = Column(DECIMAL(20, 4), doc="复权收盘价(元)")
S_DQ_ADJFACTOR = Column(DECIMAL(20, 6), doc="复权因子")
S_DQ_ADJHIGH = Column(DECIMAL(20, 4), doc="复权最高价(元)")
S_DQ_ADJLOW = Column(DECIMAL(20, 4), doc="复权最低价(元)")
S_DQ_ADJOPEN = Column(DECIMAL(20, 4), doc="复权开盘价(元)")
S_DQ_ADJPRECLOSE = Column(DECIMAL(20, 4), doc="复权昨收盘价(元)")
S_DQ_AMOUNT = Column(DECIMAL(20, 4), doc="成交金额(千元)")
S_DQ_AVGPRICE = Column(DECIMAL(20, 4), doc="均价(VWAP)")
S_DQ_CHANGE = Column(DECIMAL(20, 4), doc="涨跌(元)")
S_DQ_CLOSE = Column(DECIMAL(20, 4), doc="收盘价(元)")
S_DQ_HIGH = Column(DECIMAL(20, 4), doc="最高价(元)")
S_DQ_LIMIT = Column(DECIMAL(20, 4), doc="涨停价(元)")
S_DQ_LOW = Column(DECIMAL(20, 4), doc="最低价(元)")
S_DQ_OPEN = Column(DECIMAL(20, 4), doc="开盘价(元)")
S_DQ_PCTCHANGE = Column(DECIMAL(20, 4), doc="涨跌幅(%)")
S_DQ_PRECLOSE = Column(DECIMAL(20, 4), doc="昨收盘价(元)")
S_DQ_STOPPING = Column(DECIMAL(20, 4), doc="跌停价(元)")
S_DQ_TRADESTATUS = Column(String(10), doc="交易状态")
S_DQ_TRADESTATUSCODE = Column(DECIMAL(5, 0), doc="交易状态代码")
S_DQ_VOLUME = Column(DECIMAL(20, 4), doc="成交量(手)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_DT = Column(String(8), doc="交易日期")
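def _example_adjusted_closes(session, windcode, start_dt, end_dt):
    # Minimal usage sketch (assumptions: `session` is a configured SQLAlchemy
    # Session; `start_dt`/`end_dt` are String(8) values like TRADE_DT). Pulls
    # the pre-computed adjusted close; the same series could presumably be
    # rebuilt as S_DQ_CLOSE * S_DQ_ADJFACTOR, which is what the adjustment
    # factor column is for, though that relation is not enforced here.
    return (
        session.query(ASHAREEODPRICES.TRADE_DT,
                      ASHAREEODPRICES.S_DQ_ADJCLOSE)
        .filter(ASHAREEODPRICES.S_INFO_WINDCODE == windcode,
                ASHAREEODPRICES.TRADE_DT.between(start_dt, end_dt))
        .order_by(ASHAREEODPRICES.TRADE_DT)
        .all()
    )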
class ASHAREEQUFROINFO(Base):
__tablename__ = 'ASHAREEQUFROINFO'
ANN_DATE = Column(String(8), doc="公告日期")
DISFROZEN_TIME = Column(String(8), doc="解冻日期")
FROZEN_INSTITUTION = Column(String(100), doc="执行冻结机构")
IS_DISFROZEN = Column(DECIMAL(1, 0), doc="是否解冻")
IS_TURN_FROZEN = Column(DECIMAL(1, 0), doc="是否轮候冻结")
OBJECT_ID = Column(String(38), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_FRO_BGDATE = Column(String(8), doc="冻结起始时间")
S_FRO_ENDDATE = Column(String(8), doc="冻结结束时间")
S_FRO_SHARES = Column(DECIMAL(20, 4), doc="冻结数量(万股)")
S_FRO_SHR_RATIO = Column(DECIMAL(20, 4), doc="本次冻结股数占公司总股本比例")
S_HOLDER_ID = Column(String(10), doc="股东ID")
S_HOLDER_NAME = Column(String(100), doc="股东名称")
S_HOLDER_TYPE_CODE = Column(DECIMAL(9, 0), doc="股东类型代码")
S_INFO_COMPCODE = Column(String(10), doc="公司id")
S_TOTAL_HOLDING_SHR = Column(DECIMAL(20, 4), doc="持股总数(万股)")
S_TOTAL_HOLDING_SHR_RATIO = Column(DECIMAL(20, 4), doc="持股总数占公司总股本比例")
SHR_CATEGORY_CODE = Column(DECIMAL(9, 0), doc="股份性质类别代码")
class ASHAREEQUITYPLEDGEINFO(Base):
__tablename__ = 'ASHAREEQUITYPLEDGEINFO'
ANN_DT = Column(String(8), doc="公告日期")
IS_DISCHARGE = Column(DECIMAL(1, 0), doc="是否解押")
IS_EQUITY_PLEDGE_REPO = Column(DECIMAL(1, 0), doc="是否股权质押回购")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DISCHARGE_DATE = Column(String(8), doc="解押日期")
S_HOLDER_ID = Column(String(10), doc="股东ID")
S_HOLDER_NAME = Column(String(200), doc="股东名称")
S_HOLDER_TYPE_CODE = Column(DECIMAL(9, 0), doc="股东类型代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_PLEDGE_BGDATE = Column(String(8), doc="质押起始时间")
S_PLEDGE_ENDDATE = Column(String(8), doc="质押结束时间")
S_PLEDGE_SHARES = Column(DECIMAL(20, 4), doc="质押数量(万股)")
S_PLEDGE_SHR_RATIO = Column(DECIMAL(20, 4), doc="本次质押股数占公司总股本比例")
S_PLEDGOR = Column(String(200), doc="质押方")
S_PLEDGOR_ID = Column(String(10), doc="质押方ID")
S_PLEDGOR_TYPE_CODE = Column(DECIMAL(9, 0), doc="质押方类型代码")
S_REMARK = Column(String(1000), doc="备注")
S_SHR_CATEGORY_CODE = Column(DECIMAL(9, 0), doc="股份性质类别代码")
S_TOTAL_HOLDING_SHR = Column(DECIMAL(20, 4), doc="持股总数")
S_TOTAL_HOLDING_SHR_RATIO = Column(DECIMAL(20, 4), doc="持股总数占公司总股本比例")
S_TOTAL_PLEDGE_SHR = Column(DECIMAL(20, 4), doc="累计质押股数")
class ASHAREEQUITYRELATIONSHIPS(Base):
__tablename__ = 'ASHAREEQUITYRELATIONSHIPS'
ACTUALCONTROLLER_INTRO = Column(String(1000), doc="实际控制人简介")
ACTUALCONTROLLER_IS_ANN = Column(DECIMAL(5, 0), doc="股东是否为公布实际控制人")
ACTUALCONTROLLER_TYPE = Column(String(80), doc="实际控制人类型")
ANN_DT = Column(String(8), doc="公告日期")
ENDDATE = Column(String(8), doc="截止日期")
IS_ACTUALCONTROLLER = Column(DECIMAL(5, 0), doc="股东是否为实际控制人")
IS_CONTROLLING_SHAREHOLDERS = Column(DECIMAL(1, 0), doc="股东是否为公布控股股东")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RELATION_TYPE = Column(String(40), doc="公司与披露方关系")
S_HOLDER_CODE = Column(String(10), doc="股东ID")
S_HOLDER_NAME = Column(String(200), doc="股东名称")
S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="持股比例(%)")
S_HOLDER_TYPE = Column(DECIMAL(5, 0), doc="股东类型")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_COMPNAME = Column(String(200), doc="公司名称")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class ASHAREESOPDESCRIPTION(Base):
__tablename__ = 'ASHAREESOPDESCRIPTION'
ACT_CAP_RATIO = Column(DECIMAL(20, 4), doc="实际占公司总股本比例")
ACT_FUNDSIZE = Column(DECIMAL(20, 4), doc="实际资金规模")
ACT_SHARESNO = Column(DECIMAL(20, 4), doc="实际股票数量")
ACT_SHARESPRICE = Column(DECIMAL(20, 4), doc="实际股票价格")
ANN_DATE_IMPLEMENTATION = Column(String(8), doc="实施公告日")
ANN_DATE_NEW = Column(String(8), doc="最新公告日")
BM_PREPRO_ANN_DT = Column(String(8), doc="董事会预案公告日")
CAPITAL_RESOURCE_CODE = Column(DECIMAL(9, 0), doc="资金来源代码")
CORR_PRONAME = Column(String(100), doc="持股计划对应产品名称")
DURATION = Column(DECIMAL(20, 0), doc="存续期(月)")
EMPL_SUBS_AMT = Column(DECIMAL(20, 4), doc="员工认购金额(万)")
EMPL_SUBS_PROPORTION = Column(DECIMAL(20, 4), doc="员工认购比例(%)")
ESTIMATED_PRICE = Column(DECIMAL(20, 4), doc="标的股票预估价格")
ESTIMATED_VOLUMN = Column(DECIMAL(20, 4), doc="预计股票数量(万)")
EVENT_ID = Column(String(40), doc="事件ID")
INITIAL_CAPITAL = Column(DECIMAL(20, 4), doc="初始资金规模(万元)")
INITIAL_LEVERAGE = Column(DECIMAL(20, 4), doc="初始杠杆")
IS_SELF_MANAGE = Column(DECIMAL(1, 0), doc="是否自行管理")
LOCK_START_DATE = Column(String(8), doc="锁定起始日")
LOCKUP_PERIOD_M = Column(DECIMAL(20, 0), doc="锁定期限")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PROGRESS_CODE = Column(DECIMAL(9, 0), doc="方案进度代码")
RATIO_OWNFUNDS = Column(DECIMAL(20, 4), doc="员工自有资金占比")
RATIO_TO_TOTALSHARES = Column(DECIMAL(20, 4), doc="预计占公司总股本比例(%)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SENMNGR_SUBS_AMT = Column(DECIMAL(20, 4), doc="高管认购金额(万元)")
SENMNGR_SUBS_NO = Column(DECIMAL(20, 0), doc="高管认购人数")
SENMNGR_SUBS_PROPORTION = Column(DECIMAL(20, 4), doc="高管认购比例(%)")
SHAREHOLD_FINDT = Column(String(8), doc="持股完成日")
SHARES_RESOURCE_CODE = Column(DECIMAL(9, 0), doc="股票来源代码")
SHOLDER_MEETING_ANN_DT = Column(String(8), doc="股东大会公告日")
SHOLDERS_LOAN = Column(DECIMAL(20, 4), doc="股东借款金额")
SHOLDERS_LOANRATIO = Column(DECIMAL(20, 4), doc="股东借款比例")
SHOLDERS_NO = Column(DECIMAL(20, 0), doc="持有人数")
SHOLDERS_PROPORTION = Column(DECIMAL(20, 4), doc="持有人占公司员工比例")
class ASHAREESOPTRADINGINFO(Base):
__tablename__ = 'ASHAREESOPTRADINGINFO'
ANN_DT = Column(String(8), doc="公告日期")
END_DT = Column(String(8), doc="截止日期")
ESOP_WINDCODE = Column(String(40), doc="员工持股计划证券ID")
EVENT_ID = Column(String(40), doc="事件ID")
LOCKUP_PERIOD = Column(DECIMAL(20, 0), doc="锁定期限")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RATIO_TO_TOTALSHARES = Column(DECIMAL(20, 4), doc="占公司总股本比例")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_AVG_PRICE = Column(DECIMAL(20, 4), doc="成交均价")
TRADING_VOLUME = Column(DECIMAL(20, 4), doc="成交数量")
class ASHAREFINANCIALDERIVATIVE(Base):
__tablename__ = 'ASHAREFINANCIALDERIVATIVE'
ADMINEXPENSETOGR = Column(DECIMAL(20, 4), doc="行政(管理)费用/营业总收入(%)")
ARTURN = Column(DECIMAL(20, 4), doc="应收账款周转率(次)")
ARTURNDAYS = Column(DECIMAL(20, 4), doc="应收账款周转天数(天)")
ASSETSTOEQUITY = Column(DECIMAL(20, 4), doc="权益乘数")
ASSETSTURN = Column(DECIMAL(20, 4), doc="总资产周转率(次)")
BEGINDATE = Column(String(8), doc="起始日期")
BPS = Column(DECIMAL(20, 4), doc="每股净资产(元)")
CAPITALIZEDTODA = Column(DECIMAL(20, 4), doc="资本支出/折旧和摊销")
CASHRATIO = Column(DECIMAL(20, 4), doc="保守速动比率")
CASHTOLIQDEBT = Column(DECIMAL(20, 4), doc="货币资金/流动负债")
CASHTOLIQDEBTWITHINTEREST = Column(DECIMAL(20, 4), doc="货币资金/带息流动负债")
CATOASSETS = Column(DECIMAL(20, 4), doc="流动资产/总资产(%)")
CATURN = Column(DECIMAL(20, 4), doc="流动资产周转率(次)")
CFPS = Column(DECIMAL(20, 4), doc="每股现金流量净额(元)")
COGSTOSALES = Column(DECIMAL(20, 4), doc="销售成本率(%)")
CONTINUED_NET_PROFIT = Column(DECIMAL(20, 4), doc="持续经营净利润/除税后利润(%)")
CRNCY_CODE = Column(String(10), doc="货币代码")
CURRENT1 = Column(DECIMAL(20, 4), doc="流动比率")
CURRENTDEBTTODEBT = Column(DECIMAL(20, 4), doc="流动负债/负债合计(%)")
DEBTTOASSETS = Column(DECIMAL(20, 4), doc="资产负债率(%)")
DEBTTOEQUITY = Column(DECIMAL(20, 4), doc="产权比率")
DUPONT_ASSETSTOEQUITY = Column(DECIMAL(20, 4), doc="权益乘数(用于杜邦分析)")
EBIT = Column(DECIMAL(20, 4), doc="息税前利润(元)")
EBITDA = Column(DECIMAL(20, 4), doc="息税折旧摊销前利润(元)")
EBITDATODEBT = Column(DECIMAL(20, 4), doc="息税折旧摊销前利润/负债合计")
EBITPS = Column(DECIMAL(20, 4), doc="每股息税前利润(元)")
EBITTOGR = Column(DECIMAL(20, 4), doc="息税前利润/营业总收入(%)")
EBITTOINTEREST = Column(DECIMAL(20, 4), doc="已获利息倍数(EBIT/利息费用)")
ENDDATE = Column(String(8), doc="截止日期")
EPS_BASIC = Column(DECIMAL(20, 4), doc="基本每股收益(元)")
EPS_DILUTED = Column(DECIMAL(20, 4), doc="稀释每股收益(元)")
EPS_DILUTED2 = Column(DECIMAL(20, 4), doc="每股收益(期末摊薄)(元)")
EPS_DILUTED3 = Column(DECIMAL(20, 4), doc="每股收益(扣除/期末股本摊薄)(元)")
EQUITYTODEBT = Column(DECIMAL(20, 4), doc="归属于母公司的股东权益/负债合计")
EQUITYTOINTERESTDEBT = Column(DECIMAL(20, 4), doc="归属于母公司的股东权益/带息债务")
EQUITYTOTOTALCAPITAL = Column(DECIMAL(20, 4), doc="归属于母公司的股东权益/全部投入资本(%)")
EXINTERESTDEBT_CURRENT = Column(DECIMAL(20, 4), doc="无息流动负债(元)")
EXINTERESTDEBT_NONCURRENT = Column(DECIMAL(20, 4), doc="无息非流动负债(元)")
FATURN = Column(DECIMAL(20, 4), doc="固定资产周转率(次)")
FCFE = Column(DECIMAL(20, 4), doc="股权自由现金流量(FCFE)(元)")
FCFEPS = Column(DECIMAL(20, 4), doc="每股股东自由现金流量(元)")
FCFF = Column(DECIMAL(20, 4), doc="企业自由现金流量(FCFF)(元)")
FCFFPS = Column(DECIMAL(20, 4), doc="每股企业自由现金流量(元)")
FINAEXPENSETOGR = Column(DECIMAL(20, 4), doc="财务费用/营业总收入(%)")
FISCALYEAR = Column(String(8), doc="会计年度(Wind判定)")
GCTOGR = Column(DECIMAL(20, 4), doc="营业总成本/营业总收入(%)")
GROSSPROFITMARGIN = Column(DECIMAL(20, 4), doc="销售毛利率(%)")
GRPS = Column(DECIMAL(20, 4), doc="每股营业总收入(元)")
INTDEBTTOTOTALCAP = Column(DECIMAL(20, 4), doc="带息债务/全部投入资本(%)")
INTERESTDEBT = Column(DECIMAL(20, 4), doc="带息债务(元)")
INTERVAL_LENGTH = Column(DECIMAL(20, 4), doc="区间长度(月)")
INVESTCAPITAL = Column(DECIMAL(20, 4), doc="全部投入资本(元)")
INVESTINCOME = Column(DECIMAL(20, 4), doc="价值变动净收益(元)")
INVESTINCOMETOEBT = Column(DECIMAL(20, 4), doc="价值变动净收益/除税前利润(%)")
INVTURN = Column(DECIMAL(20, 4), doc="存货周转率(次)")
INVTURNDAYS = Column(DECIMAL(20, 4), doc="存货周转天数(天)")
LONGDEBTODEBT = Column(DECIMAL(20, 4), doc="非流动负债/负债合计(%)")
LONGDEBTTOWORKINGCAPITAL = Column(DECIMAL(20, 4), doc="长期债务与营运资金比率")
NCATOASSETS = Column(DECIMAL(20, 4), doc="非流动资产/总资产(%)")
NET_PROFIT5 = Column(DECIMAL(20, 4), doc="归属母公司的净利润/净利润(%)")
NET_TOTAL_PROFIT = Column(DECIMAL(20, 4), doc="净利润/利润总额(%)")
NETDEBT = Column(DECIMAL(20, 4), doc="净债务(元)")
NETPROFITMARGIN = Column(DECIMAL(20, 4), doc="销售净利率(%)")
NETWORKINGCAPITAL = Column(DECIMAL(20, 4), doc="营运流动资本(元)")
NONNETOPTOTAXPROFIT = Column(DECIMAL(20, 4), doc="非持续经营净利润/除税后利润(%)")
NONOPERATEPROFITTOEBT = Column(DECIMAL(20, 4), doc="营业外收支净额/除税前利润(%)")
OBJECT_ID = Column(String(38), primary_key=True)
OCFPS = Column(DECIMAL(20, 4), doc="每股经营活动产生的现金流量净额(元)")
OCFTODEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/负债合计")
OCFTOINTERESTDEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/带息债务")
OCFTOOR = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/营业收入(%)")
OCFTOOR1 = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/经营活动净收益")
OCFTOPROFIT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/营业利润(含)(%)")
OCFTOSHORTDEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/流动负债")
OPDATE = Column(DateTime)
OPERATEINCOME = Column(DECIMAL(20, 4), doc="经营活动净收益(元)")
OPERATEINCOMETOEBT = Column(DECIMAL(20, 4), doc="经营活动净收益/除税前利润(%)")
OPMODE = Column(String(1))
OPPROFIT1 = Column(DECIMAL(20, 4), doc="营业利润(含价值变动损益)(元)")
OPTODEBT = Column(DECIMAL(20, 4), doc="营业利润/负债合计")
OPTOGR = Column(DECIMAL(20, 4), doc="营业利润(含价值变动损益)/营业总收入(%)")
OPTOLIQDEBT = Column(DECIMAL(20, 4), doc="营业利润/流动负债")
ORPS = Column(DECIMAL(20, 4), doc="每股营业收入(元)")
PROFITTOGR = Column(DECIMAL(20, 4), doc="净利润/营业总收入(%)")
PROFITTOOP = Column(DECIMAL(20, 4), doc="利润总额/营业总收入(%)")
QUICK = Column(DECIMAL(20, 4), doc="速动比率")
REPORT_TYPE = Column(String(20), doc="报告类型")
RETAINEDEARNINGS = Column(DECIMAL(20, 4), doc="留存收益(元)")
RETAINEDPS = Column(DECIMAL(20, 4), doc="每股留存收益(元)")
ROA = Column(DECIMAL(20, 4), doc="总资产净利润(平均)(%)")
ROA2 = Column(DECIMAL(20, 4), doc="总资产报酬率(平均)(%)")
ROA2_YEARLY = Column(DECIMAL(20, 4), doc="年化总资产报酬率(%)")
ROA_YEARLY = Column(DECIMAL(20, 4), doc="年化总资产净利率(%)")
ROE = Column(DECIMAL(20, 4), doc="净资产收益率(平均)(%)")
ROE_DEDUCTED = Column(DECIMAL(20, 4), doc="净资产收益率(扣除平均)(%)")
ROE_YEARLY = Column(DECIMAL(20, 4), doc="年化净资产收益率(%)")
ROIC = Column(DECIMAL(20, 4), doc="投入资本回报率(平均)(%)")
ROIC_YEARLY = Column(DECIMAL(20, 4), doc="年化投入资本回报率")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_STM_IS = Column(DECIMAL(20, 4), doc="折旧与摊销(元)")
SALEEXPENSETOGR = Column(DECIMAL(20, 4), doc="销售费用/营业总收入(%)")
STATEMENT_TYPE = Column(String(40), doc="报表类型")
SURPLUSCAPITALPS = Column(DECIMAL(20, 4), doc="每股资本公积(元)")
TANGASSETTOINTDEBT = Column(DECIMAL(20, 4), doc="有形资产/带息债务")
TANGIBLEASSET = Column(DECIMAL(20, 4), doc="有形资产(元)")
TANGIBLEASSETSTOASSETS = Column(DECIMAL(20, 4), doc="有形资产/总资产(%)")
TANGIBLEASSETTODEBT = Column(DECIMAL(20, 4), doc="有形资产/负债合计")
TANGIBLEASSETTONETDEBT1 = Column(DECIMAL(20, 4), doc="有形资产/净债务")
TANGIBLEASSETTONETDEBT2 = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/净债务")
TAXTOEBT = Column(DECIMAL(20, 4), doc="所得税/利润总额(%)")
TOT_SHR = Column(DECIMAL(20, 4), doc="期末总股本(股)")
TOTAL_PROFIT_EBIT = Column(DECIMAL(20, 4), doc="利润总额/EBIT(%)")
TURNDAYS = Column(DECIMAL(20, 4), doc="营业周期(天)")
WORKINGCAPITAL = Column(DECIMAL(20, 4), doc="营运资金(元)")
YOY_OR = Column(DECIMAL(20, 4), doc="营业收入同比增长率(%)")
YOY_TR = Column(DECIMAL(20, 4), doc="营业总收入同比增长率(%)")
YOYASSETS = Column(DECIMAL(20, 4), doc="资产总计同比增长率(%)")
YOYBPS = Column(DECIMAL(20, 4), doc="每股净资产同比增长率(%)")
YOYEBT = Column(DECIMAL(20, 4), doc="利润总额同比增长率(%)")
YOYEPS_BASIC = Column(DECIMAL(20, 4), doc="基本每股收益同比增长率(%)")
YOYEPS_DILUTED = Column(DECIMAL(20, 4), doc="稀释每股收益同比增长率(%)")
YOYEQUITY = Column(DECIMAL(20, 4), doc="归属母公司的股东权益同比增长率(%)")
YOYNETPROFIT = Column(DECIMAL(20, 4), doc="归属母公司股东的净利润同比增长率(%)")
YOYNETPROFIT_DEDUCTED = Column(DECIMAL(20, 4), doc="归属母公司股东的净利润-扣除非经常损益同比增长率(%)")
YOYOCF = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额同比增长率(%)")
YOYOCFPS = Column(DECIMAL(20, 4), doc="每股经营活动产生的现金流量净额同比增长率(%)")
YOYOP = Column(DECIMAL(20, 4), doc="营业利润同比增长率(含)(%)")
YOYROE = Column(DECIMAL(20, 4), doc="净资产收益率(平均)同比增长率(%)")
class ASHAREFINANCIALINDICATOR(Base):
__tablename__ = 'ASHAREFINANCIALINDICATOR'
ANN_DT = Column(String(8), doc="公告日期")
CRNCY_CODE = Column(String(10), doc="货币代码")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RD_EXPENSE = Column(DECIMAL(20, 4), doc="研发费用")
REPORT_PERIOD = Column(String(8), doc="报告期")
S_FA_ADMINEXPENSETOGR = Column(DECIMAL(20, 4), doc="管理费用/营业总收入")
S_FA_ARTURN = Column(DECIMAL(20, 4), doc="应收账款周转率")
S_FA_ARTURNDAYS = Column(DECIMAL(20, 4), doc="应收账款周转天数")
S_FA_ASSETSTOEQUITY = Column(DECIMAL(20, 4), doc="权益乘数")
S_FA_ASSETSTURN = Column(DECIMAL(20, 4), doc="总资产周转率")
S_FA_BPS = Column(DECIMAL(20, 4), doc="每股净资产")
S_FA_CAPITALIZEDTODA = Column(DECIMAL(20, 4), doc="资本支出/折旧和摊销")
S_FA_CASHRATIO = Column(DECIMAL(20, 4), doc="保守速动比率")
S_FA_CASHTOLIQDEBT = Column(DECIMAL(20, 4), doc="货币资金/流动负债")
S_FA_CASHTOLIQDEBTWITHINTEREST = Column(DECIMAL(20, 4), doc="货币资金/带息流动负债")
S_FA_CATOASSETS = Column(DECIMAL(20, 4), doc="流动资产/总资产")
S_FA_CATURN = Column(DECIMAL(20, 4), doc="流动资产周转率")
S_FA_CFPS = Column(DECIMAL(20, 4), doc="每股现金流量净额")
S_FA_COGSTOSALES = Column(DECIMAL(20, 4), doc="销售成本率")
S_FA_CURRENT = Column(DECIMAL(20, 4), doc="流动比率")
S_FA_CURRENTDEBTTODEBT = Column(DECIMAL(20, 4), doc="流动负债/负债合计")
S_FA_DEBTTOASSETS = Column(DECIMAL(20, 4), doc="资产负债率")
S_FA_DEBTTOEQUITY = Column(DECIMAL(20, 4), doc="产权比率")
S_FA_DEDUCTEDPROFIT = Column(DECIMAL(20, 4), doc="扣除非经常性损益后的净利润(扣除少数股东损益)")
S_FA_DEDUCTEDPROFITTOPROFIT = Column(DECIMAL(20, 4), doc="扣除非经常损益后的净利润/净利润")
S_FA_DUPONT_ASSETSTOEQUITY = Column(DECIMAL(20, 4), doc="权益乘数(用于杜邦分析)")
S_FA_DUPONT_ROA = Column(DECIMAL(20, 4), doc="总资产净利率(杜邦分析)")
S_FA_EBIT = Column(DECIMAL(20, 4), doc="息税前利润")
S_FA_EBITDA = Column(DECIMAL(20, 4), doc="息税折旧摊销前利润")
S_FA_EBITDATODEBT = Column(DECIMAL(20, 4), doc="息税折旧摊销前利润/负债合计")
S_FA_EBITPS = Column(DECIMAL(20, 4), doc="每股息税前利润")
S_FA_EBITTOGR = Column(DECIMAL(20, 4), doc="息税前利润/营业总收入")
S_FA_EBITTOINTEREST = Column(DECIMAL(20, 4), doc="已获利息倍数(EBIT/利息费用)")
S_FA_EPS_BASIC = Column(DECIMAL(20, 4), doc="基本每股收益")
S_FA_EPS_DILUTED = Column(DECIMAL(20, 4), doc="稀释每股收益")
S_FA_EPS_DILUTED2 = Column(DECIMAL(20, 4), doc="期末摊薄每股收益")
S_FA_EQUITYTODEBT = Column(DECIMAL(20, 4), doc="归属于母公司的股东权益/负债合计")
S_FA_EQUITYTOINTERESTDEBT = Column(DECIMAL(20, 4), doc="归属于母公司的股东权益/带息债务")
S_FA_EQUITYTOTOTALCAPITAL = Column(DECIMAL(20, 4), doc="归属于母公司的股东权益/全部投入资本")
S_FA_EXINTERESTDEBT_CURRENT = Column(DECIMAL(20, 4), doc="无息流动负债")
S_FA_EXINTERESTDEBT_NONCURRENT = Column(DECIMAL(20, 4), doc="无息非流动负债")
S_FA_EXPENSETOSALES = Column(DECIMAL(20, 4), doc="销售期间费用率")
S_FA_EXTRAORDINARY = Column(DECIMAL(20, 4), doc="非经常性损益")
S_FA_FATURN = Column(DECIMAL(20, 4), doc="固定资产周转率")
S_FA_FCFE = Column(DECIMAL(20, 4), doc="股权自由现金流量(FCFE)")
S_FA_FCFEPS = Column(DECIMAL(20, 4), doc="每股股东自由现金流量")
S_FA_FCFF = Column(DECIMAL(20, 4), doc="企业自由现金流量(FCFF)")
S_FA_FCFFPS = Column(DECIMAL(20, 4), doc="每股企业自由现金流量")
S_FA_FINAEXPENSETOGR = Column(DECIMAL(20, 4), doc="财务费用/营业总收入")
S_FA_GCTOGR = Column(DECIMAL(20, 4), doc="营业总成本/营业总收入")
S_FA_GROSSMARGIN = Column(DECIMAL(20, 4), doc="毛利")
S_FA_GROSSPROFITMARGIN = Column(DECIMAL(20, 4), doc="销售毛利率")
S_FA_GRPS = Column(DECIMAL(20, 4), doc="每股营业总收入")
S_FA_IMPAIRTOGR_TTM = Column(DECIMAL(20, 4), doc="资产减值损失/营业总收入")
S_FA_INTDEBTTOTOTALCAP = Column(DECIMAL(20, 4), doc="带息债务/全部投入资本")
S_FA_INTERESTDEBT = Column(DECIMAL(20, 4), doc="带息债务")
S_FA_INVESTCAPITAL = Column(DECIMAL(20, 4), doc="全部投入资本")
S_FA_INVESTINCOME = Column(DECIMAL(20, 4), doc="价值变动净收益")
S_FA_INVESTINCOMETOEBT = Column(DECIMAL(20, 4), doc="价值变动净收益/利润总额")
S_FA_INVTURN = Column(DECIMAL(20, 4), doc="存货周转率")
S_FA_INVTURNDAYS = Column(DECIMAL(20, 4), doc="存货周转天数")
S_FA_LONGDEBTODEBT = Column(DECIMAL(20, 4), doc="非流动负债/负债合计")
S_FA_LONGDEBTTOWORKINGCAPITAL = Column(DECIMAL(20, 4), doc="长期债务与营运资金比率")
S_FA_NCATOASSETS = Column(DECIMAL(20, 4), doc="非流动资产/总资产")
S_FA_NETDEBT = Column(DECIMAL(20, 4), doc="净债务")
S_FA_NETPROFITMARGIN = Column(DECIMAL(20, 4), doc="销售净利率")
S_FA_NETWORKINGCAPITAL = Column(DECIMAL(20, 4), doc="营运流动资本")
S_FA_NONOPERATEPROFITTOEBT = Column(DECIMAL(20, 4), doc="营业外收支净额/利润总额")
S_FA_NONOPPROFIT = Column(DECIMAL(20, 4), doc="非营业利润")
S_FA_NOPTOEBT = Column(DECIMAL(20, 4), doc="非营业利润/利润总额")
S_FA_OCFPS = Column(DECIMAL(20, 4), doc="每股经营活动产生的现金流量净额")
S_FA_OCFTODEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/负债合计")
S_FA_OCFTOINTERESTDEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/带息债务")
S_FA_OCFTONETDEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/净债务")
S_FA_OCFTOOPERATEINCOME = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/经营活动净收益")
S_FA_OCFTOOR = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/营业收入")
S_FA_OCFTOPROFIT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/营业利润")
S_FA_OCFTOSHORTDEBT = Column(DECIMAL(20, 4), doc="经营活动产生的现金流量净额/流动负债")
S_FA_OPERATEINCOME = Column(DECIMAL(20, 4), doc="经营活动净收益")
S_FA_OPERATEINCOMETOEBT = Column(DECIMAL(20, 4), doc="经营活动净收益/利润总额")
S_FA_OPTODEBT = Column(DECIMAL(20, 4), doc="营业利润/负债合计")
S_FA_OPTOEBT = Column(DECIMAL(20, 4), doc="营业利润/利润总额")
S_FA_OPTOGR = Column(DECIMAL(20, 4), doc="营业利润/营业总收入")
S_FA_OPTOLIQDEBT = Column(DECIMAL(20, 4), doc="营业利润/流动负债")
S_FA_ORPS = Column(DECIMAL(20, 4), doc="每股营业收入")
S_FA_PREFINEXPENSE_OPPROFIT = Column(DECIMAL(20, 4), doc="扣除财务费用前营业利润")
S_FA_PROFITTOGR = Column(DECIMAL(20, 4), doc="净利润/营业总收入")
S_FA_PROFITTOOP = Column(DECIMAL(20, 4), doc="利润总额/营业收入")
S_FA_QUICK = Column(DECIMAL(20, 4), doc="速动比率")
S_FA_RETAINEDEARNINGS = Column(DECIMAL(20, 4), doc="留存收益")
S_FA_RETAINEDPS = Column(DECIMAL(20, 4), doc="每股留存收益")
S_FA_ROA = Column(DECIMAL(20, 4), doc="总资产净利率")
S_FA_ROA2 = Column(DECIMAL(20, 4), doc="总资产报酬率")
S_FA_ROA2_YEARLY = Column(DECIMAL(20, 4), doc="年化总资产报酬率")
S_FA_ROA_YEARLY = Column(DECIMAL(20, 4), doc="年化总资产净利率")
S_FA_ROE = Column(DECIMAL(20, 4), doc="净资产收益率")
S_FA_ROE_AVG = Column(DECIMAL(20, 4), doc="平均净资产收益率(增发条件)")
S_FA_ROE_DEDUCTED = Column(DECIMAL(20, 4), doc="净资产收益率(扣除非经常损益)")
S_FA_ROE_YEARLY = Column(DECIMAL(20, 4), doc="年化净资产收益率")
S_FA_ROIC = Column(DECIMAL(20, 4), doc="投入资本回报率")
S_FA_ROIC_YEARLY = Column(DECIMAL(20, 4), doc="年化投入资本回报率")
S_FA_SALEEXPENSETOGR = Column(DECIMAL(20, 4), doc="销售费用/营业总收入")
S_FA_SALESCASHINTOOR = Column(DECIMAL(20, 4), doc="销售商品提供劳务收到的现金/营业收入")
S_FA_SURPLUSCAPITALPS = Column(DECIMAL(20, 4), doc="每股资本公积")
S_FA_SURPLUSRESERVEPS = Column(DECIMAL(20, 4), doc="每股盈余公积")
S_FA_TANGASSETTOINTDEBT = Column(DECIMAL(20, 4), doc="有形资产/带息债务")
S_FA_TANGIBLEASSET = Column(DECIMAL(20, 4), doc="有形资产")
S_FA_TANGIBLEASSETSTOASSETS = Column(DECIMAL(20, 4), doc="有形资产/总资产")
S_FA_TANGIBLEASSETTODEBT = Column(DECIMAL(20, 4), doc="有形资产/负债合计")
S_FA_TANGIBLEASSETTONETDEBT = Column(DECIMAL(20, 4), doc="有形资产/净债务")
S_FA_TAXTOEBT = Column(DECIMAL(20, 4), doc="所得税/利润总额")
S_FA_TOT_FATURN = Column(DECIMAL(20, 4), doc="固定资产合计周转率")
S_FA_TURNDAYS = Column(DECIMAL(20, 4), doc="营业周期")
S_FA_UNDISTRIBUTEDPS = Column(DECIMAL(20, 4), doc="每股未分配利润")
S_FA_WORKINGCAPITAL = Column(DECIMAL(20, 4), doc="营运资金")
S_FA_YOY_EQUITY = Column(DECIMAL(20, 4), doc="净资产(同比增长率)")
S_FA_YOY_OR = Column(DECIMAL(20, 4), doc="营业收入同比增长率(%)")
S_FA_YOY_TR = Column(DECIMAL(20, 4), doc="营业总收入同比增长率(%)")
S_FA_YOYASSETS = Column(DECIMAL(20, 4), doc="相对年初增长率-资产总计(%)")
S_FA_YOYBPS = Column(DECIMAL(20, 4), doc="相对年初增长率-每股净资产(%)")
S_FA_YOYEBT = Column(DECIMAL(20, 4), doc="同比增长率-利润总额(%)")
S_FA_YOYEPS_BASIC = Column(DECIMAL(20, 4), doc="同比增长率-基本每股收益(%)")
S_FA_YOYEPS_DILUTED = Column(DECIMAL(20, 4), doc="同比增长率-稀释每股收益(%)")
S_FA_YOYEQUITY = Column(DECIMAL(20, 4), doc="相对年初增长率-归属母公司的股东权益(%)")
S_FA_YOYNETPROFIT = Column(DECIMAL(20, 4), doc="同比增长率-归属母公司股东的净利润(%)")
S_FA_YOYNETPROFIT_DEDUCTED = Column(DECIMAL(20, 4), doc="同比增长率-归属母公司股东的净利润-扣除非经常损益(%)")
S_FA_YOYOCF = Column(DECIMAL(20, 4), doc="同比增长率-经营活动产生的现金流量净额(%)")
S_FA_YOYOCFPS = Column(DECIMAL(20, 4), doc="同比增长率-每股经营活动产生的现金流量净额(%)")
S_FA_YOYOP = Column(DECIMAL(20, 4), doc="同比增长率-营业利润(%)")
S_FA_YOYROE = Column(DECIMAL(20, 4), doc="同比增长率-净资产收益率(摊薄)(%)")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_QFA_ADMINEXPENSETOGR = Column(DECIMAL(20, 4), doc="单季度.管理费用/营业总收入")
S_QFA_CGRGR = Column(DECIMAL(20, 4), doc="单季度.营业总收入环比增长率(%)")
S_QFA_CGRNETPROFIT = Column(DECIMAL(20, 4), doc="单季度.归属母公司股东的净利润环比增长率(%)")
S_QFA_CGROP = Column(DECIMAL(20, 4), doc="单季度.营业利润环比增长率(%)")
S_QFA_CGRPROFIT = Column(DECIMAL(20, 4), doc="单季度.净利润环比增长率(%)")
S_QFA_CGRSALES = Column(DECIMAL(20, 4), doc="单季度.营业收入环比增长率(%)")
S_QFA_DEDUCTEDPROFIT = Column(DECIMAL(20, 4), doc="单季度.扣除非经常损益后的净利润")
S_QFA_DEDUCTEDPROFITTOPROFIT = Column(DECIMAL(20, 4), doc="单季度.扣除非经常损益后的净利润/净利润")
S_QFA_EPS = Column(DECIMAL(24, 6), doc="单季度.每股收益")
S_QFA_EXPENSETOSALES = Column(DECIMAL(20, 4), doc="单季度.销售期间费用率")
S_QFA_FINAEXPENSETOGR = Column(DECIMAL(20, 4), doc="单季度.财务费用/营业总收入")
S_QFA_GCTOGR = Column(DECIMAL(20, 4), doc="单季度.营业总成本/营业总收入")
S_QFA_GROSSPROFITMARGIN = Column(DECIMAL(20, 4), doc="单季度.销售毛利率")
S_QFA_IMPAIRTOGR_TTM = Column(DECIMAL(20, 4), doc="单季度.资产减值损失/营业总收入")
S_QFA_INVESTINCOME = Column(DECIMAL(20, 4), doc="单季度.价值变动净收益")
S_QFA_INVESTINCOMETOEBT = Column(DECIMAL(20, 4), doc="单季度.价值变动净收益/利润总额")
S_QFA_NETPROFITMARGIN = Column(DECIMAL(20, 4), doc="单季度.销售净利率")
S_QFA_OCFTOOR = Column(DECIMAL(20, 4), doc="单季度.经营活动产生的现金流量净额/经营活动净收益")
S_QFA_OCFTOSALES = Column(DECIMAL(20, 4), doc="单季度.经营活动产生的现金流量净额/营业收入")
S_QFA_OPERATEINCOME = Column(DECIMAL(20, 4), doc="单季度.经营活动净收益")
S_QFA_OPERATEINCOMETOEBT = Column(DECIMAL(20, 4), doc="单季度.经营活动净收益/利润总额")
S_QFA_OPTOGR = Column(DECIMAL(20, 4), doc="单季度.营业利润/营业总收入")
S_QFA_PROFITTOGR = Column(DECIMAL(20, 4), doc="单季度.净利润/营业总收入")
S_QFA_ROA = Column(DECIMAL(20, 4), doc="单季度.总资产净利润")
S_QFA_ROE = Column(DECIMAL(24, 6), doc="单季度.净资产收益率")
S_QFA_ROE_DEDUCTED = Column(DECIMAL(24, 6), doc="单季度.净资产收益率(扣除非经常损益)")
S_QFA_SALEEXPENSETOGR = Column(DECIMAL(20, 4), doc="单季度.销售费用/营业总收入")
S_QFA_SALESCASHINTOOR = Column(DECIMAL(20, 4), doc="单季度.销售商品提供劳务收到的现金/营业收入")
S_QFA_YOYGR = Column(DECIMAL(20, 4), doc="单季度.营业总收入同比增长率(%)")
S_QFA_YOYNETPROFIT = Column(DECIMAL(20, 4), doc="单季度.归属母公司股东的净利润同比增长率(%)")
S_QFA_YOYOP = Column(DECIMAL(20, 4), doc="单季度.营业利润同比增长率(%)")
S_QFA_YOYPROFIT = Column(DECIMAL(20, 4), doc="单季度.净利润同比增长率(%)")
S_QFA_YOYSALES = Column(DECIMAL(20, 4), doc="单季度.营业收入同比增长率(%)")
S_STM_BS = Column(DECIMAL(20, 4), doc="固定资产合计")
S_STM_IS = Column(DECIMAL(20, 4), doc="折旧与摊销")
S_STMNOTE_FINEXP = Column(DECIMAL(20, 4), doc="利息费用")
WAA_ROE = Column(DECIMAL(24, 6), doc="加权平均净资产收益率")
WIND_CODE = Column(String(40), doc="Wind代码")
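def _example_annual_roe(session, windcode):
    # Minimal usage sketch (assumptions: `session` is a configured SQLAlchemy
    # Session, and REPORT_PERIOD follows the "YYYYMMDD" convention so annual
    # reports end in "1231" -- an assumption about the data, not enforced by
    # the mapping). Returns one stock's annual ROE series.
    return (
        session.query(ASHAREFINANCIALINDICATOR.REPORT_PERIOD,
                      ASHAREFINANCIALINDICATOR.S_FA_ROE)
        .filter(ASHAREFINANCIALINDICATOR.S_INFO_WINDCODE == windcode,
                ASHAREFINANCIALINDICATOR.REPORT_PERIOD.like("%1231"))
        .order_by(ASHAREFINANCIALINDICATOR.REPORT_PERIOD)
        .all()
    )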
class ASHAREFLOATHOLDER(Base):
__tablename__ = 'ASHAREFLOATHOLDER'
ANN_DT = Column(String(8), doc="公告日期")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_HOLDER_ENDDATE = Column(String(8), doc="截止日期")
S_HOLDER_HOLDERCATEGORY = Column(String(1), doc="股东类型")
S_HOLDER_NAME = Column(String(300), doc="持有人")
S_HOLDER_QUANTITY = Column(DECIMAL(20, 4), doc="数量(股)")
S_HOLDER_WINDNAME = Column(String(200), doc="持有人(容错后)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class ASHAREFREEFLOAT(Base):
__tablename__ = 'ASHAREFREEFLOAT'
ANN_DT = Column(String(8), doc="公告日期")
CHANGE_DT = Column(String(8), doc="变动日期(除权日)")
CHANGE_DT1 = Column(String(8), doc="变动日期(上市日)")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_SHARE_FREESHARES = Column(DECIMAL(20, 4), doc="自由流通股本(万股)")
class ASHAREGROUP(Base):
__tablename__ = 'ASHAREGROUP'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMP_NAME = Column(String(100), doc="集团公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="集团公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="集团公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHAREGROUPINFORMATION(Base):
__tablename__ = 'ASHAREGROUPINFORMATION'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMP_NAME = Column(String(100), doc="集团公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="集团公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="集团公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHAREGUARANTEERELATIONSHIP(Base):
__tablename__ = 'ASHAREGUARANTEERELATIONSHIP'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="金额")
S_INFO_COMP_NAME = Column(String(100), doc="被担保公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="被担保公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="被担保公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHAREGUARANTEESTATISTICS(Base):
__tablename__ = 'ASHAREGUARANTEESTATISTICS'
AMOUNT_OF_GUARANTEE = Column(DECIMAL(20, 4), doc="担保总额")
AMOUNT_OF_GUARANTEE_RATE = Column(DECIMAL(20, 4), doc="担保总额占净资产比例")
AMOUNT_OF_GUARANTEE_TOTAL = Column(DECIMAL(20, 4), doc="担保额度合计")
ANN_DATE = Column(String(8), doc="公告日期")
CONTROLLING_TOTAL = Column(DECIMAL(20, 4), doc="为控股股东及其他关联方提供担保金额")
CURRENCY_CODE = Column(String(10), doc="货币代码")
DEADLINE = Column(String(8), doc="截止日期")
EXTERNAL_GUARANTEE = Column(DECIMAL(20, 4), doc="对外担保额度合计")
GUARANTEE_BALANCE_TOTAL = Column(DECIMAL(20, 4), doc="担保余额合计")
HOLDING_AMOUNT_OF_GUARANTEE = Column(DECIMAL(20, 4), doc="对控股子公司担保额度合计")
HOLDING_TOTAL = Column(DECIMAL(20, 4), doc="对控股子公司担保余额合计")
HOLDING_TOTAL_AMOUNT_GUARANTEE = Column(DECIMAL(20, 4), doc="对控股子公司担保发生额合计")
IS_MORE_THAN_NET_ASSETS = Column(DECIMAL(5, 0), doc="担保总额是否超过净资产50%")
MORE_THAN_NET_ASSETS_AMOUNT = Column(DECIMAL(20, 4), doc="担保总额超过净资产50%部分的金额")
NET_ASSETS_RATE = Column(DECIMAL(20, 4), doc="担保总额占净资产比例(%)(计算值)")
NET_ASSETS_RATE2 = Column(DECIMAL(20, 4), doc="担保额度占净资产比例")
OBJECT_ID = Column(String(38), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMPCODE = Column(String(10), doc="公司id")
TOTAL_AMOUNT_GUARANTEE = Column(DECIMAL(20, 4), doc="担保发生额合计")
TOTAL_AMOUNT_GUARANTEE1 = Column(DECIMAL(20, 4), doc="为高负债对象提供担保金额")
VIOLATION_AMOUNT_GUARANTEE = Column(DECIMAL(20, 4), doc="违规担保总额")
class ASHAREHOLDER(Base):
__tablename__ = 'ASHAREHOLDER'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_HOLDER_ENDDATE = Column(String(10), doc="报告期")
S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="持股比例")
S_INFO_COMP_NAME = Column(String(100), doc="股东公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="股东公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="股东公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHAREHOLDERNUMBER(Base):
__tablename__ = 'ASHAREHOLDERNUMBER'
ANN_DT = Column(String(8), doc="公告日期")
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_HOLDER_ENDDATE = Column(String(8), doc="截止日期")
S_HOLDER_NUM = Column(DECIMAL(20, 4), doc="A股股东户数")
S_HOLDER_TOTAL_NUM = Column(DECIMAL(20, 4), doc="股东总户数")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
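def _example_holder_count_trend(session, windcode):
    # Minimal usage sketch (assumption: `session` is a configured SQLAlchemy
    # Session). Tracks how the A-share holder count evolves across disclosure
    # cut-off dates for one stock.
    return (
        session.query(ASHAREHOLDERNUMBER.S_HOLDER_ENDDATE,
                      ASHAREHOLDERNUMBER.S_HOLDER_NUM)
        .filter(ASHAREHOLDERNUMBER.S_INFO_WINDCODE == windcode)
        .order_by(ASHAREHOLDERNUMBER.S_HOLDER_ENDDATE)
        .all()
    )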
class ASHAREHOLDING(Base):
__tablename__ = 'ASHAREHOLDING'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_HOLDER_ENDDATE = Column(String(10), doc="报告期")
S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="持股比例")
S_INFO_COMP_NAME = Column(String(100), doc="投资公司名称")
S_INFO_COMP_SNAME = Column(String(40), doc="投资公司中文简称")
S_INFO_COMPCODE = Column(String(100), doc="公司ID")
S_INFO_COMPCODE1 = Column(String(100), doc="投资公司ID")
S_INFO_DIMENSION = Column(String(100), doc="维度")
S_INFO_DIMENSION1 = Column(String(100), doc="子维度")
class ASHAREIBROKERINDICATOR(Base):
__tablename__ = 'ASHAREIBROKERINDICATOR'
ANN_DT = Column(String(8))
ASSET_LIABILITY_RATIO = Column(DECIMAL(20, 4))
ASSET_TURNOVER_RATIO = Column(DECIMAL(20, 4))
CONTINGENT_LIABILITY_RATIO = Column(DECIMAL(20, 4))
CONVERTIBLE_BOND = Column(DECIMAL(20, 4))
CURRENT_RATIO = Column(DECIMAL(20, 4))
FEE_BUSINESS_RATIO = Column(DECIMAL(20, 4))
FIXED_CAPITAL_RATIO = Column(DECIMAL(20, 4))
IFLISTED_DATA = Column(DECIMAL(5, 0))
INVESTMENT_FUNDS = Column(DECIMAL(20, 4))
LONGTERM_INVEST_RATIO = Column(DECIMAL(20, 4))
NET_CAP_NET_ASSETS = Column(DECIMAL(20, 4))
NET_CAP_TOTAL_RISKPROV = Column(DECIMAL(20, 4))
NET_CAPITAL = Column(DECIMAL(20, 4))
NET_CAPITAL_RETURN = Column(DECIMAL(20, 4))
NET_CAPITAL_YIELD = Column(DECIMAL(20, 4))
NET_GEARING_RATIO = Column(DECIMAL(20, 4))
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PER_CAPITA_PROFITS = Column(DECIMAL(20, 4))
PROP_EQU_DER_NETCAP = Column(DECIMAL(20, 4))
PROP_EQUITY_RATIO = Column(DECIMAL(20, 4))
PROP_FIXEDINCOME_NETCAP = Column(DECIMAL(20, 4))
PROP_SECURITIES = Column(DECIMAL(20, 4))
REPORT_PERIOD = Column(String(8))
S_INFO_WINDCODE = Column(String(40))
STATEMENT_TYPE = Column(String(40))
STOCKS = Column(DECIMAL(20, 4))
TOTAL_CAPITAL_RETURN = Column(DECIMAL(20, 4))
TREASURY_BOND = Column(DECIMAL(20, 4))
TRUSTED_CAPITAL = Column(DECIMAL(20, 4))
class ASHAREILLEGALITY(Base):
    __tablename__ = 'ASHAREILLEGALITY'
    AMOUNT = Column(DECIMAL(20, 4), doc="Penalty amount (CNY)")
    ANN_DT = Column(String(8), doc="Announcement date")
    ANN_ID = Column(DECIMAL(11, 0), doc="Announcement ID")
    BAN_YEAR = Column(DECIMAL(20, 4), doc="Market-ban period (years)")
    BEHAVIOR = Column(LONGTEXT, doc="Violation behavior")
    DISPOSAL_DT = Column(String(8), doc="Penalty date")
    DISPOSAL_TYPE = Column(String(100), doc="Disciplinary action type")
    ILLEG_TYPE = Column(String(100), doc="Violation type")
    ILLEG_TYPE_CODE = Column(String(1000), doc="Violation type code")
    METHOD = Column(String(2000), doc="Disciplinary measures")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PROCESSOR = Column(String(200), doc="Handling authority")
    REF_RULE = Column(String(1000), doc="Related regulations")
    RELATION_TYPE = Column(DECIMAL(9, 0), doc="Relationship to the listed company")
    S_INFO_COMPCODE = Column(String(40), doc="Company ID")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    SUBJECT = Column(String(100), doc="Violating party")
    SUBJECT_TYPE = Column(DECIMAL(9, 0), doc="Party category code")
class ASHAREINCDESCRIPTION(Base):
    __tablename__ = 'ASHAREINCDESCRIPTION'
    ANN_DT = Column(String(8), doc="Announcement date")
    EQINC_PLAN_EVENT_ID = Column(String(40), doc="Equity incentive event ID")
    GM_DATE = Column(String(8), doc="General meeting announcement date")
    IMPLEMENT_DATE = Column(String(8), doc="First implementation announcement date")
    INC_FUND_DESCRIPTION = Column(String(1000), doc="Incentive fund description")
    INC_NUMBERS_RATE = Column(DECIMAL(20, 4), doc="Incentive quantity as a percentage of current total share capital (%)")
    INTERVAL_MONTHS = Column(DECIMAL(20, 4), doc="Interval between grant date and first exercisable date (months)")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PREPLAN_ANN_DATE = Column(String(8), doc="Preliminary plan announcement date")
    PRICE_DESCRIPTION = Column(String(80), doc="Price description")
    PROGRESS = Column(String(10), doc="Plan progress")
    S_INC_ENDINC = Column(String(8), doc="Expiry date")
    S_INC_EXPIRYDATE = Column(DECIMAL(20, 4), doc="Validity period")
    S_INC_FIRSTINC = Column(String(8), doc="Start date")
    S_INC_INCENTCONDITION = Column(String(2000), doc="Incentive grant conditions")
    S_INC_INCENTSHARESALEDESCRIPT = Column(String(1000), doc="Description of sale of incentive shares")
    S_INC_INITEXECPRI = Column(DECIMAL(20, 4), doc="Initial option exercise price (share transfer price)")
    S_INC_OPTEXESPECIALCONDITION = Column(String(2000), doc="Special conditions for option exercise")
    S_INC_PROGRAMDESCRIPT = Column(String(3000), doc="Plan description")
    S_INC_QUANTITY = Column(DECIMAL(20, 4), doc="Total incentive quantity (10k shares / 10k units)")
    S_INC_SEQUENCE = Column(String(6), doc="Sequence number")
    S_INC_SUBJECT = Column(DECIMAL(9, 0), doc="Incentive underlying")
    S_INC_TYPE = Column(DECIMAL(9, 0), doc="Incentive method")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINCEXECQTYPRI(Base):
    __tablename__ = 'ASHAREINCEXECQTYPRI'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INC_EXECDATE = Column(String(8), doc="Exercise date")
    S_INC_EXECPRI = Column(DECIMAL(20, 4), doc="Exercise price")
    S_INC_EXECQTY = Column(DECIMAL(20, 4), doc="Exercise quantity (10k units)")
    S_INC_NAME = Column(String(200), doc="Name")
    S_INC_SEQUENCE = Column(String(10), doc="Sequence number")
    S_INFO_WINDCODE = Column(String(100), doc="Wind code")
class ASHAREINCEXERCISEPCT(Base):
    __tablename__ = 'ASHAREINCEXERCISEPCT'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INC_EXECBATCH = Column(String(6), doc="Exercise period")
    S_INC_EXECPCT = Column(DECIMAL(20, 4), doc="Exercise ratio (%)")
    S_INC_INTERVALTIME = Column(DECIMAL(20, 4), doc="Interval from first grant date to exercise period (months)")
    S_INC_SEQUENCE = Column(String(6), doc="Sequence number")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINCEXERCISEPCTZL(Base):
    __tablename__ = 'ASHAREINCEXERCISEPCTZL'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INC_EXECBATCH = Column(String(6), doc="Exercise period")
    S_INC_EXECPCT = Column(DECIMAL(20, 4), doc="Exercise ratio (%)")
    S_INC_INTERVALTIME = Column(DECIMAL(20, 4), doc="Interval from first grant date to exercise period (months)")
    S_INC_SEQUENCE = Column(String(6), doc="Sequence number")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINCOME(Base):
    __tablename__ = 'ASHAREINCOME'
    ACTUAL_ANN_DT = Column(String(8), doc="Actual announcement date")
    ADJLOSSGAIN_PREVYEAR = Column(DECIMAL(20, 4), doc="Adjustment to prior-year profit and loss")
    ANN_DT = Column(String(8), doc="Announcement date")
    ASSET_DISPOSAL_INCOME = Column(DECIMAL(20, 4), doc="Gain on asset disposal")
    CAPITALIZED_COMSTOCK_DIV = Column(DECIMAL(20, 4), doc="Common-stock dividends capitalized into share capital")
    CHG_INSUR_CONT_RSRV = Column(DECIMAL(20, 4), doc="Provision for insurance contract reserves")
    CHG_UNEARNED_PREM_RES = Column(DECIMAL(20, 4), doc="Provision for unearned premium reserves")
    COMP_TYPE_CODE = Column(String(2), doc="Company type code")
    COMSHARE_DVD_PAYABLE = Column(DECIMAL(20, 4), doc="Common-share dividends payable")
    CONTINUED_NET_PROFIT = Column(DECIMAL(20, 4), doc="Net profit from continuing operations")
    CREDIT_IMPAIRMENT_LOSS = Column(DECIMAL(20, 4), doc="Credit impairment loss")
    CRNCY_CODE = Column(String(10), doc="Currency code")
    DISTRIBUTABLE_PROFIT = Column(DECIMAL(20, 4), doc="Distributable profit")
    DISTRIBUTABLE_PROFIT_SHRHDER = Column(DECIMAL(20, 4), doc="Profit distributable to shareholders")
    DVD_EXP_INSURED = Column(DECIMAL(20, 4), doc="Policyholder dividend expense")
    EBIT = Column(DECIMAL(20, 4), doc="Earnings before interest and tax (EBIT)")
    EBITDA = Column(DECIMAL(20, 4), doc="Earnings before interest, tax, depreciation and amortization (EBITDA)")
    END_NET_PROFIT = Column(DECIMAL(20, 4), doc="Net profit from discontinued operations")
    FIN_EXP_INT_INC = Column(DECIMAL(20, 4), doc="Financial expenses: interest income")
    HANDLING_CHRG_COMM_INC = Column(DECIMAL(20, 4), doc="Fee and commission income")
    IL_NET_LOSS_DISP_NONCUR_ASSET = Column(DECIMAL(20, 4), doc="Incl.: less: net loss on disposal of non-current assets")
    INC_TAX = Column(DECIMAL(20, 4), doc="Income tax")
    INCL_INC_INVEST_ASSOC_JV_ENTP = Column(DECIMAL(20, 4), doc="Incl.: investment income from associates and joint ventures")
    INCL_REINSURANCE_PREM_INC = Column(DECIMAL(20, 4), doc="Incl.: reinsurance premium income")
    INSUR_PREM_UNEARNED = Column(DECIMAL(20, 4), doc="Earned premiums")
    INSURANCE_EXPENSE = Column(DECIMAL(20, 4), doc="Insurance business expenses")
    INT_INC = Column(DECIMAL(20, 4), doc="Interest income")
    IS_CALCULATION = Column(DECIMAL(5, 0), doc="Whether a calculated statement")
    LESS_CEDED_OUT_PREM = Column(DECIMAL(20, 4), doc="Less: premiums ceded")
    LESS_CLAIM_RECB_REINSURER = Column(DECIMAL(20, 4), doc="Less: claims recovered from reinsurers")
    LESS_EXP_RECB_REINSURER = Column(DECIMAL(20, 4), doc="Less: reinsurance expenses recovered")
    LESS_FIN_EXP = Column(DECIMAL(20, 4), doc="Less: financial expenses")
    LESS_GERL_ADMIN_EXP = Column(DECIMAL(20, 4), doc="Less: general and administrative expenses")
    LESS_HANDLING_CHRG_COMM_EXP = Column(DECIMAL(20, 4), doc="Less: fee and commission expenses")
    LESS_IMPAIR_LOSS_ASSETS = Column(DECIMAL(20, 4), doc="Less: asset impairment loss")
    LESS_INS_RSRV_RECB_REINSURER = Column(DECIMAL(20, 4), doc="Less: insurance contract reserves recovered from reinsurers")
    LESS_INT_EXP = Column(DECIMAL(20, 4), doc="Less: interest expense")
    LESS_NON_OPER_EXP = Column(DECIMAL(20, 4), doc="Less: non-operating expenses")
    LESS_OPER_COST = Column(DECIMAL(20, 4), doc="Less: operating costs")
    LESS_SELLING_DIST_EXP = Column(DECIMAL(20, 4), doc="Less: selling and distribution expenses")
    LESS_TAXES_SURCHARGES_OPS = Column(DECIMAL(20, 4), doc="Less: business taxes and surcharges")
    MEMO = Column(String(1000), doc="Remarks")
    MINORITY_INT_INC = Column(DECIMAL(20, 4), doc="Minority interest income")
    NET_AFTER_DED_NR_LP_CORRECT = Column(DECIMAL(20, 4), doc="Net profit after deducting non-recurring items (key financial indicator, pre-correction)")
    NET_EXPOSURE_HEDGING_BENEFITS = Column(DECIMAL(20, 4), doc="Net exposure hedging gains")
    NET_HANDLING_CHRG_COMM_INC = Column(DECIMAL(20, 4), doc="Net fee and commission income")
    NET_INC_EC_ASSET_MGMT_BUS = Column(DECIMAL(20, 4), doc="Net income from entrusted client asset management business")
    NET_INC_OTHER_OPS = Column(DECIMAL(20, 4), doc="Net income from other operations")
    NET_INC_SEC_TRADING_BROK_BUS = Column(DECIMAL(20, 4), doc="Net income from securities brokerage business")
    NET_INC_SEC_UW_BUS = Column(DECIMAL(20, 4), doc="Net income from securities underwriting business")
    NET_INT_INC = Column(DECIMAL(20, 4), doc="Net interest income")
    NET_PROFIT_AFTER_DED_NR_LP = Column(DECIMAL(20, 4), doc="Net profit after deducting non-recurring items (excluding minority interest)")
    NET_PROFIT_EXCL_MIN_INT_INC = Column(DECIMAL(20, 4), doc="Net profit (excluding minority interest)")
    NET_PROFIT_INCL_MIN_INT_INC = Column(DECIMAL(20, 4), doc="Net profit (including minority interest)")
    NET_PROFIT_UNDER_INTL_ACC_STA = Column(DECIMAL(20, 4), doc="Net profit under international accounting standards")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPER_EXP = Column(DECIMAL(20, 4), doc="Operating expenses")
    OPER_PROFIT = Column(DECIMAL(20, 4), doc="Operating profit")
    OPER_REV = Column(DECIMAL(20, 4), doc="Operating revenue")
    OPMODE = Column(String(1))
    OTHER_BUS_COST = Column(DECIMAL(20, 4), doc="Other business costs")
    OTHER_BUS_INC = Column(DECIMAL(20, 4), doc="Other business income")
    OTHER_COMPREH_INC = Column(DECIMAL(20, 4), doc="Other comprehensive income")
    OTHER_IMPAIR_LOSS_ASSETS = Column(DECIMAL(20, 4), doc="Other asset impairment losses")
    OTHER_INCOME = Column(DECIMAL(20, 4), doc="Other income")
    PLUS_NET_GAIN_CHG_FV = Column(DECIMAL(20, 4), doc="Plus: net gain from fair-value changes")
    PLUS_NET_GAIN_FX_TRANS = Column(DECIMAL(20, 4), doc="Plus: net foreign-exchange gain")
    PLUS_NET_INC_OTHER_BUS = Column(DECIMAL(20, 4), doc="Plus: net income from other business")
    PLUS_NET_INVEST_INC = Column(DECIMAL(20, 4), doc="Plus: net investment income")
    PLUS_NON_OPER_REV = Column(DECIMAL(20, 4), doc="Plus: non-operating revenue")
    PREM_INC = Column(DECIMAL(20, 4), doc="Premium income")
    PREPAY_SURR = Column(DECIMAL(20, 4), doc="Surrender payments")
    PRFSHARE_DVD_PAYABLE = Column(DECIMAL(20, 4), doc="Preferred-share dividends payable")
    RD_EXPENSE = Column(DECIMAL(20, 4), doc="R&D expenses")
    REINSURANCE_EXP = Column(DECIMAL(20, 4), doc="Reinsurance expenses")
    REPORT_PERIOD = Column(String(8), doc="Report period")
    S_FA_EPS_BASIC = Column(DECIMAL(20, 4), doc="Basic earnings per share")
    S_FA_EPS_DILUTED = Column(DECIMAL(20, 4), doc="Diluted earnings per share")
    S_INFO_COMPCODE = Column(String(10), doc="Company ID")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    SPE_BAL_NET_PROFIT = Column(DECIMAL(20, 4), doc="Net profit gap (special statement item)")
    SPE_BAL_OPER_PROFIT = Column(DECIMAL(20, 4), doc="Operating profit gap (special statement item)")
    SPE_BAL_TOT_PROFIT = Column(DECIMAL(20, 4), doc="Total profit gap (special statement item)")
    STATEMENT_TYPE = Column(String(10), doc="Statement type")
    STMNOTE_FINEXP = Column(DECIMAL(20, 4), doc="Financial expenses: interest expense")
    TOT_BAL_NET_PROFIT = Column(DECIMAL(20, 4), doc="Net profit gap (aggregate balancing item)")
    TOT_BAL_OPER_PROFIT = Column(DECIMAL(20, 4), doc="Operating profit gap (aggregate balancing item)")
    TOT_BAL_TOT_PROFIT = Column(DECIMAL(20, 4), doc="Total profit gap (aggregate balancing item)")
    TOT_CLAIM_EXP = Column(DECIMAL(20, 4), doc="Total claims expense")
    TOT_COMPREH_INC = Column(DECIMAL(20, 4), doc="Total comprehensive income")
    TOT_COMPREH_INC_MIN_SHRHLDR = Column(DECIMAL(20, 4), doc="Total comprehensive income (minority shareholders)")
    TOT_COMPREH_INC_PARENT_COMP = Column(DECIMAL(20, 4), doc="Total comprehensive income (parent company)")
    TOT_OPER_COST = Column(DECIMAL(20, 4), doc="Total operating costs")
    TOT_OPER_COST2 = Column(DECIMAL(20, 4), doc="Total operating costs 2")
    TOT_OPER_REV = Column(DECIMAL(20, 4), doc="Total operating revenue")
    TOT_PROFIT = Column(DECIMAL(20, 4), doc="Total profit")
    TRANSFER_FROM_HOUSINGIMPREST = Column(DECIMAL(20, 4), doc="Transfer in from housing revolving fund")
    TRANSFER_FROM_OTHERS = Column(DECIMAL(20, 4), doc="Other transfers in")
    TRANSFER_FROM_SURPLUSRESERVE = Column(DECIMAL(20, 4), doc="Transfer in from surplus reserves")
    UNCONFIRMED_INVEST_LOSS = Column(DECIMAL(20, 4), doc="Unrecognized investment loss")
    UNDISTRIBUTED_PROFIT = Column(DECIMAL(20, 4), doc="Undistributed profit at beginning of year")
    WIND_CODE = Column(String(40), doc="Wind code")
    WITHDR_BUZEXPWELFARE = Column(DECIMAL(20, 4), doc="Appropriation to enterprise development fund")
    WITHDR_LEGALPUBWELFUNDS = Column(DECIMAL(20, 4), doc="Appropriation to statutory public welfare fund")
    WITHDR_LEGALSURPLUS = Column(DECIMAL(20, 4), doc="Appropriation to statutory surplus reserve")
    WITHDR_OTHERSURPRESERVE = Column(DECIMAL(20, 4), doc="Appropriation to discretionary surplus reserve")
    WITHDR_RESERVEFUND = Column(DECIMAL(20, 4), doc="Appropriation to reserve fund")
    WORKERS_WELFARE = Column(DECIMAL(20, 4), doc="Employee bonus and welfare")
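# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session. Production queries usually also pin STATEMENT_TYPE to a
# single statement-type code; the valid code values are Wind-specific and are
# deliberately not asserted here.
def _example_income_series(session, windcode="600000.SH"):
    """Fetch total operating revenue and net profit by report period for one stock."""
    return (
        session.query(ASHAREINCOME.REPORT_PERIOD,
                      ASHAREINCOME.TOT_OPER_REV,
                      ASHAREINCOME.NET_PROFIT_EXCL_MIN_INT_INC)
        .filter(ASHAREINCOME.S_INFO_WINDCODE == windcode)
        .order_by(ASHAREINCOME.REPORT_PERIOD)
        .all()
    )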
class ASHAREINCQUANTITYDETAILS(Base):
    __tablename__ = 'ASHAREINCQUANTITYDETAILS'
    ANN_DT = Column(String(8), doc="Announcement date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INC_NAME = Column(String(80), doc="Name")
    S_INC_POST = Column(String(80), doc="Position")
    S_INC_QUANTITY = Column(DECIMAL(20, 4), doc="Quantity (10k shares / 10k units)")
    S_INC_SEQUENCE = Column(String(6), doc="Sequence number")
    S_INC_TOTALQTYPCT = Column(DECIMAL(20, 4), doc="Share of total quantity granted this time (%)")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINCQUANTITYPRICE(Base):
    __tablename__ = 'ASHAREINCQUANTITYPRICE'
    ANN_DT = Column(String(8), doc="Announcement date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INC_DNEXEC_QUANTITY = Column(DECIMAL(20, 4), doc="Granted but unexercised option quantity (10k units)")
    S_INC_ENDDATE = Column(String(8), doc="Cutoff date")
    S_INC_GETFUNDQTY = Column(DECIMAL(20, 4), doc="Incentive fund withdrawn (CNY)")
    S_INC_GRANTDATE = Column(String(8), doc="Option grant date")
    S_INC_ISCOMPLETED = Column(DECIMAL(5, 0), doc="Whether the equity incentive plan is fully completed")
    S_INC_QUANTITY = Column(DECIMAL(20, 4), doc="Incentive quantity (10k units)")
    S_INC_SEQUENCE = Column(String(6), doc="Sequence number")
    S_INC_TRANSFERPRIPER = Column(DECIMAL(20, 4), doc="Transfer price per share (exercise price)")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINDUSRATING(Base):
    __tablename__ = 'ASHAREINDUSRATING'
    COLLECT_DT = Column(String(8), doc="[Internal] Announcement date")
    END_DT = Column(String(8), doc="Validity end date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    RATING = Column(String(100), doc="New rating")
    RATING_ANALYST = Column(String(20), doc="Analyst")
    RATING_ANALYST_ID = Column(String(200), doc="[Internal] Analyst ID")
    RATING_DT = Column(String(8), doc="Rating date")
    RATING_INSTITUTE = Column(String(20), doc="Institution")
    RATING_INSTITUTE_ID = Column(String(38), doc="[Internal] Institution ID")
    RATING_MEMO = Column(String(2000), doc="[Internal] Abstract")
    REPORT_IND = Column(String(100), doc="Original industry")
    REPORT_TYPE_CODE = Column(DECIMAL(9, 0), doc="Report type code")
    SCORE = Column(DECIMAL(20, 0), doc="Score")
    WIND_IND_CODE = Column(String(50), doc="Wind industry ID")
class ASHAREINDUSTRIESCLASSCITICS(Base):
    __tablename__ = 'ASHAREINDUSTRIESCLASSCITICS'
    CITICS_IND_CODE = Column(String(50), doc="CITICS industry code")
    CUR_SIGN = Column(String(10), doc="Latest-record flag")
    ENTRY_DT = Column(String(8), doc="Inclusion date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REMOVE_DT = Column(String(8), doc="Removal date")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    WIND_CODE = Column(String(40))
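# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; treating CUR_SIGN == "1" as "current membership" is an
# assumption about this flag's encoding, not something the schema guarantees.
def _example_current_citics_industry(session, windcode="600000.SH"):
    """Look up the stock's current CITICS industry code via the latest-record flag."""
    return (
        session.query(ASHAREINDUSTRIESCLASSCITICS.CITICS_IND_CODE)
        .filter(ASHAREINDUSTRIESCLASSCITICS.S_INFO_WINDCODE == windcode,
                ASHAREINDUSTRIESCLASSCITICS.CUR_SIGN == "1")
        .scalar()
    )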
class ASHAREINDUSTRIESCLASSCITICSZL(Base):
    __tablename__ = 'ASHAREINDUSTRIESCLASSCITICSZL'
    CITICS_IND_CODE = Column(String(50), doc="CITICS industry code")
    CUR_SIGN = Column(String(10), doc="Latest-record flag")
    ENTRY_DT = Column(String(8), doc="Inclusion date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REMOVE_DT = Column(String(8), doc="Removal date")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    WIND_CODE = Column(String(40))
class ASHAREINDUSTRIESCODE(Base):
    __tablename__ = 'ASHAREINDUSTRIESCODE'
    CHINESEDEFINITION = Column(String(600), doc="Sector definition (Chinese)")
    INDUSTRIESALIAS = Column(String(12), doc="Sector alias")
    INDUSTRIESCODE = Column(String(38), doc="Industry code")
    INDUSTRIESCODE_OLD = Column(String(38), doc="Industry code (old)")
    INDUSTRIESNAME = Column(String(50), doc="Industry name")
    LEVELNUM = Column(DECIMAL(1, 0), doc="Level number")
    MEMO = Column(String(100), doc="Remarks")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    SEQUENCE = Column(DECIMAL(4, 0), doc="Display order")
    USED = Column(DECIMAL(1, 0), doc="Whether in effect")
    WIND_NAME_ENG = Column(String(200), doc="Sector English name")
class ASHAREINSIDEHOLDER(Base):
    __tablename__ = 'ASHAREINSIDEHOLDER'
    ANN_DT = Column(String(8), doc="Announcement date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REPORT_PERIOD = Column(String(10), doc="Report period")
    S_HOLDER_ANAME = Column(String(100), doc="Shareholder name")
    S_HOLDER_ENDDATE = Column(String(8), doc="Cutoff date")
    S_HOLDER_HOLDERCATEGORY = Column(String(1), doc="Shareholder type")
    S_HOLDER_MEMO = Column(String(1500), doc="Shareholder notes")
    S_HOLDER_NAME = Column(String(100), doc="Shareholder name")
    S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="Shareholding ratio")
    S_HOLDER_QUANTITY = Column(DECIMAL(20, 4), doc="Number of shares held")
    S_HOLDER_RESTRICTEDQUANTITY = Column(DECIMAL(20, 4), doc="Restricted (non-tradable) shares held")
    S_HOLDER_SEQUENCE = Column(String(200), doc="Related-party sequence number")
    S_HOLDER_SHARECATEGORY = Column(String(40), doc="Shareholding nature code")
    S_HOLDER_SHARECATEGORYNAME = Column(String(40), doc="Shareholding nature")
    S_INFO_COMPCODE = Column(String(10), doc="Company ID")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINSIDERTRADE(Base):
    __tablename__ = 'ASHAREINSIDERTRADE'
    ACTUAL_ANN_DT = Column(String(8), doc="Actual announcement date")
    ANN_DT = Column(String(8), doc="Filing date")
    CHANGE_VOLUME = Column(DECIMAL(20, 4), doc="Change in shares")
    IS_SHORT_TERM_TRADE = Column(DECIMAL(5, 0), doc="Whether a short-swing trade")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    POSITION_AFTER_TRADE = Column(DECIMAL(20, 4), doc="Shares held after the change")
    RELATED_MANAGER_NAME = Column(String(100), doc="Related executive name")
    RELATED_MANAGER_POST = Column(String(80), doc="Related executive position")
    REPORTED_TRADER_NAME = Column(String(100), doc="Name of the person making the change")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    TRADE_AVG_PRICE = Column(DECIMAL(20, 4), doc="Average transaction price")
    TRADE_DT = Column(String(8), doc="Change date")
    TRADE_REASON_CODE = Column(DECIMAL(9, 0), doc="Change reason type code")
    TRADER_MANAGER_RELATION = Column(String(20), doc="Relationship between the trader and management")
class ASHAREINSTHOLDERDERDATA(Base):
    __tablename__ = 'ASHAREINSTHOLDERDERDATA'
    ANN_DATE = Column(String(8), doc="Announcement date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REPORT_PERIOD = Column(String(8), doc="Report period")
    S_FLOAT_A_SHR = Column(DECIMAL(20, 4), doc="Tradable A shares")
    S_HOLDER_COMPCODE = Column(String(40), doc="Shareholder company ID")
    S_HOLDER_HOLDERCATEGORY = Column(String(40), doc="Shareholder type")
    S_HOLDER_NAME = Column(String(200), doc="Shareholder name")
    S_HOLDER_PCT = Column(DECIMAL(20, 6), doc="Shareholding ratio (computed)")
    S_HOLDER_QUANTITY = Column(DECIMAL(20, 4), doc="Number of shares held")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREINSURANCEINDICATOR(Base):
__tablename__ = 'ASHAREINSURANCEINDICATOR'
ACTUAL_CAPITAL_GROUP = Column(DECIMAL(20, 4))
ACTUAL_CAPITAL_LIFE = Column(DECIMAL(20, 4))
ACTUAL_CAPITAL_PROPERTY = Column(DECIMAL(20, 4))
ANN_DT = Column(String(8))
CAP_ADEQUACY_RATIO_LIFE = Column(DECIMAL(20, 4))
CAP_ADEQUACY_RATIO_PROPERTY = Column(DECIMAL(20, 4))
CAPITAL_ADEQUACY_RATIO_GROUP = Column(DECIMAL(20, 4))
COMBINED_COST_PROPERTY = Column(DECIMAL(20, 4))
CRNCY_CODE = Column(String(10))
FEE_RATIO_PROPERTY = Column(DECIMAL(20, 4))
INTRINSIC_VALUE_LIFE = Column(DECIMAL(20, 4))
LOSS_RATIO_PROPERTY = Column(DECIMAL(20, 4))
MINIMUN_CAPITAL_GROUP = Column(DECIMAL(20, 4))
MINIMUN_CAPITAL_LIFE = Column(DECIMAL(20, 4))
MINIMUN_CAPITAL_PROPERTY = Column(DECIMAL(20, 4))
NET_INVESTMENT_YIELD = Column(DECIMAL(20, 4))
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
POLICY_PERSISTENCY_RATE_13M = Column(DECIMAL(20, 4))
POLICY_PERSISTENCY_RATE_14M = Column(DECIMAL(20, 4))
POLICY_PERSISTENCY_RATE_25M = Column(DECIMAL(20, 4))
POLICY_PERSISTENCY_RATE_26M = Column(DECIMAL(20, 4))
REPORT_PERIOD = Column(String(8))
REPORT_TYPE = Column(DECIMAL(9, 0))
RISK_DISCOUNT_RATE = Column(DECIMAL(20, 4))
S_INFO_WINDCODE = Column(String(40))
STATEMENT_TYPE = Column(DECIMAL(9, 0))
SURRENDER_RATE = Column(DECIMAL(20, 4))
TOTAL_INVESTMENT_YIELD = Column(DECIMAL(20, 4))
VALUE_EFFECTIVE_BUSINESS_LIFE = Column(DECIMAL(20, 4))
VALUE_NEW_BUSINESS_LIFE = Column(DECIMAL(20, 4))
class ASHAREINTENSITYTREND(Base):
    __tablename__ = 'ASHAREINTENSITYTREND'
    BBI = Column(DECIMAL(20, 8), doc="BBI bull-bear index (3,6,12,24-day)")
    BOTTOMING_B = Column(DECIMAL(20, 8), doc="Bottoming indicator B (125,5,20-day)")
    BOTTOMING_D = Column(DECIMAL(20, 8), doc="Bottoming indicator D (125,5,20-day)")
    DDI = Column(DECIMAL(20, 8), doc="DDI directional deviation index DDI (13,30,5,10-day)")
    DDI_AD = Column(DECIMAL(20, 8), doc="DDI directional deviation index AD (13,30,5,10-day)")
    DDI_ADDI = Column(DECIMAL(20, 8), doc="DDI directional deviation index ADDI (13,30,5,10-day)")
    DMA_AMA = Column(DECIMAL(20, 8), doc="DMA moving-average difference AMA (10,50,10-day)")
    DMA_DDD = Column(DECIMAL(20, 8), doc="DMA moving-average difference DDD (10,50,10-day)")
    DMI_ADX = Column(DECIMAL(20, 8), doc="DMI trend indicator ADX (14,6-day)")
    DMI_ADXR = Column(DECIMAL(20, 8), doc="DMI trend indicator ADXR (14,6-day)")
    DMI_MDI = Column(DECIMAL(20, 8), doc="DMI trend indicator MDI (14,6-day)")
    DMI_PDI = Column(DECIMAL(20, 8), doc="DMI trend indicator PDI (14,6-day)")
    EXPMA = Column(DECIMAL(20, 8), doc="EXPMA exponential moving average (12-day)")
    MA_10D = Column(DECIMAL(20, 8), doc="MA simple moving average (10-day)")
    MA_120D = Column(DECIMAL(20, 8), doc="MA simple moving average (120-day)")
    MA_20D = Column(DECIMAL(20, 8), doc="MA simple moving average (20-day)")
    MA_250D = Column(DECIMAL(20, 8), doc="MA simple moving average (250-day)")
    MA_30D = Column(DECIMAL(20, 8), doc="MA simple moving average (30-day)")
    MA_5D = Column(DECIMAL(20, 8), doc="MA simple moving average (5-day)")
    MA_60D = Column(DECIMAL(20, 8), doc="MA simple moving average (60-day)")
    MACD_DEA = Column(DECIMAL(20, 8), doc="MACD moving average convergence/divergence DEA (26,12,9-day)")
    MACD_DIFF = Column(DECIMAL(20, 8), doc="MACD moving average convergence/divergence DIFF (26,12,9-day)")
    MACD_MACD = Column(DECIMAL(20, 8), doc="MACD moving average convergence/divergence MACD (26,12,9-day)")
    MARKET = Column(DECIMAL(20, 8), doc="Market synchronization indicator vs. CSI 300 (7-day)")
    MTM = Column(DECIMAL(20, 8), doc="MTM momentum indicator MTM (6,6-day)")
    MTM_MTMMA = Column(DECIMAL(20, 8), doc="MTM momentum indicator MTMMA (6,6-day)")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PRICEOSC = Column(DECIMAL(20, 8), doc="PRICEOSC price oscillator (26,12-day)")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    SAR = Column(DECIMAL(20, 8), doc="SAR parabolic stop-and-reverse (4,2,20-day)")
    STRENGTH = Column(DECIMAL(20, 8), doc="Period strength indicator vs. CSI 300 (20-day)")
    TRADE_DT = Column(String(8), doc="Date")
    TRIX = Column(DECIMAL(20, 8), doc="TRIX triple exponential smoothing average TRIX (12,20-day)")
    TRMA = Column(DECIMAL(20, 8), doc="TRIX triple exponential smoothing average TRMA (12,20-day)")
    WEAKKNESS = Column(DECIMAL(20, 8), doc="Period weakness indicator vs. CSI 300 (20-day)")
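# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; dates are YYYYMMDD strings, as elsewhere in this schema,
# and the defaults below are placeholder values.
def _example_macd_window(session, windcode="600000.SH",
                         start="20200101", end="20201231"):
    """Pull the MACD triplet for one stock over a trade-date window."""
    return (
        session.query(ASHAREINTENSITYTREND.TRADE_DT,
                      ASHAREINTENSITYTREND.MACD_DIFF,
                      ASHAREINTENSITYTREND.MACD_DEA,
                      ASHAREINTENSITYTREND.MACD_MACD)
        .filter(ASHAREINTENSITYTREND.S_INFO_WINDCODE == windcode,
                ASHAREINTENSITYTREND.TRADE_DT.between(start, end))
        .order_by(ASHAREINTENSITYTREND.TRADE_DT)
        .all()
    )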
class ASHAREINTENSITYTRENDADJ(Base):
    __tablename__ = 'ASHAREINTENSITYTRENDADJ'
    BBI = Column(DECIMAL(20, 8), doc="BBI bull-bear index (3,6,12,24-day)")
    BOTTOMING_B = Column(DECIMAL(20, 8), doc="Bottoming indicator B (125,5,20-day)")
    BOTTOMING_D = Column(DECIMAL(20, 8), doc="Bottoming indicator D (125,5,20-day)")
    DDI = Column(DECIMAL(20, 8), doc="DDI directional deviation index DDI (13,30,5,10-day)")
    DDI_AD = Column(DECIMAL(20, 8), doc="DDI directional deviation index AD (13,30,5,10-day)")
    DDI_ADDI = Column(DECIMAL(20, 8), doc="DDI directional deviation index ADDI (13,30,5,10-day)")
    DMA_AMA = Column(DECIMAL(20, 8), doc="DMA moving-average difference AMA (10,50,10-day)")
    DMA_DDD = Column(DECIMAL(20, 8), doc="DMA moving-average difference DDD (10,50,10-day)")
    DMI_ADX = Column(DECIMAL(20, 8), doc="DMI trend indicator ADX (14,6-day)")
    DMI_ADXR = Column(DECIMAL(20, 8), doc="DMI trend indicator ADXR (14,6-day)")
    DMI_MDI = Column(DECIMAL(20, 8), doc="DMI trend indicator MDI (14,6-day)")
    DMI_PDI = Column(DECIMAL(20, 8), doc="DMI trend indicator PDI (14,6-day)")
    EXPMA = Column(DECIMAL(20, 8), doc="EXPMA exponential moving average (12-day)")
    MA_10D = Column(DECIMAL(20, 8), doc="MA simple moving average (10-day)")
    MA_120D = Column(DECIMAL(20, 8), doc="MA simple moving average (120-day)")
    MA_20D = Column(DECIMAL(20, 8), doc="MA simple moving average (20-day)")
    MA_250D = Column(DECIMAL(20, 8), doc="MA simple moving average (250-day)")
    MA_30D = Column(DECIMAL(20, 8), doc="MA simple moving average (30-day)")
    MA_5D = Column(DECIMAL(20, 8), doc="MA simple moving average (5-day)")
    MA_60D = Column(DECIMAL(20, 8), doc="MA simple moving average (60-day)")
    MACD_DEA = Column(DECIMAL(20, 8), doc="MACD moving average convergence/divergence DEA (26,12,9-day)")
    MACD_DIFF = Column(DECIMAL(20, 8), doc="MACD moving average convergence/divergence DIFF (26,12,9-day)")
    MACD_MACD = Column(DECIMAL(20, 8), doc="MACD moving average convergence/divergence MACD (26,12,9-day)")
    MARKET = Column(DECIMAL(20, 8), doc="Market synchronization indicator vs. CSI 300 (7-day)")
    MTM = Column(DECIMAL(20, 8), doc="MTM momentum indicator MTM (6,6-day)")
    MTM_MTMMA = Column(DECIMAL(20, 8), doc="MTM momentum indicator MTMMA (6,6-day)")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PRICEOSC = Column(DECIMAL(20, 8), doc="PRICEOSC price oscillator (26,12-day)")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    SAR = Column(DECIMAL(20, 8), doc="SAR parabolic stop-and-reverse (4,2,20-day)")
    STRENGTH = Column(DECIMAL(20, 8), doc="Period strength indicator vs. CSI 300 (20-day)")
    TRADE_DT = Column(String(8), doc="Date")
    TRIX = Column(DECIMAL(20, 8), doc="TRIX triple exponential smoothing average TRIX (12,20-day)")
    TRMA = Column(DECIMAL(20, 8), doc="TRIX triple exponential smoothing average TRMA (12,20-day)")
    WEAKKNESS = Column(DECIMAL(20, 8), doc="Period weakness indicator vs. CSI 300 (20-day)")
class ASHAREINVESTMENTPEVC(Base):
    __tablename__ = 'ASHAREINVESTMENTPEVC'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_HOLDER_DATE = Column(String(10), doc="Investment date")
    S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="Equity ratio")
    S_INFO_COMP_NAME = Column(String(100), doc="Investee company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Investee company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Investee company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
class ASHAREIPOPRICINGFORECAST(Base):
    __tablename__ = 'ASHAREIPOPRICINGFORECAST'
    ANALYST_ID = Column(String(100), doc="Author ID")
    ANALYST_NAME = Column(String(40), doc="Analyst name")
    COLLECT_TIME = Column(String(8), doc="[Internal] Announcement date")
    EST_DT = Column(String(8), doc="Forecast date")
    EST_MEMO = Column(String(500), doc="Forecast summary")
    FIRST_OPTIME = Column(DateTime, doc="[Internal] Release date")
    INQUIRY_PRICE_CEILING = Column(DECIMAL(20, 4), doc="Upper bound of suggested inquiry price range")
    INQUIRY_PRICE_FLOOR = Column(DECIMAL(20, 4), doc="Lower bound of suggested inquiry price range")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PRICE_CEILING = Column(DECIMAL(20, 4), doc="Upper bound of pricing range")
    PRICE_FLOOR = Column(DECIMAL(20, 4), doc="Lower bound of pricing range")
    RESEARCH_INST_ID = Column(String(10), doc="Institution ID")
    RESEARCH_INST_ID2 = Column(String(40), doc="Institution code")
    RESEARCH_INST_NAME = Column(String(20), doc="Institution name")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHARELONGLOAN(Base):
    __tablename__ = 'ASHARELONGLOAN'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REPORT_PERIOD = Column(String(8), doc="Report period")
    S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="Amount")
    S_INFO_COMP_NAME = Column(String(100), doc="Debtor company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Debtor company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Debtor company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
class ASHAREMAJORHOLDERPLANHOLD(Base):
    __tablename__ = 'ASHAREMAJORHOLDERPLANHOLD'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_HOLDER_NAME = Column(String(200), doc="Shareholder name")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_PH_CALCULATEDAYS = Column(DECIMAL(20, 4), doc="Calculation days")
    S_PH_CALCULATEPRICEMODE = Column(String(80), doc="Price calculation method")
    S_PH_CONDITIONORNOT = Column(DECIMAL(5, 0), doc="Whether an unconditional holding increase")
    S_PH_CONTINUOUSDAYS = Column(DECIMAL(20, 4), doc="Consecutive days")
    S_PH_ENDDATE = Column(String(8), doc="Holding-increase plan end date")
    S_PH_INTENDPUTMONEYDOWNLIMIT = Column(DECIMAL(20, 4), doc="Lower limit of intended investment (CNY 100M)")
    S_PH_INTENDPUTMONEYUPLIMIT = Column(DECIMAL(20, 4), doc="Upper limit of intended investment (CNY 100M)")
    S_PH_PRICEUPLIMIT = Column(DECIMAL(20, 4), doc="Upper price limit for the increase")
    S_PH_SHARENUMDOWNLIMIT = Column(DECIMAL(20, 4), doc="Lower limit of shares to be added (10k shares)")
    S_PH_SHARENUMUPLIMIT = Column(DECIMAL(20, 4), doc="Upper limit of shares to be added (10k shares)")
    S_PH_STARTDATE = Column(String(8), doc="Holding-increase plan start date")
    S_PH_TRIGGERPRICE = Column(DECIMAL(20, 4), doc="Trigger price for the increase")
class ASHAREMAJORHOLDERPLANHOLDZL(Base):
    __tablename__ = 'ASHAREMAJORHOLDERPLANHOLDZL'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_HOLDER_NAME = Column(String(200), doc="Shareholder name")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_PH_CALCULATEDAYS = Column(DECIMAL(20, 4), doc="Calculation days")
    S_PH_CALCULATEPRICEMODE = Column(String(80), doc="Price calculation method")
    S_PH_CONDITIONORNOT = Column(DECIMAL(5, 0), doc="Whether an unconditional holding increase")
    S_PH_CONTINUOUSDAYS = Column(DECIMAL(20, 4), doc="Consecutive days")
    S_PH_ENDDATE = Column(String(8), doc="Holding-increase plan end date")
    S_PH_INTENDPUTMONEYDOWNLIMIT = Column(DECIMAL(20, 4), doc="Lower limit of intended investment (CNY 100M)")
    S_PH_INTENDPUTMONEYUPLIMIT = Column(DECIMAL(20, 4), doc="Upper limit of intended investment (CNY 100M)")
    S_PH_PRICEUPLIMIT = Column(DECIMAL(20, 4), doc="Upper price limit for the increase")
    S_PH_SHARENUMDOWNLIMIT = Column(DECIMAL(20, 4), doc="Lower limit of shares to be added (10k shares)")
    S_PH_SHARENUMUPLIMIT = Column(DECIMAL(20, 4), doc="Upper limit of shares to be added (10k shares)")
    S_PH_STARTDATE = Column(String(8), doc="Holding-increase plan start date")
    S_PH_TRIGGERPRICE = Column(DECIMAL(20, 4), doc="Trigger price for the increase")
class ASHAREMANAGEMENT(Base):
    __tablename__ = 'ASHAREMANAGEMENT'
    ANN_DATE = Column(String(8), doc="Announcement date")
    DISPLAY_ORDER = Column(DECIMAL(4, 0), doc="Display order")
    MANID = Column(String(10), doc="Person ID")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_MANAGER_BIRTHYEAR = Column(String(8), doc="Birth year")
    S_INFO_MANAGER_EDUCATION = Column(String(10), doc="Education")
    S_INFO_MANAGER_GENDER = Column(String(10), doc="Gender")
    S_INFO_MANAGER_INTRODUCTION = Column(String(2000), doc="Biography")
    S_INFO_MANAGER_LEAVEDATE = Column(String(8), doc="Departure date")
    S_INFO_MANAGER_NAME = Column(String(80), doc="Name")
    S_INFO_MANAGER_NATIONALITY = Column(String(40), doc="Nationality")
    S_INFO_MANAGER_POST = Column(String(40), doc="Position")
    S_INFO_MANAGER_STARTDATE = Column(String(8), doc="Tenure start date")
    S_INFO_MANAGER_TYPE = Column(DECIMAL(5, 0), doc="Management category")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHAREMANAGEMENTHOLDREWARD(Base):
    __tablename__ = 'ASHAREMANAGEMENTHOLDREWARD'
    ANN_DATE = Column(String(8), doc="Announcement date")
    CRNY_CODE = Column(String(10), doc="Currency code")
    END_DATE = Column(String(8), doc="Cutoff date")
    MANID = Column(String(10), doc="Person ID")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_MANAGER_NAME = Column(String(80), doc="Name")
    S_INFO_MANAGER_POST = Column(String(300), doc="Position")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_MANAGER_QUANTITY = Column(DECIMAL(20, 4), doc="Number of shares held")
    S_MANAGER_RETURN = Column(DECIMAL(20, 4), doc="Compensation")
    S_MANAGER_RETURN_OTHER = Column(DECIMAL(5, 0), doc="Whether compensation or allowances are received from shareholders or related parties")
class ASHAREMARGINSUBJECT(Base):
__tablename__ = 'ASHAREMARGINSUBJECT'
OBJECT_ID = Column(String(100), primary_key=True)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40))
S_MARGIN_CONVERSIONRATE = Column(DECIMAL(20, 4))
S_MARGIN_EFFECTDATE = Column(String(8))
S_MARGIN_ELIMINDATE = Column(String(8))
S_MARGIN_MARGINRATE = Column(DECIMAL(20, 4))
S_MARGIN_RATEEFFECTDATE = Column(String(8))
S_MARGIN_SHARETYPE = Column(DECIMAL(9, 0))
class ASHAREMARGINTRADE(Base):
    __tablename__ = 'ASHAREMARGINTRADE'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_MARGIN_MARGINTRADEBALANCE = Column(DECIMAL(20, 4), doc="Margin financing and securities lending balance (CNY)")
    S_MARGIN_PURCHWITHBORROWMONEY = Column(DECIMAL(20, 4), doc="Margin purchase amount (CNY, shares)")
    S_MARGIN_REPAYMENTOFBORROWSEC = Column(DECIMAL(20, 4), doc="Securities lending repayment volume (shares/units/lots)")
    S_MARGIN_REPAYMENTTOBROKER = Column(DECIMAL(20, 4), doc="Margin repayment amount (CNY, shares)")
    S_MARGIN_SALESOFBORROWEDSEC = Column(DECIMAL(20, 4), doc="Short-sale volume of borrowed securities (shares/units/lots)")
    S_MARGIN_SECLENDINGBALANCE = Column(DECIMAL(20, 4), doc="Securities lending balance (CNY)")
    S_MARGIN_SECLENDINGBALANCEVOL = Column(DECIMAL(20, 4), doc="Securities lending outstanding volume (shares/units/lots)")
    S_MARGIN_TRADINGBALANCE = Column(DECIMAL(20, 4), doc="Margin financing balance (CNY)")
    S_REFIN_REPAY_VOL = Column(DECIMAL(20, 0), doc="Refinancing securities-lending repayment volume")
    S_REFIN_SB_EOD_VOL = Column(DECIMAL(20, 0), doc="Refinancing securities borrowed: daily volume")
    S_REFIN_SB_VOL_14D = Column(DECIMAL(20, 0), doc="Refinancing securities borrowed (14-day)")
    S_REFIN_SB_VOL_3D = Column(DECIMAL(20, 0), doc="Refinancing securities borrowed (3-day)")
    S_REFIN_SB_VOL_7D = Column(DECIMAL(20, 0), doc="Refinancing securities borrowed (7-day)")
    S_REFIN_SL_EOD_VOL = Column(DECIMAL(20, 0), doc="Refinancing securities lent: daily volume")
    S_REFIN_SL_EOP_BAL = Column(DECIMAL(20, 4), doc="Refinancing securities lending end-of-period balance")
    S_REFIN_SL_EOP_VOL = Column(DECIMAL(20, 0), doc="Refinancing securities lending end-of-period volume")
    S_REFIN_SL_VOL_14D = Column(DECIMAL(20, 0), doc="Refinancing securities lent (14-day)")
    S_REFIN_SL_VOL_182D = Column(DECIMAL(20, 0), doc="Refinancing securities lent (182-day)")
    S_REFIN_SL_VOL_28D = Column(DECIMAL(20, 0), doc="Refinancing securities lent (28-day)")
    S_REFIN_SL_VOL_3D = Column(DECIMAL(20, 0), doc="Refinancing securities lent (3-day)")
    S_REFIN_SL_VOL_7D = Column(DECIMAL(20, 0), doc="Refinancing securities lent (7-day)")
    S_SB_VOL_182D = Column(DECIMAL(20, 0), doc="Refinancing securities borrowed (182-day)")
    S_SB_VOL_28D = Column(DECIMAL(20, 0), doc="Refinancing securities borrowed (28-day)")
    TRADE_DT = Column(String(8), doc="Date")
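# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; the default wind code is a placeholder.
def _example_margin_balance_series(session, windcode="600000.SH"):
    """Margin financing balance (CNY) time series for one stock."""
    return (
        session.query(ASHAREMARGINTRADE.TRADE_DT,
                      ASHAREMARGINTRADE.S_MARGIN_TRADINGBALANCE)
        .filter(ASHAREMARGINTRADE.S_INFO_WINDCODE == windcode)
        .order_by(ASHAREMARGINTRADE.TRADE_DT)
        .all()
    )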
class ASHAREMARGINTRADESUM(Base):
    __tablename__ = 'ASHAREMARGINTRADESUM'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_MARSUM_CIRCULATION_VALUE = Column(DECIMAL(20, 4), doc="A-share float market capitalization (CNY 10k)")
    S_MARSUM_EXCHMARKET = Column(String(40), doc="Exchange English abbreviation")
    S_MARSUM_FINANCING_MARGIN = Column(DECIMAL(20, 4), doc="Margin financing outstanding (shares/units/lots)")
    S_MARSUM_FLOW_EQUITY = Column(DECIMAL(20, 4), doc="A-share float share capital (10k shares)")
    S_MARSUM_MARGIN_MARGIN = Column(DECIMAL(20, 4), doc="Securities lending outstanding volume")
    S_MARSUM_MARGINTRADEBALANCE = Column(DECIMAL(20, 4), doc="Margin financing and securities lending balance (CNY)")
    S_MARSUM_PURCHWITHBORROWMONEY = Column(DECIMAL(20, 4), doc="Margin purchase amount (CNY)")
    S_MARSUM_REPAYMENTTOBROKER = Column(DECIMAL(20, 4), doc="Margin repayment amount (CNY)")
    S_MARSUM_SALESOFBORROWEDSEC = Column(DECIMAL(20, 4), doc="Short-sale volume of borrowed securities (shares/units/lots)")
    S_MARSUM_SECLENDINGBALANCE = Column(DECIMAL(20, 4), doc="Securities lending balance (CNY)")
    S_MARSUM_SECURITIES_REPAY = Column(DECIMAL(20, 4), doc="Securities lending repayment amount (CNY)")
    S_MARSUM_SECURITIES_SALES = Column(DECIMAL(20, 4), doc="Short-sale amount (CNY)")
    S_MARSUM_TRADINGBALANCE = Column(DECIMAL(20, 4), doc="Margin financing balance (CNY)")
    S_MARSUM_TURNOVER_AMOUNT = Column(DECIMAL(20, 4), doc="A-share turnover (CNY 10k)")
    TRADE_DT = Column(String(8), doc="Date")
class ASHAREMECHANISMOWNERSHIP(Base):
    __tablename__ = 'ASHAREMECHANISMOWNERSHIP'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_HOLDER_ENDDATE = Column(String(10), doc="Report period")
    S_HOLDER_PCT = Column(DECIMAL(20, 4), doc="Shareholding ratio")
    S_INFO_COMP_NAME = Column(String(100), doc="Shareholder company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Shareholder company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Shareholder company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
class ASHAREMERGERSUBJECT(Base):
    __tablename__ = 'ASHAREMERGERSUBJECT'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_COMP_NAME = Column(String(100), doc="Acquirer company name")
    S_INFO_COMP_NAME1 = Column(String(100), doc="Acquisition target company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Acquirer company short name (Chinese)")
    S_INFO_COMP_SNAME1 = Column(String(40), doc="Acquisition target company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Acquirer company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
    S_INFO_PROGRESS = Column(String(300), doc="Plan progress")
    S_MEETEVENT_ID = Column(String(20), doc="Event ID")
    S_UPDATE_DATE = Column(String(8), doc="Latest disclosure date")
class ASHAREMJRHOLDERTRADE(Base):
    __tablename__ = 'ASHAREMJRHOLDERTRADE'
    ANN_DT = Column(String(8), doc="Announcement date")
    AVG_PRICE = Column(DECIMAL(20, 4), doc="Average price")
    BLOCKTRADE_QUANTITY = Column(DECIMAL(20, 4), doc="Quantity changed via the block-trade system")
    HOLDER_NAME = Column(String(200), doc="Holder")
    HOLDER_QUANTITY_NEW = Column(DECIMAL(20, 4), doc="Latest tradable shares held")
    HOLDER_QUANTITY_NEW_RATIO = Column(DECIMAL(20, 4), doc="Latest tradable shares held as a percentage of float (%)")
    HOLDER_TYPE = Column(String(1), doc="Holder type")
    IS_REANNOUNCED = Column(DECIMAL(1, 0), doc="Whether a duplicate disclosure")
    IS_RESTRICTED = Column(DECIMAL(1, 0), doc="Whether a reduction of restricted shares")
    NEW_HOLD_TOT = Column(DECIMAL(20, 4), doc="Latest total shares held")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    TRADE_DETAIL = Column(String(1000), doc="Change description")
    TRANSACT_ENDDATE = Column(String(8), doc="Change end date")
    TRANSACT_QUANTITY = Column(DECIMAL(20, 4), doc="Change quantity")
    TRANSACT_QUANTITY_RATIO = Column(DECIMAL(20, 4), doc="Change quantity as a percentage of float (%)")
    TRANSACT_STARTDATE = Column(String(8), doc="Change start date")
    TRANSACT_TYPE = Column(String(4), doc="Buy/sell direction")
    WHETHER_AGREED_REPUR_TRANS = Column(DECIMAL(1, 0), doc="Whether an agreed repurchase transaction")
class ASHAREPEVCINVESTMENT(Base):
    __tablename__ = 'ASHAREPEVCINVESTMENT'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_HOLDER_DATE = Column(String(8), doc="Financing date")
    S_HOLDER_PCT = Column(DECIMAL(12, 4), doc="Equity ratio")
    S_INFO_COMP_NAME = Column(String(100), doc="Shareholder company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Shareholder company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Shareholder company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
class ASHAREPLAINTIFF(Base):
    __tablename__ = 'ASHAREPLAINTIFF'
    ANN_DATE = Column(String(8), doc="Announcement date")
    LITIGATION_EVENTS_ID = Column(String(40), doc="Litigation event ID")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_CASE_TYPE = Column(String(10), doc="Case type")
    S_INFO_COMP_NAME = Column(String(100), doc="Litigating company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Litigating company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Litigating company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
class ASHAREPLEDGEPROPORTION(Base):
    __tablename__ = 'ASHAREPLEDGEPROPORTION'
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_ENDDATE = Column(String(8), doc="Cutoff date")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_PLEDGE_NUM = Column(DECIMAL(20, 0), doc="Number of pledges")
    S_PLEDGE_RATIO = Column(DECIMAL(20, 4), doc="Pledge ratio")
    S_SHARE_RESTRICTED_NUM = Column(DECIMAL(20, 4), doc="Pledged restricted shares")
    S_SHARE_UNRESTRICTED_NUM = Column(DECIMAL(20, 4), doc="Pledged unrestricted shares")
    S_TOT_SHR = Column(DECIMAL(20, 4), doc="Total A-share capital")
class ASHAREPLEDGETRADE(Base):
    __tablename__ = 'ASHAREPLEDGETRADE'
    INITIAL_NUM = Column(DECIMAL(20, 4), doc="Initial transaction quantity")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REPURCHASE_ALLOWANCE = Column(DECIMAL(20, 4), doc="Outstanding quantity pending repurchase")
    REPURCHASE_ALLOWANCE1 = Column(DECIMAL(20, 4), doc="Outstanding quantity pending repurchase (unrestricted)")
    REPURCHASE_ALLOWANCE2 = Column(DECIMAL(20, 4), doc="Outstanding quantity pending repurchase (restricted)")
    REPURCHASE_NUM = Column(DECIMAL(20, 4), doc="Repurchase transaction quantity")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    TRADE_DT = Column(String(8), doc="Trade date")
class ASHAREPREVIOUSENNAME(Base):
    __tablename__ = 'ASHAREPREVIOUSENNAME'
    CHANGE_DT = Column(String(8), doc="Change date")
    CUR_SIGN = Column(DECIMAL(1, 0), doc="Whether the latest record")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_ENAME = Column(String(100), doc="English short name")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    SEC_ID = Column(String(10), doc="Security ID")
class ASHAREPRODUCT(Base):
    __tablename__ = 'ASHAREPRODUCT'
    FREQUENCY_CODE = Column(DECIMAL(9, 0), doc="Frequency code")
    NUMBER_TYPECODE = Column(DECIMAL(9, 0), doc="Quantity type code")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_ENDDATE = Column(String(8), doc="Cutoff date")
    S_INFO_COMP_NAME = Column(String(100), doc="Company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
    S_PRODUCT_NAME = Column(String(100), doc="Product name")
    S_PRODUCT_NUMBER = Column(DECIMAL(20, 4), doc="Quantity")
class ASHAREPROFITEXPRESS(Base):
    __tablename__ = 'ASHAREPROFITEXPRESS'
    ACTUAL_ANN_DT = Column(String(8), doc="[Internal] Actual announcement date")
    ANN_DT = Column(String(8), doc="Announcement date")
    BRIEF_PERFORMANCE = Column(String(2000), doc="Brief performance description")
    EPS_DILUTED = Column(DECIMAL(20, 4), doc="Earnings per share - basic (CNY)")
    GROWTH_BPS_SH = Column(DECIMAL(20, 4), doc="Growth vs. beginning of year: net assets per share attributable to parent shareholders")
    LAST_YEAR_EPS_DILUTED = Column(DECIMAL(20, 4), doc="EPS, same period last year")
    LAST_YEAR_NET_PROFIT_EXCL_INC = Column(DECIMAL(20, 4), doc="Net profit, same period last year")
    LAST_YEAR_OPER_PROFIT = Column(DECIMAL(20, 4), doc="Operating profit, same period last year")
    LAST_YEAR_OPER_REV = Column(DECIMAL(20, 4), doc="Operating revenue, same period last year")
    LAST_YEAR_TOT_PROFIT = Column(DECIMAL(20, 4), doc="Total profit, same period last year")
    MEMO = Column(String(400), doc="Remarks")
    NET_PROFIT_EXCL_MIN_INT_INC = Column(DECIMAL(20, 4), doc="Net profit (CNY)")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPER_PROFIT = Column(DECIMAL(20, 4), doc="Operating profit (CNY)")
    OPER_REV = Column(DECIMAL(20, 4), doc="Operating revenue (CNY)")
    OPMODE = Column(String(1))
    REPORT_PERIOD = Column(String(8), doc="Report period")
    ROE_DILUTED = Column(DECIMAL(20, 4), doc="Return on equity - weighted (%)")
    S_EARLY_BPS = Column(DECIMAL(20, 4), doc="Net assets per share at beginning of period")
    S_EARLY_NET_ASSETS = Column(DECIMAL(20, 4), doc="Net assets at beginning of period")
    S_FA_BPS = Column(DECIMAL(20, 4), doc="Net assets per share")
    S_FA_GROWTH_ASSETS = Column(DECIMAL(20, 4), doc="Growth vs. beginning of year: total assets")
    S_FA_ROE_YEARLY = Column(DECIMAL(20, 4), doc="YoY change: weighted average return on equity")
    S_FA_YOYEBT = Column(DECIMAL(20, 4), doc="YoY growth: total profit")
    S_FA_YOYEPS_BASIC = Column(DECIMAL(20, 4), doc="YoY growth: basic EPS")
    S_FA_YOYEQUITY = Column(DECIMAL(20, 4), doc="Growth vs. beginning of year: equity attributable to parent")
    S_FA_YOYNETPROFIT_DEDUCTED = Column(DECIMAL(20, 4), doc="YoY growth: net profit attributable to parent shareholders")
    S_FA_YOYOP = Column(DECIMAL(20, 4), doc="YoY growth: operating profit")
    S_FA_YOYSALES = Column(DECIMAL(20, 4), doc="YoY growth: operating revenue")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_ISAUDIT = Column(DECIMAL(5, 0), doc="Whether audited")
    TOT_ASSETS = Column(DECIMAL(20, 4), doc="Total assets (CNY)")
    TOT_PROFIT = Column(DECIMAL(20, 4), doc="Total profit (CNY)")
    TOT_SHRHLDR_EQY_EXCL_MIN_INT = Column(DECIMAL(20, 4), doc="Total shareholders' equity (excluding minority interest) (CNY)")
    YOYNET_PROFIT_EXCL_MIN_INT_INC = Column(DECIMAL(20, 4), doc="Restated net profit, same period last year")
class ASHAREPROFITNOTICE(Base):
    __tablename__ = 'ASHAREPROFITNOTICE'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_COMPCODE = Column(String(40), doc="Company ID")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_PROFITNOTICE_ABSTRACT = Column(String(200), doc="Earnings preannouncement summary")
    S_PROFITNOTICE_CHANGEMAX = Column(DECIMAL(20, 4), doc="Upper bound of forecast net-profit change (%)")
    S_PROFITNOTICE_CHANGEMIN = Column(DECIMAL(20, 4), doc="Lower bound of forecast net-profit change (%)")
    S_PROFITNOTICE_DATE = Column(String(8), doc="Announcement date")
    S_PROFITNOTICE_FIRSTANNDATE = Column(String(8), doc="First announcement date")
    S_PROFITNOTICE_NET_PARENT_FIRM = Column(DECIMAL(20, 4), doc="Net profit attributable to parent, same period last year")
    S_PROFITNOTICE_NETPROFITMAX = Column(DECIMAL(20, 4), doc="Upper bound of forecast net profit (CNY 10k)")
    S_PROFITNOTICE_NETPROFITMIN = Column(DECIMAL(20, 4), doc="Lower bound of forecast net profit (CNY 10k)")
    S_PROFITNOTICE_NUMBER = Column(DECIMAL(15, 4), doc="Number of announcements")
    S_PROFITNOTICE_PERIOD = Column(String(8), doc="Report period")
    S_PROFITNOTICE_REASON = Column(String(2000), doc="Reason for the performance change")
    S_PROFITNOTICE_SIGNCHANGE = Column(String(10), doc="Whether the forecast reversed")
    S_PROFITNOTICE_STYLE = Column(DECIMAL(9, 0), doc="Preannouncement type code")
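# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; the default wind code and report period are placeholders.
def _example_latest_profit_notice(session, windcode="600000.SH",
                                  period="20201231"):
    """Most recent earnings preannouncement for one stock and report period."""
    return (
        session.query(ASHAREPROFITNOTICE)
        .filter(ASHAREPROFITNOTICE.S_INFO_WINDCODE == windcode,
                ASHAREPROFITNOTICE.S_PROFITNOTICE_PERIOD == period)
        .order_by(ASHAREPROFITNOTICE.S_PROFITNOTICE_DATE.desc())
        .first()
    )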
class ASHAREPROSECUTION(Base):
    __tablename__ = 'ASHAREPROSECUTION'
    ACCUSER = Column(String(3000), doc="Plaintiff")
    AMOUNT = Column(DECIMAL(20, 4), doc="Amount involved")
    ANN_DT = Column(String(8), doc="Announcement date")
    APPELLANT = Column(String(1), doc="Appellant in second instance (whether the plaintiff)")
    BRIEFRESULT = Column(String(100), doc="Litigation outcome")
    COURT = Column(String(200), doc="Court of first instance")
    COURT2 = Column(String(200), doc="Court of second instance")
    CRNCY_CODE = Column(String(10), doc="Currency code")
    DEFENDANT = Column(String(3000), doc="Defendant")
    EXECUTION = Column(LONGTEXT, doc="Enforcement status")
    INTRODUCTION = Column(LONGTEXT, doc="Case description")
    IS_APPEAL = Column(DECIMAL(5, 0), doc="Whether appealed")
    JUDGE_DT = Column(String(8), doc="Judgment date")
    JUDGE_DT2 = Column(String(8), doc="Second-instance judgment date")
    LITIGATION_EVENTS_ID = Column(String(40), doc="Litigation event ID")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PRO_TYPE = Column(String(10), doc="Litigation type")
    PROSECUTE_DT = Column(String(8), doc="Filing date")
    RESULT = Column(LONGTEXT, doc="Judgment content")
    RESULT2 = Column(String(2000), doc="Second-instance judgment content")
    RESULTAMOUNT = Column(DECIMAL(20, 4), doc="Judgment amount")
    S_INFO_COMPCODE = Column(String(40), doc="Company ID")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    TITLE = Column(String(40), doc="Case name")
class ASHARERECEIVABLES(Base):
    __tablename__ = 'ASHARERECEIVABLES'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PERIOD = Column(String(50), doc="Overdue period")
    REPORT_PERIOD = Column(String(8), doc="Report period")
    S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="Amount")
    S_INFO_COMP_NAME = Column(String(100), doc="Downstream company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Downstream company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Downstream company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
    S_INFO_DISCLOSER = Column(String(100), doc="Disclosing company ID")
class ASHAREREGINV(Base):
    __tablename__ = 'ASHAREREGINV'
    COMP_ID = Column(String(10), doc="Company ID")
    END_ANNDATE = Column(String(8), doc="End announcement date")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    SEC_ID = Column(String(10), doc="Security ID")
    STR_ANNDATE = Column(String(8), doc="Start announcement date")
    STR_DATE = Column(String(8), doc="Start date")
    SUR_INSTITUTE = Column(String(100), doc="Investigating authority")
    SUR_REASONS = Column(String(500), doc="Investigation reason")
class ASHARERELATEDPARTYDEBT(Base):
    __tablename__ = 'ASHARERELATEDPARTYDEBT'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REPORT_PERIOD = Column(String(8), doc="Report period")
    S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="Amount")
    S_INFO_COMP_NAME = Column(String(100), doc="Creditor company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Creditor company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Creditor company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
class ASHARERIGHTISSUE(Base):
    __tablename__ = 'ASHARERIGHTISSUE'
    ANN_DT = Column(String(8), doc="Latest announcement date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_ALLOTMENT_OBJECT = Column(String(40), doc="Rights-issue targets")
    S_CIRCULATED_SHARE_NUM = Column(DECIMAL(20, 4), doc="Theoretical rights shares for tradable shares (10k shares)")
    S_CIRCULATED_SHARE_NUM1 = Column(DECIMAL(20, 4), doc="Actual rights shares for tradable shares (10k shares)")
    S_EMPLOYEE_STOCK_SHARE_NUM = Column(DECIMAL(20, 4), doc="Theoretical rights shares for employee shares (10k shares)")
    S_EMPLOYEE_STOCK_SHARE_NUM1 = Column(DECIMAL(20, 4), doc="Actual rights shares for employee shares (10k shares)")
    S_EXPECTED_FUND_RAISING = Column(DECIMAL(20, 4), doc="Expected funds raised (CNY)")
    S_HOLDER_HELD_NUMBER = Column(DECIMAL(20, 4), doc="Shares held by >5% major shareholders (10k shares)")
    S_HOLDER_SUBSCRIPTION_METHOD = Column(String(100), doc="Subscription method of >5% major shareholders (10k shares)")
    S_HOLDER_SUBSCRIPTION_NUMBER = Column(DECIMAL(20, 4), doc="Shares subscribed by >5% major shareholders (10k shares)")
    S_HOLDER_SUBSCRIPTION_NUMBER1 = Column(DECIMAL(20, 4), doc="Theoretical shares subscribed by >5% major shareholders (10k shares)")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_LEGAL_PERSON_SHARE_NUM = Column(DECIMAL(20, 4), doc="Theoretical rights shares for legal-person shares (10k shares)")
    S_LEGAL_PERSON_SHARE_NUM1 = Column(DECIMAL(20, 4), doc="Actual rights shares for legal-person shares (10k shares)")
    S_LOWER_PRICE_LIMIT = Column(DECIMAL(20, 4), doc="Lower limit of proposed rights price")
    S_PRICE_CAP = Column(DECIMAL(20, 4), doc="Upper limit of proposed rights price")
    S_RATIO_DENOMINATOR = Column(DECIMAL(20, 4), doc="Rights ratio denominator")
    S_RATIO_MOLECULAR = Column(DECIMAL(20, 4), doc="Rights ratio numerator")
    S_RIGHTSISSUE_AMOUNT = Column(DECIMAL(20, 4), doc="Planned rights-issue quantity (10k shares)")
    S_RIGHTSISSUE_AMOUNTACT = Column(DECIMAL(20, 4), doc="Actual rights-issue quantity (10k shares)")
    S_RIGHTSISSUE_ANNCEDATE = Column(String(8), doc="Rights-issue implementation announcement date")
    S_RIGHTSISSUE_APPROVEDDATE = Column(String(8), doc="CSRC approval announcement date")
    S_RIGHTSISSUE_CODE = Column(String(10), doc="Placement code")
    S_RIGHTSISSUE_CONTENT = Column(String(150), doc="Rights-issue description")
    S_RIGHTSISSUE_COST = Column(DECIMAL(20, 4), doc="Placement expenses")
    S_RIGHTSISSUE_EXDIVIDENDDATE = Column(String(8), doc="Ex-rights date")
    S_RIGHTSISSUE_GUARANTOR = Column(String(8), doc="Base year")
    S_RIGHTSISSUE_GUARTYPE = Column(DECIMAL(20, 4), doc="Base share capital (10k shares)")
    S_RIGHTSISSUE_LISTANNDATE = Column(String(8), doc="Listing announcement date")
    S_RIGHTSISSUE_LISTEDDATE = Column(String(8), doc="Rights shares listing date")
    S_RIGHTSISSUE_NAME = Column(String(40), doc="Rights-issue short name")
    S_RIGHTSISSUE_NETCOLLECTION = Column(DECIMAL(20, 4), doc="Funds raised (CNY)")
    S_RIGHTSISSUE_PASSDATE = Column(String(8), doc="Issuance review committee approval announcement date")
    S_RIGHTSISSUE_PAYENDDATE = Column(String(8), doc="Payment end date")
    S_RIGHTSISSUE_PAYSTARTDATE = Column(String(8), doc="Payment start date")
    S_RIGHTSISSUE_PREPLANDATE = Column(String(8), doc="Preliminary plan announcement date")
    S_RIGHTSISSUE_PRICE = Column(DECIMAL(20, 4), doc="Rights price (CNY)")
    S_RIGHTSISSUE_PROGRESS = Column(String(10), doc="Plan progress")
    S_RIGHTSISSUE_RATIO = Column(DECIMAL(20, 4), doc="Rights ratio")
    S_RIGHTSISSUE_REGDATE_BSHARE = Column(String(8), doc="B-share record date")
    S_RIGHTSISSUE_REGDATESHAREB = Column(String(8), doc="Record date")
    S_RIGHTSISSUE_RESULTDATE = Column(String(8), doc="Rights-issue result announcement date")
    S_RIGHTSISSUE_SMTGANNCEDATE = Column(String(8), doc="General meeting announcement date")
    S_RIGHTSISSUE_YEAR = Column(String(8), doc="Rights-issue year")
    S_STATE_OWNED_SHARE_NUM = Column(DECIMAL(20, 4), doc="Theoretical rights shares for state-owned shares (10k shares)")
    S_STATE_OWNED_SHARE_NUM1 = Column(DECIMAL(20, 4), doc="Actual rights shares for state-owned shares (10k shares)")
    S_SUBSCRIPTION_METHOD = Column(String(30), doc="Subscription method")
    S_TRANSFER_SHARE_NUM = Column(DECIMAL(20, 4), doc="Theoretical rights shares for transferred allotment shares (10k shares)")
    S_TRANSFER_SHARE_NUM1 = Column(DECIMAL(20, 4), doc="Actual rights shares for transferred allotment shares (10k shares)")
    S_UNDERWRITER_SUBSCRIPTION = Column(DECIMAL(20, 4), doc="Underwriter residual subscription (10k shares)")
    S_UNDERWRITING_METHOD = Column(String(20), doc="Underwriting method")
class ASHARESELLSUBJECT(Base):
    __tablename__ = 'ASHARESELLSUBJECT'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_COMP_NAME = Column(String(100), doc="Acquirer company name")
    S_INFO_COMP_NAME1 = Column(String(100), doc="Divestiture target company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Acquirer company short name (Chinese)")
    S_INFO_COMP_SNAME1 = Column(String(40), doc="Divestiture target company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Divestiture target company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
    S_INFO_PROGRESS = Column(String(300), doc="Plan progress")
    S_MEETEVENT_ID = Column(String(20), doc="Event ID")
    S_UPDATE_DATE = Column(String(8), doc="Latest disclosure date")
class ASHAREST(Base):
    __tablename__ = 'ASHAREST'
    ANN_DT = Column(String(8), doc="Announcement date")
    ENTRY_DT = Column(String(8), doc="Implementation date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REASON = Column(String(100), doc="Reason for implementation")
    REMOVE_DT = Column(String(8), doc="Revocation date")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_TYPE_ST = Column(String(8), doc="Special treatment type")
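# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; it treats an absent REMOVE_DT as "still in effect",
# which is an assumption about how open-ended records are stored.
def _example_was_st(session, windcode, trade_dt):
    """True if a special-treatment flag covered the given YYYYMMDD trade date."""
    from sqlalchemy import or_  # local import keeps the sketch self-contained
    q = (
        session.query(ASHAREST.OBJECT_ID)
        .filter(ASHAREST.S_INFO_WINDCODE == windcode,
                ASHAREST.ENTRY_DT <= trade_dt,
                or_(ASHAREST.REMOVE_DT.is_(None),
                    ASHAREST.REMOVE_DT > trade_dt))
    )
    return session.query(q.exists()).scalar()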
class ASHARESTAFF(Base):
    __tablename__ = 'ASHARESTAFF'
    ANN_DT = Column(String(8), doc="Announcement date")
    END_DT = Column(String(8), doc="Cutoff date")
    MEMO = Column(String(1000), doc="Notes on special circumstances")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_COMPCODE = Column(String(40), doc="Company ID")
    S_INFO_TOTALEMPLOYEES = Column(DECIMAL(20, 4), doc="Number of employees (persons)")
    S_INFO_TOTALEMPLOYEES2 = Column(DECIMAL(20, 0), doc="Parent-company employees (persons)")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class ASHARESTAFFSTRUCTURE(Base):
    __tablename__ = 'ASHARESTAFFSTRUCTURE'
    ANN_DT = Column(String(8), doc="Announcement date")
    END_DT = Column(String(8), doc="Cutoff date")
    ITEM_CODE = Column(DECIMAL(9, 0), doc="Item code")
    ITEM_NAME = Column(String(100), doc="Item")
    ITEM_TYPE_CODE = Column(DECIMAL(9, 0), doc="Item classification code")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PROPORTION = Column(DECIMAL(20, 4), doc="Proportion")
    REPORT_TYPE_CODE = Column(DECIMAL(9, 0), doc="Report type code")
    S_INFO_COMPCODE = Column(String(40), doc="Trading code")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    STAFF_NUMBER = Column(DECIMAL(20, 0), doc="Headcount")
    STAFF_TYPE_CODE = Column(DECIMAL(9, 0), doc="Headcount category code")
class ASHARESTIBHOLDERVOTE(Base):
    __tablename__ = 'ASHARESTIBHOLDERVOTE'
    ANN_DATE = Column(String(8), doc="Announcement date")
    DEADLINE = Column(String(8), doc="Cutoff date")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_HOLDER_CODE = Column(String(10), doc="Shareholder ID")
    S_HOLDER_HOLDERCATEGORY = Column(String(1), doc="Shareholder type")
    S_HOLDER_LSTTYPECODE = Column(DECIMAL(9, 0), doc="Share type")
    S_HOLDER_NAME = Column(String(100), doc="Shareholder name")
    S_HOLDER_QUANTITY = Column(DECIMAL(20, 4), doc="Shares held (shares)")
    S_HOLDER_VOTING_NUMBER = Column(DECIMAL(20, 4), doc="Voting rights (votes)")
    S_INFO_COMPCODE = Column(String(10), doc="Company ID")
class ASHARESTOCKRATING(Base):
    __tablename__ = 'ASHARESTOCKRATING'
    ANN_DT = Column(String(8), doc="Announcement date (internal)")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_EST_ESTNEWTIME_INST = Column(String(8), doc="Rating date")
    S_EST_HIGHPRICE_INST = Column(DECIMAL(20, 4), doc="Current highest target price")
    S_EST_INSTITUTE = Column(String(100), doc="Research institution name")
    S_EST_LOWPRICE_INST = Column(DECIMAL(20, 4), doc="Current lowest target price")
    S_EST_PREHIGHPRICE_INST = Column(DECIMAL(20, 4), doc="Previous highest target price")
    S_EST_PRELOWPRICE_INST = Column(DECIMAL(20, 4), doc="Previous lowest target price")
    S_EST_PRERATING_INST = Column(String(20), doc="Previous rating")
    S_EST_PRESCORERATING_INST = Column(DECIMAL(20, 4), doc="Previous standardized rating")
    S_EST_RATING_INST = Column(String(20), doc="Current rating")
    S_EST_RATINGANALYST = Column(String(100), doc="Analyst name")
    S_EST_RATINGANALYSTID = Column(String(200), doc="Analyst ID")
    S_EST_REPORT_TITLE = Column(String(400), doc="Report title")
    S_EST_REPORT_TYPE = Column(DECIMAL(9, 0), doc="Report category")
    S_EST_SCORERATING_INST = Column(DECIMAL(20, 4), doc="Current standardized rating")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_RATING_CHANGE = Column(DECIMAL(9, 0), doc="Rating change direction")
    S_RATING_VALIDENDDT = Column(String(8), doc="Rating validity end date")
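# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; the default wind code is a placeholder.
def _example_recent_ratings(session, windcode="600000.SH", n=10):
    """The n most recent analyst ratings (date, institution, rating) for one stock."""
    return (
        session.query(ASHARESTOCKRATING.S_EST_ESTNEWTIME_INST,
                      ASHARESTOCKRATING.S_EST_INSTITUTE,
                      ASHARESTOCKRATING.S_EST_RATING_INST)
        .filter(ASHARESTOCKRATING.S_INFO_WINDCODE == windcode)
        .order_by(ASHARESTOCKRATING.S_EST_ESTNEWTIME_INST.desc())
        .limit(n)
        .all()
    )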
class ASHARESUPERVISOR(Base):
    __tablename__ = 'ASHARESUPERVISOR'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
    S_INFO_MANAGER_NAME = Column(String(80), doc="Name")
    S_INFO_MANAGER_POST = Column(String(40), doc="Position")
    S_INFO_MANAGER_STARTDATE = Column(String(8), doc="Tenure start date")
    S_INFO_MANID = Column(String(10), doc="Person ID")
class ASHARESUPPLIER(Base):
    __tablename__ = 'ASHARESUPPLIER'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REPORT_PERIOD = Column(String(8), doc="Report period")
    S_INFO_AMOUNT = Column(DECIMAL(20, 4), doc="Amount")
    S_INFO_COMP_NAME = Column(String(100), doc="Upstream company name")
    S_INFO_COMP_SNAME = Column(String(40), doc="Upstream company short name (Chinese)")
    S_INFO_COMPCODE = Column(String(100), doc="Company ID")
    S_INFO_COMPCODE1 = Column(String(100), doc="Upstream company ID")
    S_INFO_DIMENSION = Column(String(100), doc="Dimension")
    S_INFO_DIMENSION1 = Column(String(100), doc="Sub-dimension")
    S_INFO_DISCLOSER = Column(String(100), doc="Disclosing company ID")
class ASHARETRADINGSUSPENSION(Base):
    __tablename__ = 'ASHARETRADINGSUSPENSION'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_DQ_CHANGEREASON = Column(String(400), doc="Suspension reason")
    S_DQ_CHANGEREASONTYPE = Column(DECIMAL(9, 0), doc="Suspension reason code")
    S_DQ_RESUMPDATE = Column(String(8), doc="Resumption date")
    S_DQ_SUSPENDDATE = Column(String(8), doc="Suspension date")
    S_DQ_SUSPENDTYPE = Column(DECIMAL(9, 0), doc="Suspension type code")
    S_DQ_TIME = Column(String(200), doc="Suspension/resumption time")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
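# --- Illustrative usage sketch (editor's addition). Assumes an externally
# constructed Session; it treats an absent resumption date as "still suspended",
# which is an assumption about how open-ended records are stored.
def _example_suspensions(session, windcode, trade_dt):
    """Suspension records that start on or before trade_dt and are not yet resumed."""
    from sqlalchemy import or_  # local import keeps the sketch self-contained
    return (
        session.query(ASHARETRADINGSUSPENSION)
        .filter(ASHARETRADINGSUSPENSION.S_INFO_WINDCODE == windcode,
                ASHARETRADINGSUSPENSION.S_DQ_SUSPENDDATE <= trade_dt,
                or_(ASHARETRADINGSUSPENSION.S_DQ_RESUMPDATE.is_(None),
                    ASHARETRADINGSUSPENSION.S_DQ_RESUMPDATE > trade_dt))
        .all()
    )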
class ASHARETYPECODE(Base):
    __tablename__ = 'ASHARETYPECODE'
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_CLASSIFICATION = Column(String(100), doc="Classification")
    S_ORIGIN_TYPCODE = Column(String(40), doc="Original type code")
    S_TYPCODE = Column(String(40), doc="Type code")
    S_TYPNAME = Column(String(300), doc="Type name")
class CFUNDBANKACCOUNT(Base):
    __tablename__ = 'CFUNDBANKACCOUNT'
    ACCOUNT_NAME = Column(String(100), doc="Account name")
    BANK_ACCOUNT = Column(String(50), doc="Bank account number")
    BANK_NUMBER = Column(String(30), doc="Opening bank code")
    COMP_ID = Column(String(10), doc="Company ID")
    COMP_NAME = Column(String(100), doc="Company name")
    EXCHANGE_NUMBER = Column(String(30), doc="Exchange bank number")
    IS_EFFECTIVE = Column(DECIMAL(1, 0), doc="Whether effective")
    LINE_NUMBER = Column(String(30), doc="Interbank line number")
    LINE_PAYMENT_SYSTEM = Column(String(30), doc="PBoC payment system bank number")
    NAME_BANK_ACCOUNT = Column(String(100), doc="Opening bank name")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    UPDATE1 = Column(String(8), doc="Update date")
class CFUNDCHANGEWINDCODE(Base):
    __tablename__ = 'CFUNDCHANGEWINDCODE'
    CHANGE_DATE = Column(String(10), doc="Code change date")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_NEWWINDCODE = Column(String(40), doc="Code after the change")
    S_INFO_OLDWINDCODE = Column(String(40), doc="Code before the change")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
class CFUNDCODEANDSNAME(Base):
    __tablename__ = 'CFUNDCODEANDSNAME'
    IS_COMMON = Column(DECIMAL(1, 0), doc="Whether a common code")
    MEMO = Column(String(800), doc="Business description")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_CODE = Column(String(40), doc="Business code")
    S_INFO_WINDCODE = Column(String(40), doc="Wind code")
    S_SNAME = Column(String(100), doc="Business short name")
    SEC_ID = Column(String(40), doc="Instrument ID")
    TYPE_CODE = Column(DECIMAL(9, 0), doc="Business code type")
class CFUNDCOMPANYPREVIOUSNAME(Base):
    __tablename__ = 'CFUNDCOMPANYPREVIOUSNAME'
    ANN_DT = Column(String(8), doc="Announcement date")
    CHANGE_DT = Column(String(8), doc="Change date")
    CHANGE_REASON = Column(String(100), doc="Renaming reason")
    COMP_NAME = Column(String(200), doc="Company name")
    COMP_NAME_ENG = Column(String(200), doc="Company English name")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_INFO_COMPCODE = Column(String(10), doc="Company ID")
class CFUNDFACTIONALSTYLE(Base):
    __tablename__ = 'CFUNDFACTIONALSTYLE'
    CUR_SIGN = Column(DECIMAL(1, 0), doc="Latest-record flag")
    ENTRY_DT = Column(String(8), doc="Inclusion date")
    MEMO = Column(String(500), doc="[Internal] Remarks")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    REMOVE_DT = Column(String(8), doc="Removal date")
    S_INFO_COMPCODE = Column(String(10), doc="Company ID")
    SEC_IND_CODE = Column(String(20), doc="Sector code")
class CFUNDHOLDRESTRICTEDCIRCULATION(Base):
    __tablename__ = 'CFUNDHOLDRESTRICTEDCIRCULATION'
    BOOK_VALUE_NET_WORTH = Column(DECIMAL(20, 4), doc="Book value as a percentage of NAV (%)")
    CIRCULATION_DATE = Column(String(8), doc="Date tradable")
    COST_TO_NET_WORTH = Column(DECIMAL(20, 4), doc="Cost as a percentage of NAV (%)")
    ENDDATE = Column(String(8), doc="Cutoff date")
    F_INFO_RESTRICTEDCODE = Column(String(20))
    F_INFO_WINDCODE = Column(String(40), doc="Wind code")
    INVESTMENT_AMOUNT = Column(DECIMAL(20, 4), doc="Investment amount (cost) (CNY)")
    OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    PLACING_DATE = Column(String(8), doc="Placement date")
    PLACING_METHOD = Column(String(40), doc="Placement method")
    PLACING_QUANTITY = Column(DECIMAL(20, 4), doc="Placement (holding) quantity (shares/notes)")
    RESTRICTED_TYPE = Column(String(20), doc="Circulation restriction type")
    YEAR_END_VALUATION = Column(DECIMAL(20, 4), doc="Year-end valuation / market price (book value) (CNY)")
class CFUNDINDEXMEMBERS(Base):
    __tablename__ = 'CFUNDINDEXMEMBERS'
    CUR_SIGN = Column(DECIMAL(1, 0), doc="Latest-record flag")
    OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
    OPDATE = Column(DateTime)
    OPMODE = Column(String(1))
    S_CON_INDATE = Column(String(8), doc="Inclusion date")
    S_CON_OUTDATE = Column(String(8), doc="Removal date")
    S_CON_WINDCODE = Column(String(40), doc="Constituent Wind code")
    S_INFO_WINDCODE = Column(String(40), doc="Index Wind code")
class CFUNDINDEXTABLE(Base):
__tablename__ = 'CFUNDINDEXTABLE'
F_CON_WINDCODE = Column(String(40), doc="指数Wind代码")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_TRACKDEV = Column(DECIMAL(20, 4), doc="日均跟踪偏离度阀值")
F_TRACKINGERROR = Column(DECIMAL(20, 4), doc="年化跟踪误差阀值")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
class CFUNDINDUSTRIESCODE(Base):
__tablename__ = 'CFUNDINDUSTRIESCODE'
CHINESEDEFINITION = Column(String(600), doc="板块中文定义")
INDUSTRIESALIAS = Column(String(12), doc="板块别名")
INDUSTRIESCODE = Column(String(38), doc="板块代码")
INDUSTRIESNAME = Column(String(50), doc="板块名称")
LEVELNUM = Column(DECIMAL(1, 0), doc="级数")
MEMO = Column(String(100), doc="[内部]备注")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
SEQUENCE1 = Column(DECIMAL(4, 0), doc="展示序号")
USED = Column(DECIMAL(1, 0), doc="是否使用")
class CFUNDINTRODUCTION(Base):
__tablename__ = 'CFUNDINTRODUCTION'
ADDRESS = Column(String(200), doc="注册地址")
BRIEFING = Column(String(2000), doc="公司简介")
BUSINESSSCOPE = Column(String(2000), doc="经营范围")
CHAIRMAN = Column(String(100), doc="法人代表")
CITY = Column(String(50), doc="城市")
COMP_ID = Column(String(40), doc="公司ID")
COMP_NAME = Column(String(100), doc="公司名称")
COMP_NAME_ENG = Column(String(100), doc="英文名称")
COMP_PROPERTY = Column(String(100), doc="企业性质")
COMP_SNAME = Column(String(40), doc="公司中文简称")
COMP_SNAMEENG = Column(String(100), doc="英文名称缩写")
COMP_TYPE = Column(String(100), doc="公司类型")
COMPANY_TYPE = Column(String(10), doc="公司类别")
COUNTRY = Column(String(20), doc="国籍")
CURRENCYCODE = Column(String(10), doc="货币代码")
DISCLOSER = Column(String(500), doc="信息披露人")
EMAIL = Column(String(80), doc="电子邮件")
ENDDATE = Column(String(8), doc="公司终止日期")
FAX = Column(String(50), doc="传真")
FOUNDDATE = Column(String(8), doc="成立日期")
MAIN_BUSINESS = Column(String(1000), doc="主要产品及业务")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OFFICE = Column(String(200), doc="办公地址")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PHONE = Column(String(50), doc="电话")
PRESIDENT = Column(String(100), doc="总经理")
PROVINCE = Column(String(20), doc="省份")
REGCAPITAL = Column(DECIMAL(20, 4), doc="注册资本")
REGISTERNUMBER = Column(String(20), doc="统一社会信用代码")
S_INFO_ORG_CODE = Column(String(30), doc="组织机构代码")
S_INFO_TOTALEMPLOYEES = Column(DECIMAL(20, 0), doc="员工总数(人)")
SOCIAL_CREDIT_CODE = Column(String(30), doc="统一社会信用编码")
WEBSITE = Column(String(80), doc="公司网址")
ZIPCODE = Column(String(10), doc="邮编")
class CFUNDMANAGEMENT(Base):
__tablename__ = 'CFUNDMANAGEMENT'
ANN_DATE = Column(String(8), doc="公告日期")
MANID = Column(String(10), doc="人物ID")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMPCODE = Column(String(40), doc="公司id")
S_INFO_MANAGER_BIRTHYEAR = Column(String(8), doc="出生日期")
S_INFO_MANAGER_EDUCATION = Column(String(10), doc="学历")
S_INFO_MANAGER_GENDER = Column(String(10), doc="性别代码")
S_INFO_MANAGER_INTRODUCTION = Column(String(2000), doc="个人简历")
S_INFO_MANAGER_LEAVEDATE = Column(String(8), doc="离职日期")
S_INFO_MANAGER_NAME = Column(String(80), doc="姓名")
S_INFO_MANAGER_NATIONALITY = Column(String(40), doc="国籍")
S_INFO_MANAGER_POST = Column(String(40), doc="公布职务名称")
S_INFO_MANAGER_STARTDATE = Column(String(8), doc="任职日期")
S_INFO_MANAGER_TYPE = Column(String(20), doc="管理层类别")
class CFUNDPCHREDM(Base):
__tablename__ = 'CFUNDPCHREDM'
F_INFO_APPROVED_DATE = Column(String(8), doc="获批日期")
F_INFO_FUNDMANAGEMENTCOMP = Column(DECIMAL(1, 0), doc="是否基金主体")
F_INFO_INVSHARE = Column(DECIMAL(20, 4), doc="合同生效时管理人员工持有份额")
F_INFO_INVTOTRTO = Column(DECIMAL(24, 6), doc="合同生效时管理人员工持有比例(%)")
F_INFO_ISSUE_OBJECT = Column(DECIMAL(9, 0), doc="发行对象代码")
F_INFO_ISSUEDATE = Column(String(8), doc="发行公告日")
F_INFO_ISSUETYPE = Column(DECIMAL(9, 0), doc="发行方式代码")
F_INFO_SETUPDATE = Column(String(8), doc="成立公告日")
F_INFO_SUB_MODE = Column(DECIMAL(9, 0), doc="投资者认购方式代码")
F_INFO_SUSPCHDAY = Column(DECIMAL(20, 4), doc="申购确认天数")
F_INFO_SUSPCHDAY1 = Column(DECIMAL(20, 4), doc="申购确认查询天数")
F_INFO_SUSREDMDAY1 = Column(DECIMAL(20, 4), doc="赎回交收天数")
F_INFO_SUSREDMDAY2 = Column(DECIMAL(20, 4), doc="赎回确认天数")
F_INFO_SUSREDMDAY3 = Column(DECIMAL(20, 4), doc="实际赎回交收天数")
F_INFO_SUSREDMDAY4 = Column(DECIMAL(20, 4), doc="赎回确认查询天数")
F_INFO_TDSETMTYPECODE = Column(DECIMAL(9, 0), doc="交易结算模式代码")
F_INFO_TRADE = Column(DECIMAL(1, 0), doc="是否交易")
F_INFO_TYPECODE = Column(DECIMAL(9, 0), doc="产品异常状态代码")
F_INFO_VALMETCODE = Column(DECIMAL(9, 0), doc="估值方法代码")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
class CFUNDPORTFOLIOCHANGES(Base):
__tablename__ = 'CFUNDPORTFOLIOCHANGES'
ACCUMULATED_AMOUNT = Column(DECIMAL(20, 4), doc="累计金额")
ANN_DT = Column(String(8), doc="公告日期")
BEGIN_NET_ASSET_RATIO = Column(DECIMAL(20, 4), doc="占期初基金资产净值比例")
CHANGE_TYPE = Column(String(10), doc="变动类型")
F_INFO_WINDCODE = Column(String(40), doc="基金代码")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REPORT_PERIOD = Column(String(8), doc="报告期")
S_INFO_WINDCODE = Column(String(40), doc="股票代码")
class CFUNDPREVIOUSNAME(Base):
__tablename__ = 'CFUNDPREVIOUSNAME'
ANN_DT = Column(String(8), doc="公告日期")
BEGINDATE = Column(String(8), doc="起始日期")
CHANGEREASON = Column(DECIMAL(9, 0), doc="变动原因代码")
ENDDATE = Column(String(8), doc="截至日期")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_NAME = Column(String(40), doc="证券简称")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CFUNDRALATEDSECURITIESCODE(Base):
__tablename__ = 'CFUNDRALATEDSECURITIESCODE'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_EFFECTIVE_DT = Column(String(8), doc="生效日期")
S_INFO_INVALID_DT = Column(String(8), doc="失效日期")
S_INFO_RALATEDCODE = Column(String(40), doc="关联证券Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_RELATION_TYPCODE = Column(DECIMAL(9, 0), doc="关系类型代码")
class CFUNDRATESENSITIVE(Base):
__tablename__ = 'CFUNDRATESENSITIVE'
CHANGE_AMOUNT = Column(DECIMAL(20, 4), doc="基金资产净值相对变动额")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PRICE_FLUNCUATION = Column(DECIMAL(20, 4), doc="价格变动")
REPORT_PERIOD = Column(String(8), doc="报告期")
TYPE_CODE = Column(DECIMAL(9, 0), doc="敏感分析价格类型代码")
class CFUNDSTYLECOEFFICIENT(Base):
__tablename__ = 'CFUNDSTYLECOEFFICIENT'
AVG_MARKET_VALUE = Column(DECIMAL(20, 4), doc="1年日均市值(万元)")
DATE_CLOSING_DATE = Column(String(8), doc="引用数据的截止日期")
GROSS_OPER_NETPROFIT = Column(DECIMAL(20, 4), doc="净利润增长率(%)")
GROSS_OPER_REV = Column(DECIMAL(20, 4), doc="营业收入增长率(%)")
GROWTH_Z = Column(DECIMAL(20, 4), doc="成长性Z分值")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CHANGE_DATE = Column(String(8), doc="变动日期")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
STYLE_COEFFICIENT = Column(DECIMAL(20, 4), doc="风格系数")
VALUE_COEFFICIENT = Column(DECIMAL(20, 4), doc="调整市值系数")
VALUE_Z = Column(DECIMAL(20, 4), doc="价值因子Z分值(ZVP)")
class CFUNDSTYLETHRESHOLD(Base):
__tablename__ = 'CFUNDSTYLETHRESHOLD'
DATE_CLOSING_DATE = Column(String(8), doc="引用数据的截止日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CHANGE_DATE = Column(String(8), doc="变动日期")
THRESHOLD_GROWTH_STOCK = Column(DECIMAL(20, 4), doc="成长型门限值")
THRESHOLD_LARGE_STOCK = Column(DECIMAL(20, 4), doc="大盘股门限值(万元)")
THRESHOLD_MID_STOCK = Column(DECIMAL(20, 4), doc="中盘股门限值(万元)")
THRESHOLD_VALUE_STOCK = Column(DECIMAL(20, 4), doc="价值型门限值")
class CFUNDTACODE(Base):
__tablename__ = 'CFUNDTACODE'
COMP_ID = Column(String(10), doc="品种ID")
COMP_TYPE_CODE = Column(DECIMAL(9, 0), doc="主体类别代码")
IS_COMMON = Column(DECIMAL(1, 0), doc="是否通用代码")
MEMO = Column(String(800), doc="业务说明")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CODE = Column(String(40), doc="业务代码")
S_SNAME = Column(String(100), doc="业务简称")
TYPE_CODE = Column(DECIMAL(9, 0), doc="业务代码类型")
class CFUNDTYPECODE(Base):
__tablename__ = 'CFUNDTYPECODE'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CLASSIFICATION = Column(String(100), doc="分类")
S_ORIGIN_TYPCODE = Column(DECIMAL(9, 0), doc="原始类型代码")
S_TYPCODE = Column(String(40), doc="类型代码")
S_TYPNAME = Column(String(300), doc="类型名称")
class CFUNDWINDCUSTOMCODE(Base):
__tablename__ = 'CFUNDWINDCUSTOMCODE'
CRNCY_CODE = Column(String(10), doc="交易币种")
CRNCY_NAME = Column(String(40), doc="交易币种名称")
EXCHMARKET = Column(String(40), doc="交易所英文简称")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_ASHARECODE = Column(String(10), doc="证券id")
S_INFO_CODE = Column(String(40), doc="交易代码")
S_INFO_COMPCODE = Column(String(10), doc="公司id")
S_INFO_COUNTRYCODE = Column(String(10), doc="国家及地区代码")
S_INFO_COUNTRYNAME = Column(String(100), doc="国家及地区名称")
S_INFO_ENAME = Column(String(200), doc="[内部]证券英文简称")
S_INFO_EXCHMARKET = Column(String(40), doc="交易所英文简称")
S_INFO_EXCHMARKETNAME = Column(String(40), doc="交易所名称(兼容)")
S_INFO_ISINCODE = Column(String(40), doc="[内部]ISIN代码")
S_INFO_LOT_SIZE = Column(DECIMAL(20, 4), doc="每手数量")
S_INFO_MIN_PRICE_CHG_UNIT = Column(DECIMAL(24, 8), doc="最小价格变动单位")
S_INFO_NAME = Column(String(50), doc="证券中文简称")
S_INFO_ORG_CODE = Column(String(20), doc="组织机构代码")
S_INFO_SECTYPENAME = Column(String(40), doc="品种类型(兼容)")
S_INFO_SECURITIESTYPES = Column(String(10), doc="品种类型(兼容)")
S_INFO_TYPECODE = Column(DECIMAL(9, 0), doc="[内部]产品用分类代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SECURITY_STATUS = Column(DECIMAL(9, 0), doc="存续状态")
class CFUNDWINDINDEXCOMPONENT(Base):
__tablename__ = 'CFUNDWINDINDEXCOMPONENT'
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_CODE = Column(String(16), doc="成份板块代码")
S_CON_NAME = Column(String(100), doc="成份板块名称")
S_INFO_WINDCODE = Column(String(40), doc="指数Wind代码")
class CFUNDWINDINDEXMEMBERS(Base):
__tablename__ = 'CFUNDWINDINDEXMEMBERS'
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CON_CODE = Column(String(16), doc="板块代码")
S_CON_NAME = Column(String(100), doc="板块名称")
S_INFO_WINDCODE = Column(String(40), doc="成份万得代码")
class CHANGEWINDCODE(Base):
__tablename__ = 'CHANGEWINDCODE'
CHANGE_DATE = Column(String(8), doc="Wind代码变更日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CHANGE_REASON = Column(DECIMAL(9, 0), doc="变更原因代码")
S_INFO_NEWWINDCODE = Column(String(40), doc="变更后Wind代码")
S_INFO_OLDWINDCODE = Column(String(40), doc="变更前Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
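# --- Illustrative usage sketch (not part of the generated schema) ----------
# Walks CHANGEWINDCODE old-code -> new-code links to resolve the most recent
# Wind code of a security whose code changed one or more times. The `session`
# argument is an assumed sqlalchemy.orm.Session bound to this schema; the hop
# limit guards against cyclic change records.
from sqlalchemy.orm import Session

def resolve_current_windcode(session: Session, windcode: str, max_hops: int = 10) -> str:
    """Follow code-change records until no newer code exists."""
    current = windcode
    for _ in range(max_hops):
        change = (
            session.query(CHANGEWINDCODE)
            .filter(CHANGEWINDCODE.S_INFO_OLDWINDCODE == current)
            .order_by(CHANGEWINDCODE.CHANGE_DATE.desc())
            .first()
        )
        if change is None or not change.S_INFO_NEWWINDCODE:
            break
        current = change.S_INFO_NEWWINDCODE
    return current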
class CHINACLOSEDFUNDEODPRICE(Base):
__tablename__ = 'CHINACLOSEDFUNDEODPRICE'
CRNCY_CODE = Column(String(10), doc="货币代码")
DISCOUNT_RATE = Column(DECIMAL(20, 6), doc="贴水率(%)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_ADJCLOSE = Column(DECIMAL(20, 4), doc="复权收盘价(元)")
S_DQ_ADJFACTOR = Column(DECIMAL(20, 6), doc="复权因子")
S_DQ_ADJHIGH = Column(DECIMAL(20, 4), doc="复权最高价(元)")
S_DQ_ADJLOW = Column(DECIMAL(20, 4), doc="复权最低价(元)")
S_DQ_ADJOPEN = Column(DECIMAL(20, 4), doc="复权开盘价(元)")
S_DQ_ADJPRECLOSE = Column(DECIMAL(20, 4), doc="复权昨收盘价(元)")
S_DQ_AMOUNT = Column(DECIMAL(20, 4), doc="成交金额(千元)")
S_DQ_AVGPRICE = Column(DECIMAL(20, 4), doc="均价(VWAP)")
S_DQ_CHANGE = Column(DECIMAL(20, 4), doc="涨跌(元)")
S_DQ_CLOSE = Column(DECIMAL(20, 4), doc="收盘价(元)")
S_DQ_HIGH = Column(DECIMAL(20, 4), doc="最高价(元)")
S_DQ_LOW = Column(DECIMAL(20, 4), doc="最低价(元)")
S_DQ_OPEN = Column(DECIMAL(20, 4), doc="开盘价(元)")
S_DQ_PCTCHANGE = Column(DECIMAL(20, 4), doc="涨跌幅(%)")
S_DQ_PRECLOSE = Column(DECIMAL(20, 4), doc="昨收盘价(元)")
S_DQ_VOLUME = Column(DECIMAL(20, 4), doc="成交量(手)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_DT = Column(String(8), doc="交易日期")
TRADES_COUNT = Column(DECIMAL(20, 4), doc="成交笔数")
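# --- Illustrative usage sketch (not part of the generated schema) ----------
# Pulls the daily adjusted-close series of one closed-end fund from
# CHINACLOSEDFUNDEODPRICE. The Wind code is caller-supplied; TRADE_DT is a
# YYYYMMDD string per the column definition above, so string ordering equals
# chronological ordering.
from sqlalchemy.orm import Session  # re-imported so the sketch stays self-contained

def closed_fund_adj_close_series(session: Session, windcode: str):
    """Return [(trade_dt, adjusted_close), ...] in ascending date order."""
    return (
        session.query(
            CHINACLOSEDFUNDEODPRICE.TRADE_DT,
            CHINACLOSEDFUNDEODPRICE.S_DQ_ADJCLOSE,
        )
        .filter(CHINACLOSEDFUNDEODPRICE.S_INFO_WINDCODE == windcode)
        .order_by(CHINACLOSEDFUNDEODPRICE.TRADE_DT)
        .all()
    )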
class CHINAFEEDERFUND(Base):
__tablename__ = 'CHINAFEEDERFUND'
F_INFO_FEEDER_WINDCODE = Column(String(40), doc="联接基金Wind代码")
F_INFO_WINDCODE = Column(String(40), doc="被联接基金Wind代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="被联接基金指数Wind代码")
class CHINAGRADINGFUND(Base):
__tablename__ = 'CHINAGRADINGFUND'
F_INFO_FEEDER_SHARERATIO = Column(DECIMAL(20, 4), doc="子基金份额占比")
F_INFO_FEEDER_TYPECODE = Column(DECIMAL(9, 0), doc="子基金类型代码")
F_INFO_FEEDER_WINDCODE = Column(String(40), doc="子基金Wind代码")
F_INFO_PERIOD_IFDIV = Column(DECIMAL(1, 0), doc="运作期内是否分红")
F_INFO_PREFER_FORMULA = Column(String(200), doc="优先份额约定年收益表达式")
F_INFO_PREFER_IFADD = Column(DECIMAL(1, 0), doc="优先份额约定收益是否得到累计")
F_INFO_PREFER_IFDIS = Column(DECIMAL(1, 0), doc="优先份额是否参与超额收益分配")
F_INFO_TERM_IFTRANS = Column(DECIMAL(1, 0), doc="存续期内是否有份额配对转换")
F_INFO_TERM_TYPECODE = Column(DECIMAL(9, 0), doc="存续期类型代码")
F_INFO_TRANS_BGNDATE = Column(String(8), doc="份额配对转换起始日期")
F_INFO_TRANS_ENDDATE = Column(String(8), doc="份额配对转换截止日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="母基金Wind代码")
class CHINAMFMPERFORMANCE(Base):
__tablename__ = 'CHINAMFMPERFORMANCE'
ANNRETURNES = Column(DECIMAL(20, 8), doc="履任以来年化回报")
BESTTOTRETURN_6M = Column(DECIMAL(20, 8), doc="最高连续六月回报")
FMINDEX_POINT = Column(DECIMAL(20, 8), doc="基金经理指数点位")
FMINDEX_TYPE = Column(DECIMAL(9, 0), doc="基金经理指数类型")
FUNDMANAGER_ID = Column(String(10), doc="基金经理ID")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RANKING_10Y = Column(String(20), doc="最近10年同类排名")
RANKING_1M = Column(String(20), doc="最近1月同类排名")
RANKING_1W = Column(String(20), doc="最近1周同类排名")
RANKING_1Y = Column(String(20), doc="最近1年同类排名")
RANKING_2Y = Column(String(20), doc="最近2年同类排名")
RANKING_3M = Column(String(20), doc="最近3月同类排名")
RANKING_3Y = Column(String(20), doc="最近3年同类排名")
RANKING_5Y = Column(String(20), doc="最近5年同类排名")
RANKING_6M = Column(String(20), doc="最近6月同类排名")
RANKING_ES = Column(String(20), doc="履任以来同类排名")
RANKING_YTD = Column(String(20), doc="今年以来同类排名")
SUCBASERETURN_10Y = Column(DECIMAL(20, 8), doc="最近10年超越基准回报")
SUCBASERETURN_1M = Column(DECIMAL(20, 8), doc="最近1月超越基准回报")
SUCBASERETURN_1W = Column(DECIMAL(20, 8), doc="最近1周超越基准回报")
SUCBASERETURN_1Y = Column(DECIMAL(20, 8), doc="最近1年超越基准回报")
SUCBASERETURN_2Y = Column(DECIMAL(20, 8), doc="最近2年超越基准回报")
SUCBASERETURN_3M = Column(DECIMAL(20, 8), doc="最近3月超越基准回报")
SUCBASERETURN_3Y = Column(DECIMAL(20, 8), doc="最近3年超越基准回报")
SUCBASERETURN_5Y = Column(DECIMAL(20, 8), doc="最近5年超越基准回报")
SUCBASERETURN_6M = Column(DECIMAL(20, 8), doc="最近6月超越基准回报")
SUCBASERETURN_ES = Column(DECIMAL(20, 8), doc="履任以来超越基准回报")
SUCBASERETURN_YTD = Column(DECIMAL(20, 8), doc="今年以来超越基准回报")
TOTRETURN_10Y = Column(DECIMAL(20, 8), doc="最近10年回报")
TOTRETURN_1M = Column(DECIMAL(20, 8), doc="最近1月回报")
TOTRETURN_1W = Column(DECIMAL(20, 8), doc="最近1周回报")
TOTRETURN_1Y = Column(DECIMAL(20, 8), doc="最近1年回报")
TOTRETURN_2Y = Column(DECIMAL(20, 8), doc="最近2年回报")
TOTRETURN_3M = Column(DECIMAL(20, 8), doc="最近3月回报")
TOTRETURN_3Y = Column(DECIMAL(20, 8), doc="最近3年回报")
TOTRETURN_5Y = Column(DECIMAL(20, 8), doc="最近5年回报")
TOTRETURN_6M = Column(DECIMAL(20, 8), doc="最近6月回报")
TOTRETURN_ES = Column(DECIMAL(20, 8), doc="履任以来回报")
TOTRETURN_YTD = Column(DECIMAL(20, 8), doc="今年以来回报")
TRADE_DATE = Column(String(8), doc="日期")
WORSTTOTRETURN_6M = Column(DECIMAL(20, 8), doc="最差连续六月回报")
class CHINAMFPERFORMANCE(Base):
__tablename__ = 'CHINAMFPERFORMANCE'
F_ALPHA_1Y = Column(DECIMAL(20, 8), doc="ALPHA(1年)")
F_ALPHA_2Y = Column(DECIMAL(20, 8), doc="ALPHA(2年)")
F_ALPHA_3Y = Column(DECIMAL(20, 8), doc="ALPHA(3年)")
F_ALPHA_6M = Column(DECIMAL(20, 8), doc="ALPHA(6月)")
F_ANNUALYEILD = Column(DECIMAL(20, 6), doc="年化收益率")
F_ANNUALYEILD_SINCEFOUND = Column(DECIMAL(20, 6), doc="成立以来年化收益率")
F_AVGRETURN_DAY = Column(DECIMAL(20, 6), doc="收益率(当天)")
F_AVGRETURN_FIVEYEAR = Column(DECIMAL(20, 6), doc="收益率(五年)")
F_AVGRETURN_FOURYEAR = Column(DECIMAL(20, 6), doc="收益率(四年)")
F_AVGRETURN_HALFYEAR = Column(DECIMAL(20, 6), doc="收益率(六个月)")
F_AVGRETURN_MONTH = Column(DECIMAL(20, 6), doc="收益率(一个月)")
F_AVGRETURN_QUARTER = Column(DECIMAL(20, 6), doc="收益率(三个月)")
F_AVGRETURN_SINCEFOUND = Column(DECIMAL(20, 6), doc="收益率(成立以来)")
F_AVGRETURN_SIXYEAR = Column(DECIMAL(20, 6), doc="收益率(六年)")
F_AVGRETURN_THISMONTH = Column(DECIMAL(20, 6), doc="收益率(本月以来)")
F_AVGRETURN_THISQUARTER = Column(DECIMAL(20, 6), doc="收益率(本季以来)")
F_AVGRETURN_THISWEEK = Column(DECIMAL(20, 6), doc="收益率(本周以来)")
F_AVGRETURN_THISYEAR = Column(DECIMAL(20, 6), doc="收益率(本年以来)")
F_AVGRETURN_THREEYEAR = Column(DECIMAL(20, 6), doc="收益率(三年)")
F_AVGRETURN_TWOYEA = Column(DECIMAL(20, 6), doc="收益率(两年)")
F_AVGRETURN_WEEK = Column(DECIMAL(20, 6), doc="收益率(一周)")
F_AVGRETURN_YEAR = Column(DECIMAL(20, 6), doc="收益率(一年)")
F_BETA_1Y = Column(DECIMAL(20, 8), doc="BETA(1年)")
F_BETA_2Y = Column(DECIMAL(20, 8), doc="BETA(2年)")
F_BETA_3Y = Column(DECIMAL(20, 8), doc="BETA(3年)")
F_BETA_6M = Column(DECIMAL(20, 8), doc="BETA(6月)")
F_FUNDTYPE = Column(String(50), doc="基金分类")
F_SFANNUALYEILD = Column(DECIMAL(20, 6), doc="同类基金年化收益率")
F_SFRANK_ANNUALYEILD = Column(DECIMAL(20, 0), doc="年化收益率同类排名")
F_SFRANK_ANNUALYEILDT = Column(String(50), doc="年化收益率同类排名")
F_SFRANK_DAY = Column(DECIMAL(20, 0), doc="当日同类收益率排名")
F_SFRANK_DAYT = Column(String(50), doc="当日同类收益率排名")
F_SFRANK_RECENTFIVEYEAR = Column(DECIMAL(20, 0), doc="最近五年同类排名")
F_SFRANK_RECENTFIVEYEART = Column(String(50), doc="最近五年同类排名")
F_SFRANK_RECENTHALFYEAR = Column(DECIMAL(20, 0), doc="最近六月同类排名")
F_SFRANK_RECENTHALFYEART = Column(String(50), doc="最近六月同类排名")
F_SFRANK_RECENTMONTH = Column(DECIMAL(20, 0), doc="最近一月同类排名")
F_SFRANK_RECENTMONTHT = Column(String(50), doc="最近一月同类排名")
F_SFRANK_RECENTQUARTER = Column(DECIMAL(20, 0), doc="最近三月同类排名")
F_SFRANK_RECENTQUARTERT = Column(String(50), doc="最近三月同类排名")
F_SFRANK_RECENTTHREEYEAR = Column(DECIMAL(20, 0), doc="最近三年同类排名")
F_SFRANK_RECENTTHREEYEART = Column(String(50), doc="最近三年同类排名")
F_SFRANK_RECENTTWOYEAR = Column(DECIMAL(20, 0), doc="最近两年同类排名")
F_SFRANK_RECENTTWOYEART = Column(String(50), doc="最近两年同类排名")
F_SFRANK_RECENTWEEK = Column(DECIMAL(20, 0), doc="最近一周同类排名")
F_SFRANK_RECENTWEEKT = Column(String(50), doc="最近一周同类排名")
F_SFRANK_RECENTYEAR = Column(DECIMAL(20, 0), doc="最近一年同类排名")
F_SFRANK_RECENTYEART = Column(String(50), doc="最近一年同类排名")
F_SFRANK_SINCEFOUND = Column(DECIMAL(20, 0), doc="成立以来同类排名(不建议使用)")
F_SFRANK_SINCEFOUNDT = Column(String(50), doc="成立以来同类排名")
F_SFRANK_THISYEAR = Column(DECIMAL(20, 0), doc="今年以来同类排名")
F_SFRANK_THISYEART = Column(String(50), doc="今年以来同类排名")
F_SFRETURN_DAY = Column(DECIMAL(20, 6), doc="当日同类收益率")
F_SFRETURN_RECENTFIVEYEAR = Column(DECIMAL(20, 6), doc="最近五年同类基金收益率")
F_SFRETURN_RECENTHALFYEAR = Column(DECIMAL(20, 6), doc="最近六月同类基金收益率")
F_SFRETURN_RECENTMONTH = Column(DECIMAL(20, 6), doc="最近一月同类基金收益率")
F_SFRETURN_RECENTQUARTER = Column(DECIMAL(20, 6), doc="最近三月同类基金收益率")
F_SFRETURN_RECENTTHREEYEAR = Column(DECIMAL(20, 6), doc="最近三年同类基金收益率")
F_SFRETURN_RECENTTWOYEAR = Column(DECIMAL(20, 6), doc="最近两年同类基金收益率")
F_SFRETURN_RECENTWEEK = Column(DECIMAL(20, 6), doc="最近一周同类基金收益率")
F_SFRETURN_RECENTYEAR = Column(DECIMAL(20, 6), doc="最近一年同类基金收益率")
F_SFRETURN_SINCEFOUND = Column(DECIMAL(20, 6), doc="成立以来同类基金收益率")
F_SFRETURN_THISYEAR = Column(DECIMAL(20, 6), doc="今年以来同类基金收益率")
F_SHARPRATIO_HALFYEAR = Column(DECIMAL(20, 6), doc="夏普比率(六个月)")
F_SHARPRATIO_THREEYEAR = Column(DECIMAL(20, 6), doc="夏普比率(三年)")
F_SHARPRATIO_TWOYEAR = Column(DECIMAL(20, 6), doc="夏普比率(两年)")
F_SHARPRATIO_YEAR = Column(DECIMAL(20, 6), doc="夏普比率(一年)")
F_STDARDDEV_FIVEYEAR = Column(DECIMAL(20, 6), doc="标准差(五年)")
F_STDARDDEV_HALFYEAR = Column(DECIMAL(20, 6), doc="标准差(六个月)")
F_STDARDDEV_SINCEFOUND = Column(DECIMAL(20, 6), doc="标准差(成立以来)")
F_STDARDDEV_THREEYEAR = Column(DECIMAL(20, 6), doc="标准差(三年)")
F_STDARDDEV_TWOYEAR = Column(DECIMAL(20, 6), doc="标准差(两年)")
F_STDARDDEV_YEAR = Column(DECIMAL(20, 6), doc="标准差(一年)")
F_TRACKDEV_THISDAY = Column(DECIMAL(20, 6), doc="当天跟踪偏离度")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_DT = Column(String(8), doc="交易日期")
class CHINAMUTUALFUNDASSETPORTFOLIO(Base):
__tablename__ = 'CHINAMUTUALFUNDASSETPORTFOLIO'
CRNCY_CODE = Column(String(10), doc="货币代码")
F_ANN_DATE = Column(String(8), doc="公告日期")
F_MMF_AVGPTM = Column(DECIMAL(20, 4), doc="投资组合平均剩余期限(天)")
F_MMF_REPO = Column(DECIMAL(20, 4), doc="卖出回购证券(元)")
F_MMF_REVERSEREPO = Column(DECIMAL(20, 4), doc="买入返售证券(元)")
F_PRT_ABSVALUE = Column(DECIMAL(20, 4), doc="持有资产支持证券市值(元)")
F_PRT_ABSVALUETONAV = Column(DECIMAL(20, 4), doc="持有资产支持证券占资产净值比例(%)")
F_PRT_BDTONAV_NOGOV = Column(DECIMAL(20, 4), doc="持有债券市值(不含国债)占资产净值比例(%)")
F_PRT_BDVALUE_NOGOV = Column(DECIMAL(20, 4), doc="持有债券市值(不含国债)(元)")
F_PRT_BONDTONAV = Column(DECIMAL(20, 4), doc="持有债券市值总计占资产净值比例(%)")
F_PRT_BONDTONAVCHANGE = Column(DECIMAL(20, 4), doc="持有债券比例较上期变化(%)")
F_PRT_BONDTOTOT = Column(DECIMAL(20, 4), doc="持有债券市值占资产总值比例(%)")
F_PRT_BONDVALUE = Column(DECIMAL(20, 4), doc="持有债券市值总计(元)")
F_PRT_CASH = Column(DECIMAL(20, 4), doc="持有现金(元)")
F_PRT_CASHTONAV = Column(DECIMAL(20, 4), doc="持有现金占资产净值比例(%)")
F_PRT_CASHTONAVCHANGE = Column(DECIMAL(20, 4), doc="持有现金比例较上期变化(%)")
F_PRT_CASHTOTOT = Column(DECIMAL(20, 4), doc="持有现金占资产总值比例(%)")
F_PRT_CDS = Column(DECIMAL(20, 4), doc="持有同业存单市值(元)")
F_PRT_CORPBOND = Column(DECIMAL(20, 4), doc="持有企债市值(元)")
F_PRT_CORPBONDTONAV = Column(DECIMAL(20, 4), doc="持有企债市值占资产净值比例(%)")
F_PRT_COVERTBOND = Column(DECIMAL(15, 2), doc="持有可转债市值(元)")
F_PRT_COVERTBONDTONAV = Column(DECIMAL(20, 4), doc="持有可转债市值占资产净值比例(%)")
F_PRT_CPVALUE = Column(DECIMAL(20, 4), doc="持有短期融资券市值(元)")
F_PRT_CTRBANKBILL = Column(DECIMAL(20, 4), doc="持有央行票据市值(元)")
F_PRT_CTRBANKBILLTONAV = Column(DECIMAL(20, 4), doc="持有央行票据市值占资产净值比例(%)")
F_PRT_DEBCREBALANCE = Column(DECIMAL(20, 4), doc="借贷方差额(元)")
F_PRT_ENDDATE = Column(String(8), doc="截止日期")
F_PRT_FINANBOND = Column(DECIMAL(20, 4), doc="持有金融债市值(元)")
F_PRT_FINANBONDTONAV = Column(DECIMAL(20, 4), doc="持有金融债市值占资产净值比例(%)")
F_PRT_FUNDTONAV = Column(DECIMAL(20, 4), doc="持有基金市值占资产净值比例(%)")
F_PRT_FUNDTOTOT = Column(DECIMAL(20, 4), doc="持有基金市值占资产总值比例(%)")
F_PRT_FUNDVALUE = Column(DECIMAL(20, 4), doc="持有基金市值(元)")
F_PRT_GOVBOND = Column(DECIMAL(20, 4), doc="持有国债市值(元)")
F_PRT_GOVBONDTONAV = Column(DECIMAL(20, 4), doc="持有国债市值占资产净值比例(%)")
F_PRT_GOVCASHTONAV = Column(DECIMAL(20, 4), doc="持有国债及现金占资产净值比例(%)")
F_PRT_GOVCASHVALUE = Column(DECIMAL(20, 4), doc="持有国债及现金总值(元)")
F_PRT_HKSTOCKTONAV = Column(DECIMAL(20, 4), doc="港股通投资港股市值占资产净值比")
F_PRT_HKSTOCKVALUE = Column(DECIMAL(20, 4), doc="港股通投资港股市值")
F_PRT_MMTONAV = Column(DECIMAL(20, 4), doc="持有货币市场工具市值占资产净值比例(%)")
F_PRT_MMTOTOT = Column(DECIMAL(20, 4), doc="持有货币市场工具市值占资产总值比例(%)")
F_PRT_MMVALUE = Column(DECIMAL(20, 4), doc="持有货币市场工具市值(元)")
F_PRT_MTNVALUE = Column(DECIMAL(20, 4), doc="持有中期票据市值(元)")
F_PRT_NETASSET = Column(DECIMAL(20, 4), doc="资产净值(元)")
F_PRT_OTHER = Column(DECIMAL(20, 4), doc="持有其他资产(元)")
F_PRT_OTHERTONAV = Column(DECIMAL(20, 4), doc="持有其他资产占资产净值比例(%)")
F_PRT_OTHERTOTOT = Column(DECIMAL(20, 4), doc="持有其他资产占资产总值比例(%)")
F_PRT_OTHERTOTOTCHANGE = Column(DECIMAL(20, 4), doc="持有其他资产比例较上期变化(%)")
F_PRT_PASVSTKTONAV = Column(DECIMAL(20, 4), doc="指数投资持有股票市值占资产净值比例(%)")
F_PRT_PASVSTKVALUE = Column(DECIMAL(15, 2), doc="指数投资持有股票市值(元)")
F_PRT_POLIFINANBDTONAV = Column(DECIMAL(20, 4), doc="持有政策性金融债市值占资产净值比例(%)")
F_PRT_POLIFINANBDVALUE = Column(DECIMAL(20, 4), doc="持有政策性金融债市值(元)")
F_PRT_POSVSTKTONAV = Column(DECIMAL(20, 4), doc="积极投资持有股票市值占资产净值比例(%)")
F_PRT_POSVSTKVALUE = Column(DECIMAL(20, 4), doc="积极投资持有股票市值(元)")
F_PRT_REVERSEREPOTONAV = Column(DECIMAL(20, 4), doc="持有买入返售证券占资产净值比例(%)")
F_PRT_REVERSEREPOTOTOT = Column(DECIMAL(20, 4), doc="持有买入返售证券占资产总值比例(%)")
F_PRT_STOCKTONAV = Column(DECIMAL(20, 4), doc="持有股票市值占资产净值比例(%)")
F_PRT_STOCKTONAVCHANGE = Column(DECIMAL(20, 4), doc="持有股票比例较上期变化(%)")
F_PRT_STOCKTOTOT = Column(DECIMAL(20, 4), doc="持有股票市值占资产总值比例(%)")
F_PRT_STOCKVALUE = Column(DECIMAL(20, 4), doc="持有股票市值(元)")
F_PRT_TOTALASSET = Column(DECIMAL(20, 4), doc="资产总值(元)")
F_PRT_WARRANTONAV = Column(DECIMAL(20, 4), doc="持有权证市值占资产净值比例(%)")
F_PRT_WARRANTOTOT = Column(DECIMAL(20, 4), doc="持有权证市值占资产总值比例(%)")
F_PRT_WARRANTVALUE = Column(DECIMAL(20, 4), doc="持有权证市值(元)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CHINAMUTUALFUNDBENCHMARK(Base):
__tablename__ = 'CHINAMUTUALFUNDBENCHMARK'
ANN_DT = Column(String(8), doc="公告日期")
CUR_SIGN = Column(DECIMAL(1, 0), doc="是否最新")
IS_COMPOUND = Column(DECIMAL(1, 0), doc="是否复利")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INC_SEQUENCE = Column(DECIMAL(2, 0), doc="序号")
S_INFO_AFTERTAXORNOT = Column(DECIMAL(1, 0), doc="是否税后")
S_INFO_BGNDT = Column(String(8), doc="起始日期")
S_INFO_CONSTANT = Column(DECIMAL(20, 4), doc="常数")
S_INFO_ENDDT = Column(String(8), doc="截止日期")
S_INFO_FXCODE = Column(String(40), doc="汇率Wind代码")
S_INFO_INDEXWEG = Column(DECIMAL(20, 4), doc="指数权重")
S_INFO_INDEXWINDCODE = Column(String(40), doc="指数Wind代码")
S_INFO_OPERATORS = Column(String(20), doc="运算符")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SEC_ID = Column(String(10), doc="证券ID")
SEC_ID2 = Column(String(10), doc="证券ID2")
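# --- Illustrative usage sketch (not part of the generated schema) ----------
# CHINAMUTUALFUNDBENCHMARK stores one row per component of a fund's
# performance benchmark (index weight, operator, optional constant) ordered by
# S_INC_SEQUENCE. This helper renders the latest benchmark as a readable
# expression; treating CUR_SIGN == 1 as "latest" and the output format itself
# are assumptions, not documented Wind conventions.
from sqlalchemy.orm import Session

def render_benchmark(session: Session, windcode: str) -> str:
    rows = (
        session.query(CHINAMUTUALFUNDBENCHMARK)
        .filter(
            CHINAMUTUALFUNDBENCHMARK.S_INFO_WINDCODE == windcode,
            CHINAMUTUALFUNDBENCHMARK.CUR_SIGN == 1,
        )
        .order_by(CHINAMUTUALFUNDBENCHMARK.S_INC_SEQUENCE)
        .all()
    )
    parts = []
    for row in rows:
        if row.S_INFO_INDEXWINDCODE:
            term = f"{row.S_INFO_INDEXWEG}% x {row.S_INFO_INDEXWINDCODE}"
        else:
            term = str(row.S_INFO_CONSTANT)  # constant term, e.g. a fixed rate
        parts.append(term if not parts else f"{row.S_INFO_OPERATORS or '+'} {term}")
    return " ".join(parts)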
class CHINAMUTUALFUNDBENCHMARKEOD(Base):
__tablename__ = 'CHINAMUTUALFUNDBENCHMARKEOD'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_CLOSE = Column(DECIMAL(20, 8), doc="收盘价")
S_DQ_PCTCHANGE = Column(DECIMAL(20, 8), doc="涨跌幅(%)")
S_INFO_WINDCODE = Column(String(40), doc="指数Wind代码")
TRADE_DT = Column(String(8), doc="交易日期")
class CHINAMUTUALFUNDBONDPORTFOLIO(Base):
__tablename__ = 'CHINAMUTUALFUNDBONDPORTFOLIO'
CRNCY_CODE = Column(String(10), doc="货币代码")
F_ANN_DATE = Column(String(8), doc="公告日期")
F_PRT_BDQUANTITY = Column(DECIMAL(20, 4), doc="持有债券数量(张)")
F_PRT_BDVALUE = Column(DECIMAL(20, 4), doc="持有债券市值(元)")
F_PRT_BDVALUETONAV = Column(DECIMAL(20, 4), doc="持有债券市值占基金净值比例(%)")
F_PRT_ENDDATE = Column(String(8), doc="截止日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_BONDWINDCODE = Column(String(40), doc="持有债券Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="基金Wind代码")
class CHINAMUTUALFUNDDESCRIPTION(Base):
__tablename__ = 'CHINAMUTUALFUNDDESCRIPTION'
CLOSE_INSTITU_OEF_DOWN = Column(DECIMAL(20, 4), doc="封闭期机构投资者认购下限(万元)")
CLOSE_INSTITU_OEF_UP = Column(DECIMAL(20, 4), doc="封闭期机构投资者认购上限(万元)")
CRNY_CODE = Column(String(10), doc="货币代码")
F_CLOSED_OPERATION_INTERVAL = Column(DECIMAL(20, 4), doc="封闭运作期满开放日间隔")
F_CLOSED_OPERATION_PERIOD = Column(DECIMAL(20, 4), doc="封闭运作期")
F_INFO_ANNDATE = Column(String(8), doc="公告日期")
F_INFO_BACKEND_CODE = Column(String(40), doc="后端代码")
F_INFO_BENCHMARK = Column(String(500), doc="业绩比较基准")
F_INFO_CORP_FUNDMANAGEMENTCOMP = Column(String(100), doc="管理人")
F_INFO_CORP_FUNDMANAGEMENTID = Column(String(10), doc="基金管理人ID")
F_INFO_CUSTODIANBANK = Column(String(100), doc="托管人")
F_INFO_CUSTODIANBANKID = Column(String(40), doc="托管人id")
F_INFO_CUSTODIANFEERATIO = Column(DECIMAL(20, 4), doc="托管费")
F_INFO_DECISION_BASIS = Column(String(2000), doc="决策依据")
F_INFO_DELISTDATE = Column(String(8), doc="退市日期")
F_INFO_EXCHMARKET = Column(String(10), doc="交易所")
F_INFO_EXPECTEDRATEOFRETURN = Column(DECIMAL(20, 4), doc="预期收益率")
F_INFO_FIRSTINVESTSTYLE = Column(String(20), doc="投资风格")
F_INFO_FIRSTINVESTTYPE = Column(String(40), doc="投资类型")
F_INFO_FRONT_CODE = Column(String(40), doc="前端代码")
F_INFO_FULLNAME = Column(String(100), doc="名称")
F_INFO_FUND_ID = Column(String(100), doc="基金品种ID")
F_INFO_INVESTCONCEPTION = Column(String(2000), doc="投资理念")
F_INFO_INVESTOBJECT = Column(String(500), doc="投资目标")
F_INFO_INVESTSCOPE = Column(String(2000), doc="投资范围")
F_INFO_ISINITIAL = Column(DECIMAL(5, 0), doc="是否为初始基金")
F_INFO_ISSUEDATE = Column(String(8), doc="发行日期")
F_INFO_ISSUINGPLACE = Column(String(100), doc="发行地")
F_INFO_LISTDATE = Column(String(8), doc="上市时间")
F_INFO_MANAGEMENTFEERATIO = Column(DECIMAL(20, 4), doc="管理费")
F_INFO_MATURITYDATE = Column(String(8), doc="到期日期")
F_INFO_MINBUYAMOUNT = Column(DECIMAL(20, 4), doc="起点金额")
F_INFO_NAME = Column(String(100), doc="简称")
F_INFO_PARVALUE = Column(DECIMAL(20, 4), doc="面值")
F_INFO_PINYIN = Column(String(40), doc="简称拼音")
F_INFO_PTMYEAR = Column(DECIMAL(20, 4), doc="存续期")
F_INFO_REDMSTARTDATE = Column(String(8), doc="日常赎回起始日")
F_INFO_REGISTRANT = Column(String(10), doc="基金注册与过户登记人ID")
F_INFO_RESTRICTEDORNOT = Column(String(20), doc="限定类型")
F_INFO_SETUPDATE = Column(String(8), doc="成立日期")
F_INFO_STATUS = Column(DECIMAL(9, 0), doc="存续状态")
F_INFO_STRUCTUREDORNOT = Column(DECIMAL(1, 0), doc="是否结构化产品")
F_INFO_TRUSTEE = Column(String(100), doc="受托人")
F_INFO_TRUSTTYPE = Column(String(40), doc="信托类别")
F_INFO_TYPE = Column(String(20), doc="基金类型")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_INVESTMENT_AREA = Column(String(20), doc="投资区域")
F_ISSUE_OEF_DNDDATEINST = Column(String(8), doc="机构投资者认购终止日")
F_ISSUE_OEF_STARTDATEINST = Column(String(8), doc="机构投资者认购起始日")
F_ISSUE_TOTALUNIT = Column(DECIMAL(20, 4), doc="发行份额")
F_PCHREDM_PCHMINAMT = Column(DECIMAL(20, 4), doc="每次最低申购金额(场外)(万元)")
F_PCHREDM_PCHMINAMT_EX = Column(DECIMAL(20, 4), doc="每次最低申购金额(场内)(万元)")
F_PCHREDM_PCHSTARTDATE = Column(String(8), doc="日常申购起始日")
F_PERSONAL_ENDDATEIND = Column(String(8), doc="个人投资者认购终止日")
F_PERSONAL_STARTDATEIND = Column(String(8), doc="个人投资者认购起始日")
F_SALES_SERVICE_RATE = Column(DECIMAL(20, 4), doc="销售服务费率")
INVESTSTRATEGY = Column(LONGTEXT, doc="投资策略")
IS_INDEXFUND = Column(DECIMAL(5, 0), doc="是否指数基金")
MAX_NUM_COLTARGET = Column(DECIMAL(20, 4), doc="封闭期目标募集数量上限(亿份)")
MAX_NUM_HOLDER = Column(DECIMAL(20, 4), doc="单一投资者持有份额上限(亿份)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RISK_RETURN = Column(LONGTEXT, doc="基金风险收益特征")
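# --- Illustrative usage sketch (not part of the generated schema) ----------
# Selects live, initial-class funds of one first-level investment type from
# CHINAMUTUALFUNDDESCRIPTION. Reading F_INFO_ISINITIAL == 1 as "initial fund"
# and a NULL F_INFO_DELISTDATE as "not delisted" are assumptions about the
# coding conventions, not documented guarantees.
from sqlalchemy.orm import Session

def live_initial_funds(session: Session, invest_type: str):
    return (
        session.query(
            CHINAMUTUALFUNDDESCRIPTION.F_INFO_WINDCODE,
            CHINAMUTUALFUNDDESCRIPTION.F_INFO_NAME,
        )
        .filter(
            CHINAMUTUALFUNDDESCRIPTION.F_INFO_FIRSTINVESTTYPE == invest_type,
            CHINAMUTUALFUNDDESCRIPTION.F_INFO_ISINITIAL == 1,
            CHINAMUTUALFUNDDESCRIPTION.F_INFO_DELISTDATE.is_(None),
        )
        .all()
    )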
class CHINAMUTUALFUNDFLOATSHARE(Base):
__tablename__ = 'CHINAMUTUALFUNDFLOATSHARE'
F_UNIT_FLOATSHARE = Column(DECIMAL(20, 4), doc="场内份额(份)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_DT = Column(String(8), doc="交易日期")
class CHINAMUTUALFUNDINDPORTFOLIO(Base):
__tablename__ = 'CHINAMUTUALFUNDINDPORTFOLIO'
F_ANN_DATE = Column(String(8), doc="公告日期")
F_PRT_ENDDATE = Column(String(8), doc="截止日期")
F_PRT_INDPASSIVEPRO = Column(DECIMAL(20, 4), doc="指数投资持有行业比例(%)")
F_PRT_INDPASSIVEVALUE = Column(DECIMAL(20, 4), doc="指数投资持有行业市值(元)")
F_PRT_INDPOSPRO = Column(DECIMAL(20, 4), doc="积极投资持有行业比例(%)")
F_PRT_INDPOSVALUE = Column(DECIMAL(20, 4), doc="积极投资持有行业市值(元)")
F_PRT_INDUSTONAV = Column(DECIMAL(20, 4), doc="持有行业市值占基金净值比例(%)")
F_PRT_INDUSTONAVCHANGE = Column(DECIMAL(20, 4), doc="持有行业市值比例较上期变化(%)")
F_PRT_INDUSVALUE = Column(DECIMAL(20, 4), doc="持有行业市值(元)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CSRCINDUSCODE = Column(String(40), doc="证监会行业编号")
S_INFO_CSRCINDUSNAME = Column(String(50), doc="行业名称")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CHINAMUTUALFUNDMANAGER(Base):
__tablename__ = 'CHINAMUTUALFUNDMANAGER'
ANN_DATE = Column(String(8), doc="公告日期")
F_INFO_DIS_SERIAL_NUMBER = Column(DECIMAL(2, 0), doc="展示序号")
F_INFO_ESCROW_FUNDMANAGER = Column(String(50), doc="代管基金经理")
F_INFO_ESCROW_LEAVEDATE = Column(String(8), doc="代管结束日期")
F_INFO_ESCROW_STARTDATE = Column(String(8), doc="代管起始日期")
F_INFO_FUNDMANAGER = Column(String(40), doc="姓名")
F_INFO_FUNDMANAGER_ID = Column(String(10), doc="基金经理ID")
F_INFO_MANAGER_BIRTHYEAR = Column(String(10), doc="出生年份")
F_INFO_MANAGER_EDUCATION = Column(String(20), doc="学历")
F_INFO_MANAGER_GENDER = Column(String(10), doc="性别")
F_INFO_MANAGER_LEAVEDATE = Column(String(8), doc="离职日期")
F_INFO_MANAGER_NATIONALITY = Column(String(10), doc="国籍")
F_INFO_MANAGER_RESUME = Column(LONGTEXT, doc="简历")
F_INFO_MANAGER_STARTDATE = Column(String(8), doc="任职日期")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_MANAGER_POST = Column(String(100), doc="备注")
class CHINAMUTUALFUNDNAV(Base):
__tablename__ = 'CHINAMUTUALFUNDNAV'
ANN_DATE = Column(String(8), doc="公告日期")
CRNCY_CODE = Column(String(10), doc="货币代码")
F_ASSET_MERGEDSHARESORNOT = Column(DECIMAL(1, 0), doc="是否合计数据")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_NAV_ACCUMULATED = Column(DECIMAL(24, 8), doc="累计净值")
F_NAV_ADJFACTOR = Column(DECIMAL(24, 8), doc="复权因子")
F_NAV_ADJUSTED = Column(DECIMAL(22, 8), doc="复权单位净值")
F_NAV_DISTRIBUTION = Column(DECIMAL(20, 4), doc="累计单位分配")
F_NAV_DIVACCUMULATED = Column(DECIMAL(20, 4), doc="累计分红(废弃)")
F_NAV_UNIT = Column(DECIMAL(24, 8), doc="单位净值")
F_PRT_NETASSET = Column(DECIMAL(20, 4), doc="资产净值")
IS_EXDIVIDENDDATE = Column(DECIMAL(5, 0), doc="是否净值除权日")
NETASSET_TOTAL = Column(DECIMAL(20, 4), doc="合计资产净值")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PRICE_DATE = Column(String(8), doc="截止日期")
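# --- Illustrative usage sketch (not part of the generated schema) ----------
# Fetches the most recent unit and adjusted NAV of a fund from
# CHINAMUTUALFUNDNAV. PRICE_DATE is a YYYYMMDD string per the column
# definition above, so the lexicographic maximum is the chronological maximum.
from sqlalchemy.orm import Session

def latest_nav(session: Session, windcode: str):
    """Return (price_date, unit_nav, adjusted_nav) or None if no record."""
    return (
        session.query(
            CHINAMUTUALFUNDNAV.PRICE_DATE,
            CHINAMUTUALFUNDNAV.F_NAV_UNIT,
            CHINAMUTUALFUNDNAV.F_NAV_ADJUSTED,
        )
        .filter(CHINAMUTUALFUNDNAV.F_INFO_WINDCODE == windcode)
        .order_by(CHINAMUTUALFUNDNAV.PRICE_DATE.desc())
        .first()
    )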
class CHINAMUTUALFUNDPCHREDM(Base):
__tablename__ = 'CHINAMUTUALFUNDPCHREDM'
F_UNIT_ENDSHARES = Column(DECIMAL(20, 4), doc="期末基金总份额(份)")
F_UNIT_ENDSHARES_TOTAL = Column(DECIMAL(20, 4), doc="期末基金总份额-合计")
F_UNIT_PURCHASE = Column(DECIMAL(20, 4), doc="本期基金总申购份额(份)")
F_UNIT_PURCHASE_TOTAL = Column(DECIMAL(20, 4), doc="本期基金总申购份额-合计")
F_UNIT_REDEMPTION = Column(DECIMAL(20, 4), doc="本期基金总赎回份额(份)")
F_UNIT_REDEMPTION_TOTAL = Column(DECIMAL(20, 4), doc="本期基金总赎回份额-合计")
F_UNIT_RPENDDATE = Column(String(8), doc="报告期结束日期")
F_UNIT_RPSTARTDATE = Column(String(8), doc="报告期开始日期")
F_UNIT_STARTSHARES = Column(DECIMAL(20, 4), doc="期初基金总份额(份)")
F_UNIT_STARTSHARES_TOTAL = Column(DECIMAL(20, 4), doc="期初基金总份额-合计")
IS_MERGE_DATA = Column(DECIMAL(5, 0), doc="是否为合并数据")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
TRADE_DT = Column(String(8), doc="公告日期")
class CHINAMUTUALFUNDPOSESTIMATION(Base):
__tablename__ = 'CHINAMUTUALFUNDPOSESTIMATION'
F_EST_DATE = Column(String(8), doc="估算日期")
F_EST_LARGECAPWEG = Column(DECIMAL(20, 4), doc="大市值组合权重")
F_EST_MIDCAPWEG = Column(DECIMAL(20, 4), doc="中市值组合权重")
F_EST_NAV = Column(DECIMAL(20, 4), doc="估算收盘净值(元)")
F_EST_POSITION = Column(DECIMAL(20, 4), doc="基金仓位")
F_EST_SMALLCAPWEG = Column(DECIMAL(20, 4), doc="小市值组合权重")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="基金Wind代码")
class CHINAMUTUALFUNDREPNAVPER(Base):
__tablename__ = 'CHINAMUTUALFUNDREPNAVPER'
ANN_DATE = Column(String(8), doc="公告日期")
F_INFO_REPORTPERIOD = Column(String(8), doc="报告期")
F_NAV_BENCHDEVRETURN = Column(DECIMAL(20, 4), doc="净值增长率减基准收益率")
F_NAV_BENCHRETURN = Column(DECIMAL(20, 4), doc="业绩比较基准收益率")
F_NAV_BENCHSTDDEV = Column(DECIMAL(20, 4), doc="业绩比较基准收益率标准差")
F_NAV_RETURN = Column(DECIMAL(20, 4), doc="净值增长率")
F_NAV_STDDEVNAVBENCH = Column(DECIMAL(20, 4), doc="净值增长率标准差减基准收益率标准差")
F_NAV_STDDEVRETURN = Column(DECIMAL(20, 4), doc="净值增长率标准差")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PERIOD_CODE = Column(String(10), doc="期间代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CHINAMUTUALFUNDSEATTRADING(Base):
__tablename__ = 'CHINAMUTUALFUNDSEATTRADING'
CRNCY_CODE = Column(String(10), doc="货币代码")
F_COMMISSIONAM = Column(DECIMAL(20, 4), doc="交易佣金(元)")
F_COMMISSIONPRO = Column(DECIMAL(20, 4), doc="交易佣金占比(%)")
F_TRADE_BONDAM = Column(DECIMAL(20, 4), doc="债券交易金额(元)")
F_TRADE_BONDPRO = Column(DECIMAL(20, 4), doc="债券交易金额占比(%)")
F_TRADE_FUNDAM = Column(DECIMAL(20, 4), doc="基金交易金额(元)")
F_TRADE_FUNDPRO = Column(DECIMAL(20, 4), doc="基金交易金额占比(%)")
F_TRADE_REPOAM = Column(DECIMAL(20, 4), doc="回购交易金额(元)")
F_TRADE_REPOPRO = Column(DECIMAL(20, 4), doc="回购交易金额占比(%)")
F_TRADE_SBAM = Column(DECIMAL(20, 4), doc="股票债券成交金额(元)")
F_TRADE_SBPRO = Column(DECIMAL(20, 4), doc="股票债券交易量占比(%)")
F_TRADE_STOCKAM = Column(DECIMAL(20, 4), doc="股票交易金额(元)")
F_TRADE_STOCKPRO = Column(DECIMAL(20, 4), doc="股票交易金额占比(%)")
F_TRADE_WARRANTAM = Column(DECIMAL(20, 4), doc="权证交易金额(元)")
F_TRADE_WARRANTPRO = Column(DECIMAL(20, 4), doc="权证交易金额占比(%)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_REPORTPERIOD = Column(String(8), doc="报告期")
S_INFO_SECURNAME = Column(String(100), doc="证券公司简称")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CHINAMUTUALFUNDSECTOR(Base):
__tablename__ = 'CHINAMUTUALFUNDSECTOR'
CUR_SIGN = Column(String(10), doc="最新标志")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_SECTOR = Column(String(40), doc="所属板块")
S_INFO_SECTORENTRYDT = Column(String(8), doc="起始日期")
S_INFO_SECTOREXITDT = Column(String(8), doc="截止日期")
class CHINAMUTUALFUNDSHARE(Base):
__tablename__ = 'CHINAMUTUALFUNDSHARE'
ANN_DATE = Column(String(8), doc="公告日期")
CHANGE_DATE = Column(String(8), doc="变动日期")
CHANGEREASON = Column(String(10), doc="份额变动原因")
CUR_SIGN = Column(DECIMAL(5, 0), doc="最新标志")
F_INFO_SHARE = Column(DECIMAL(20, 6), doc="流通份额(万份)")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_UNIT_MERGEDSHARESORNOT = Column(DECIMAL(5, 0), doc="是否为合并数据")
F_UNIT_TOTAL = Column(DECIMAL(20, 6), doc="基金总份额(万份)")
FUNDSHARE = Column(DECIMAL(20, 6), doc="基金份额(万份)")
FUNDSHARE_TOTAL = Column(DECIMAL(20, 6), doc="基金合计份额(万份)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
class CHINAMUTUALFUNDSTOCKPORTFOLIO(Base):
__tablename__ = 'CHINAMUTUALFUNDSTOCKPORTFOLIO'
ANN_DATE = Column(String(8), doc="公告日期")
CRNCY_CODE = Column(String(10), doc="货币代码")
F_PRT_ENDDATE = Column(String(8), doc="截止日期")
F_PRT_PASSTKEVALUE = Column(DECIMAL(20, 4), doc="指数投资持有股票市值(元)")
F_PRT_PASSTKQUANTITY = Column(DECIMAL(20, 4), doc="指数投资持有股数(股)")
F_PRT_PASSTKTONAV = Column(DECIMAL(20, 4), doc="指数投资持有股票市值占净资产比例(%)")
F_PRT_POSSTKQUANTITY = Column(DECIMAL(20, 4), doc="积极投资持有股数(股)")
F_PRT_POSSTKTONAV = Column(DECIMAL(20, 4), doc="积极投资持有股票市值占净资产比例(%)")
F_PRT_POSSTKVALUE = Column(DECIMAL(20, 4), doc="积极投资持有股票市值(元)")
F_PRT_STKQUANTITY = Column(DECIMAL(20, 4), doc="持有股票数量(股)")
F_PRT_STKVALUE = Column(DECIMAL(20, 4), doc="持有股票市值(元)")
F_PRT_STKVALUETONAV = Column(DECIMAL(20, 4), doc="持有股票市值占基金净值比例(%)")
FLOAT_SHR_PER = Column(DECIMAL(24, 4), doc="占流通股本比例(%)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_STOCKWINDCODE = Column(String(10), doc="持有股票Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="基金Wind代码")
STOCK_PER = Column(DECIMAL(20, 2), doc="占股票市值比")
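# --- Illustrative usage sketch (not part of the generated schema) ----------
# Lists the top-N stock positions a fund disclosed for one portfolio date,
# ranked by reported market value. The YYYYMMDD portfolio date is matched
# against F_PRT_ENDDATE as defined above.
from sqlalchemy.orm import Session

def top_stock_holdings(session: Session, fund_windcode: str, end_date: str, n: int = 10):
    return (
        session.query(
            CHINAMUTUALFUNDSTOCKPORTFOLIO.S_INFO_STOCKWINDCODE,
            CHINAMUTUALFUNDSTOCKPORTFOLIO.F_PRT_STKVALUE,
            CHINAMUTUALFUNDSTOCKPORTFOLIO.F_PRT_STKVALUETONAV,
        )
        .filter(
            CHINAMUTUALFUNDSTOCKPORTFOLIO.S_INFO_WINDCODE == fund_windcode,
            CHINAMUTUALFUNDSTOCKPORTFOLIO.F_PRT_ENDDATE == end_date,
        )
        .order_by(CHINAMUTUALFUNDSTOCKPORTFOLIO.F_PRT_STKVALUE.desc())
        .limit(n)
        .all()
    )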
class CHINAMUTUALFUNDSUSPENDPCHREDM(Base):
__tablename__ = 'CHINAMUTUALFUNDSUSPENDPCHREDM'
F_INFO_PURCHASEUPLIMIT = Column(DECIMAL(20, 4), doc="单日申购上限")
F_INFO_REPCHANNDT = Column(String(8), doc="恢复申购公告日期")
F_INFO_REPCHDT = Column(String(8), doc="恢复申购日期")
F_INFO_SUSPCHANNDT = Column(String(8), doc="暂停申购公告日期")
F_INFO_SUSPCHREASON = Column(String(800), doc="暂停申购原因")
F_INFO_SUSPCHSTARTDT = Column(String(8), doc="暂停申购起始日期")
F_INFO_SUSPCHTYPE = Column(DECIMAL(9, 0), doc="暂停申购类型代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CHINAMUTUALFUNDTRACKINGINDEX(Base):
__tablename__ = 'CHINAMUTUALFUNDTRACKINGINDEX'
ENTRY_DT = Column(String(8), doc="生效日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
REMOVE_DT = Column(String(8), doc="失效日期")
S_INFO_INDEXWINDCODE = Column(String(40), doc="跟踪指数Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CLOSEDFUNDPCHREDM(Base):
__tablename__ = 'CLOSEDFUNDPCHREDM'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PCH_CODE = Column(String(20), doc="场内基金代码")
PCH_NAME = Column(String(40), doc="场内基金简称")
PCH_START_DT = Column(String(8), doc="场内申购起始日")
REDM_START_DT = Column(String(8), doc="场内赎回起始日")
S_INFO_EXCHMARKET = Column(String(40), doc="交易所")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SUBSTRIPTION_CODE = Column(String(20), doc="场内认购代码")
SUBSTRIPTION_END_DT = Column(String(8), doc="场内认购截止日")
SUBSTRIPTION_NAME = Column(String(40), doc="场内认购简称")
SUBSTRIPTION_PRICE = Column(DECIMAL(20, 4), doc="场内认购价格")
SUBSTRIPTION_START_DT = Column(String(8), doc="场内认购起始日")
class CMFAIPINFO(Base):
__tablename__ = 'CMFAIPINFO'
ANN_DT = Column(String(8), doc="公告日期")
COMP_ID = Column(String(10), doc="定投代销机构ID")
COMP_NAME = Column(String(200), doc="定投代销机构名称")
END_DT = Column(String(8), doc="定投截止日期")
LEVEL_AMOUNT = Column(DECIMAL(20, 4), doc="投资额级差(元)")
MAX_PURCHASE = Column(DECIMAL(20, 4), doc="定投最高金额(元)")
MEMO = Column(String(500), doc="备注")
MIN_PURCHASE = Column(DECIMAL(20, 4), doc="定投起始金额(元)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SEQUENCE = Column(DECIMAL(1, 0), doc="序号")
START_DT = Column(String(8), doc="定投开始时间")
TYPE_CODE = Column(DECIMAL(9, 0), doc="定投类型代码")
class CMFCODEANDSNAME(Base):
__tablename__ = 'CMFCODEANDSNAME'
IS_COMMON = Column(DECIMAL(1, 0), doc="是否通用代码")
MEMO = Column(String(800), doc="备注")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CODE = Column(String(20), doc="业务代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_SNAME = Column(String(100), doc="业务简称")
TYPE_CODE = Column(DECIMAL(9, 0), doc="业务类型代码")
class CMFCONSEPTION(Base):
__tablename__ = 'CMFCONSEPTION'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_SECTOR = Column(String(10), doc="所属板块代码")
S_INFO_SECTORENTRYDT = Column(String(8), doc="起始日期")
S_INFO_SECTOREXITDT = Column(String(8), doc="截止日期")
S_INFO_SECTORNAME = Column(String(40), doc="所属板块名称")
class CMFDESCCHANGE(Base):
__tablename__ = 'CMFDESCCHANGE'
CHANGE_DT = Column(String(8), doc="变更日期")
ITEM = Column(String(50), doc="变更字段名称")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_NEW = Column(String(1000), doc="变更后")
S_INFO_OLD = Column(String(1000), doc="变更前")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CMFFAIRVALUECHANGEPROFIT(Base):
__tablename__ = 'CMFFAIRVALUECHANGEPROFIT'
ABS_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="资产支持证券投资公允价值变动收益")
ANN_DT = Column(String(8), doc="公告日期")
BOND1_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="债券投资(银行间同业市场)公允价值变动收益")
BOND_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="债券投资(交易所市场)公允价值变动收益")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
FORWARD_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="外汇远期投资公允价值变动收益")
FUND_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="基金投资公允价值变动收益")
MEMO = Column(String(200), doc="备注")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTHER = Column(DECIMAL(20, 4), doc="其他")
REPORT_PERIOD = Column(String(8), doc="报告期")
STOCK_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="股票投资公允价值变动收益")
TOT_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="合计公允价值变动收益")
WARRANT_CHANGE_FAIR_VALUE = Column(DECIMAL(20, 4), doc="权证投资公允价值变动收益")
class CMFFIXEDINVESTMENTRATE(Base):
__tablename__ = 'CMFFIXEDINVESTMENTRATE'
F_ANNUALYEILD_FIVESYEAR = Column(DECIMAL(20, 6), doc="近五年定投年化收益率")
F_ANNUALYEILD_FIVEYEAR = Column(String(20), doc="近五年定投年化收益同类排名")
F_ANNUALYEILD_THISYEAR = Column(DECIMAL(20, 6), doc="近一年定投年化收益率")
F_ANNUALYEILD_THREESYEAR = Column(DECIMAL(20, 6), doc="近三年定投年化收益率")
F_ANNUALYEILD_THREEYEAR = Column(String(20), doc="近三年定投年化收益同类排名")
F_ANNUALYEILD_TWOSYEAR = Column(DECIMAL(20, 6), doc="近二年定投年化收益率")
F_ANNUALYEILD_TWOYEAR = Column(String(20), doc="近二年定投年化收益同类排名")
F_AVGRETURN_FIVEYEAR = Column(DECIMAL(20, 6), doc="近五年定投总收益率")
F_AVGRETURN_THISYEAR = Column(DECIMAL(20, 6), doc="近一年定投总收益率")
F_AVGRETURN_THREEYEAR = Column(DECIMAL(20, 6), doc="近三年定投总收益率")
F_AVGRETURN_TWOYEAR = Column(DECIMAL(20, 6), doc="近二年定投总收益率")
F_DIVIDEND_METHOD = Column(String(20), doc="分红方式")
F_FIXED_AMOUNT = Column(DECIMAL(20, 0), doc="定投金额")
F_FIXED_INVESTMENT_CYCLE = Column(String(20), doc="定投周期")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_PURCHASE_RATE = Column(DECIMAL(20, 4), doc="申购费率")
F_RECENTYEART_THISYEAR = Column(String(20), doc="近一年定投年化收益同类排名")
F_SFRANK_RECENTYEART = Column(String(20), doc="近一年定投同类排名")
F_SFRANK_RECENTYEART_FIVESYEAR = Column(String(20), doc="近五年定投同类排名")
F_SFRANK_RECENTYEART_TWOSYEAR = Column(String(20), doc="近二年定投同类排名")
F_SFRANK_THREESYEAR = Column(String(20), doc="近三年定投同类排名")
F_TYPE_CODE = Column(String(20), doc="投资类型代码")
F_TYPE_NAME = Column(String(40), doc="基金投资类型名称")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
TRADE_DT = Column(String(8), doc="交易日期")
class CMFHOLDER(Base):
__tablename__ = 'CMFHOLDER'
ANN_DT = Column(String(8), doc="公告日期")
B_INFO_HOLDAMOUNT = Column(DECIMAL(20, 4), doc="数量(股/张/份)")
B_INFO_HOLDER = Column(String(300), doc="持有人")
B_ISSUER_SHARECATEGORY = Column(String(1), doc="[内部]股东类型: 1 个人;2 公司")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_FA_LATELYRD = Column(String(8), doc="报告期")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_ENDDATE = Column(String(8), doc="截止日期")
S_INFO_WINDCODE = Column(String(40), doc="证券ID")
class CMFHOLDERSTRUCTURE(Base):
__tablename__ = 'CMFHOLDERSTRUCTURE'
ANN_DT = Column(String(8), doc="公告日期")
END_DT = Column(String(8), doc="截止日期")
HOLDER_AVGHOLDING = Column(DECIMAL(20, 4), doc="平均每户持有基金份额(份)")
HOLDER_FEEDER_HOLDING = Column(DECIMAL(20, 4), doc="联接基金持有份额(份)")
HOLDER_FEEDER_HOLDINGPCT = Column(DECIMAL(20, 4), doc="联接基金持有份额占比(%)")
HOLDER_INSTITUTION_HOLDING = Column(DECIMAL(20, 4), doc="机构投资者持有份额(份)")
HOLDER_INSTITUTION_HOLDINGPCT = Column(DECIMAL(20, 4), doc="机构投资者持有份额占比(%)")
HOLDER_MNGEMP_HOLDING = Column(DECIMAL(20, 4), doc="管理人员工持有份额(份)")
HOLDER_MNGEMP_HOLDINGPCT = Column(DECIMAL(20, 8), doc="管理人员工持有份额占比(%)")
HOLDER_NUMBER = Column(DECIMAL(20, 0), doc="基金份额持有人户数(户)")
HOLDER_PERSONAL_HOLDING = Column(DECIMAL(20, 4), doc="个人投资者持有份额(份)")
HOLDER_PERSONAL_HOLDINGPCT = Column(DECIMAL(20, 4), doc="个人投资者持有份额占比(%)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PUR_COST = Column(DECIMAL(20, 4), doc="报告期买入股票成本总额(元)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SCOPE = Column(String(4), doc="范围")
SEC_ID = Column(String(40), doc="证券ID")
SELL_INCOME = Column(DECIMAL(20, 4), doc="报告期卖出股票收入总额(元)")
class CMFHOLDINGRATIOANOMALY(Base):
__tablename__ = 'CMFHOLDINGRATIOANOMALY'
F_END_NUM_HOLDER = Column(DECIMAL(20, 4), doc="期末持有份额")
F_END_NUM_HOLDER_PROPORTION = Column(DECIMAL(20, 4), doc="期末持有份额占基金份额比例")
F_HOLDING_TIME = Column(String(500), doc="持有时间区间")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_INQUIRER_TYPE = Column(String(100), doc="投资者类别")
F_INQUIRER_TYPE_NUM = Column(DECIMAL(2, 0), doc="投资者序号")
F_INQUIRER_TYPECODE = Column(String(1), doc="投资者类别代码")
F_START_NUM_HOLDER = Column(DECIMAL(20, 4), doc="期初持有份额")
F_UNIT_PCH = Column(DECIMAL(20, 4), doc="申购份额")
F_UNIT_REDM = Column(DECIMAL(20, 4), doc="赎回份额")
F_UNIT_RPENDDATE = Column(String(8), doc="报告期截止日期")
F_UNIT_RPSTARTDATE = Column(String(8), doc="报告期开始日期")
IS_MERGE = Column(DECIMAL(1, 0), doc="是否为合并数据")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
TRADE_DT = Column(String(8), doc="公告日期")
class CMFINDEXDESCRIPTION(Base):
__tablename__ = 'CMFINDEXDESCRIPTION'
COMPONENT_STOCKS_NUM = Column(DECIMAL(5, 0), doc="成份股数量")
EXPIRE_DATE = Column(String(8), doc="终止发布日期")
EXPONENTIAL_SCALE_CODE = Column(DECIMAL(9, 0), doc="指数规模代码")
INCOME_PROCESSING_METHOD = Column(String(20), doc="收益处理方式")
INCOME_PROCESSING_METHOD_CODE = Column(DECIMAL(9, 0), doc="收益处理方式代码")
INDEX_CATEGORY_CODE = Column(DECIMAL(9, 0), doc="指数类别代码")
INDEX_CATEGORY_TYPE = Column(String(40), doc="指数类别")
INDEX_INTRO = Column(LONGTEXT, doc="指数简介")
INDEX_REGION_CODE = Column(DECIMAL(9, 0), doc="指数区域代码")
MARKET_OWN_CODE = Column(DECIMAL(9, 0), doc="所属市场代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CODE = Column(String(40), doc="交易代码")
S_INFO_COMPNAME = Column(String(100), doc="指数名称")
S_INFO_EXCHMARKET = Column(String(40), doc="交易所")
S_INFO_INDEX_BASEPER = Column(String(8), doc="基期")
S_INFO_INDEX_BASEPT = Column(DECIMAL(20, 4), doc="基点")
S_INFO_INDEX_ENAME = Column(String(200), doc="指数英文名称")
S_INFO_INDEX_WEIGHTSRULE = Column(String(40), doc="加权方式")
S_INFO_INDEXSTYLE = Column(String(40), doc="指数风格")
S_INFO_LISTDATE = Column(String(8), doc="发布日期")
S_INFO_NAME = Column(String(50), doc="证券简称")
S_INFO_PUBLISHER = Column(String(100), doc="发布方")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
WEIGHT_TYPE = Column(String(100), doc="权重类型")
WEIGHT_TYPE_CODE = Column(DECIMAL(9, 0), doc="权重类型代码")
WEIGHTING_METHOD_END_DATE = Column(String(8), doc="【废弃】加权方式终止日期")
WEIGHTING_METHOD_START_DATE = Column(String(8), doc="【废弃】加权方式起始日期")
class CMFINDEXEOD(Base):
__tablename__ = 'CMFINDEXEOD'
CRNCY_CODE = Column(String(10), doc="货币代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_AMOUNT = Column(DECIMAL(20, 4), doc="成交金额(千元)")
S_DQ_CHANGE = Column(DECIMAL(20, 4), doc="涨跌(点)")
S_DQ_CLOSE = Column(DECIMAL(20, 4), doc="最新价")
S_DQ_HIGH = Column(DECIMAL(20, 4), doc="最高价")
S_DQ_LOW = Column(DECIMAL(20, 4), doc="最低价")
S_DQ_OPEN = Column(DECIMAL(20, 4), doc="开盘价")
S_DQ_PCTCHANGE = Column(DECIMAL(20, 4), doc="涨跌幅(%)")
S_DQ_PRECLOSE = Column(DECIMAL(20, 4), doc="昨收盘价")
S_DQ_VOLUME = Column(DECIMAL(20, 4), doc="成交量(手)")
S_INFO_NAME = Column(String(50), doc="指数简称")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SEC_ID = Column(String(10), doc="证券ID")
TRADE_DT = Column(String(8), doc="交易日期")
class CMFINDUSTRYPLATE(Base):
__tablename__ = 'CMFINDUSTRYPLATE'
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_NAME = Column(String(100), doc="板块名称")
S_INFO_SECTOR = Column(String(16), doc="板块代码")
S_INFO_WINDCODE = Column(String(40), doc="成份万得代码")
class CMFIOPVNAV(Base):
__tablename__ = 'CMFIOPVNAV'
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_IOPV_NAV = Column(DECIMAL(20, 8), doc="IOPV收盘净值")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PRICE_DATE = Column(String(8), doc="日期")
class CMFNAVOPERATIONRECORD(Base):
__tablename__ = 'CMFNAVOPERATIONRECORD'
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_NAV_OLD = Column(DECIMAL(11, 6), doc="调整前净值")
FUND_NAV_OBJECT_ID = Column(String(38), doc="基金净值表记录ID")
HANDLE_ACTION = Column(String(20), doc="处理动作")
HANDLE_DATE = Column(String(8), doc="处理日期")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PRICE_DATE = Column(String(8), doc="净值截止日期")
class CMFOTHERPORTFOLIO(Base):
__tablename__ = 'CMFOTHERPORTFOLIO'
ANN_DT = Column(String(8), doc="公告日期")
CRNCY_CODE = Column(String(10), doc="货币代码")
END_DT = Column(String(8), doc="截止日期")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
QUANTITY = Column(DECIMAL(20, 4), doc="持仓数量(股/张)")
S_INFO_HOLDWINDCODE = Column(String(40), doc="Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
VALUE = Column(DECIMAL(20, 4), doc="持仓市值")
VALUETONAV = Column(DECIMAL(20, 4), doc="持仓市值占基金净值比例(%)")
class CMFPREFERENTIALFEE(Base):
__tablename__ = 'CMFPREFERENTIALFEE'
AMOUNTDOWNLIMIT = Column(DECIMAL(20, 4), doc="申购金额下限(万元)")
AMOUNTUPLIMIT = Column(DECIMAL(20, 4), doc="申购金额上限(万元)")
ANN_DT = Column(String(8), doc="公告日期")
COMP_ID = Column(String(10), doc="参加优惠活动的代销机构公司ID")
COMP_NAME = Column(String(200), doc="参加优惠活动的代销机构名称")
END_DT = Column(String(8), doc="费率优惠截止日期")
MEMO = Column(String(500), doc="备注")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PREFERENTIAL_RATE = Column(DECIMAL(20, 4), doc="优惠费率(%)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SEC_ID = Column(String(10), doc="证券ID")
SEQUENCE = Column(DECIMAL(1, 0), doc="序号")
START_DT = Column(String(8), doc="费率优惠开始日期")
TYPE_CODE = Column(DECIMAL(9, 0), doc="优惠活动类型代码")
WAY_TYPE = Column(DECIMAL(9, 0), doc="优惠活动参与方式代码")
class CMFPROPORTIONOFINVEOBJ(Base):
__tablename__ = 'CMFPROPORTIONOFINVEOBJ'
BOND_INVEST_PCT_DOWN = Column(DECIMAL(5, 0), doc="债券投资比例下限")
BOND_INVEST_PCT_UP = Column(DECIMAL(5, 0), doc="债券投资比例上限")
CHANGE_DT = Column(String(8), doc="变动日期")
COMM_INVEST_PCT_DOWN = Column(DECIMAL(5, 0), doc="商品衍生品投资比例下限")
COMM_INVEST_PCT_UP = Column(DECIMAL(5, 0), doc="商品衍生品投资比例上限")
CONSEPTION_TYPECODE = Column(DECIMAL(9, 0), doc="概念主题类别代码")
FUND_INVEST_PCT_DOWN = Column(DECIMAL(5, 0), doc="基金投资比例下限")
FUND_INVEST_PCT_UP = Column(DECIMAL(5, 0), doc="基金投资比例上限")
INVEST_PCT_TYPECODE = Column(DECIMAL(9, 0), doc="基金投资占比类型代码")
IS_TOT_PCT = Column(DECIMAL(1, 0), doc="是否合计占比")
MMT_INVEST_PCT_DOWN = Column(DECIMAL(5, 0), doc="货币工具比例下限")
MMT_INVEST_PCT_UP = Column(DECIMAL(5, 0), doc="货币工具比例上限")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTHER_INVEST_PCT_DOWN = Column(DECIMAL(5, 0), doc="其他非股票投资比例下限")
OTHER_INVEST_PCT_UP = Column(DECIMAL(5, 0), doc="其他非股票投资比例上限")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
STOCK_INVEST_PCT_DOWN = Column(DECIMAL(5, 0), doc="股票投资比例下限")
STOCK_INVEST_PCT_UP = Column(DECIMAL(5, 0), doc="股票投资比例上限")
class CMFRISKLEVEL(Base):
__tablename__ = 'CMFRISKLEVEL'
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RISK_LEVEL = Column(String(40), doc="基金风险等级")
class CMFSECCLASS(Base):
__tablename__ = 'CMFSECCLASS'
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_SECTOR = Column(String(10), doc="所属板块代码")
S_INFO_SECTORENTRYDT = Column(String(8), doc="起始日期")
S_INFO_SECTOREXITDT = Column(String(8), doc="截止日期")
S_INFO_SECTORNAME = Column(String(40), doc="所属板块名称")
class CMFSELLINGAGENTS(Base):
__tablename__ = 'CMFSELLINGAGENTS'
CUR_SIGN = Column(DECIMAL(5, 0), doc="最新标志")
F_AGENCY_NAME = Column(String(200), doc="机构名称")
F_AGENCY_NAMEID = Column(String(20), doc="中介机构公司ID")
F_ANN_DATE = Column(String(8), doc="公告日期")
F_BEGIN_DATE = Column(String(8), doc="起始日期")
F_END_DATE = Column(String(8), doc="终止日期")
F_INFO_WINDCODE = Column(String(40), doc="WIND代码")
F_RELATION = Column(String(30), doc="关系类型")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
class CMFSUBREDFEE(Base):
__tablename__ = 'CMFSUBREDFEE'
AMOUNTDOWNLIMIT = Column(DECIMAL(20, 4), doc="金额下限(万元)")
AMOUNTUPLIMIT = Column(DECIMAL(20, 4), doc="金额上限(万元)")
ANN_DATE = Column(String(8), doc="公告日期")
CHANGE_DT = Column(String(8), doc="变动日期")
CHARGEWAY = Column(String(20), doc="收费类型")
FEE_TYPE_CODE = Column(DECIMAL(9, 0), doc="费率类型代码")
FEERATIO = Column(DECIMAL(20, 4), doc="费率(%)")
FEETYPECODE = Column(String(30), doc="费率类型")
HOLDINGPERIOD_DOWNLIMIT = Column(DECIMAL(20, 4), doc="持有年限下限")
HOLDINGPERIOD_UPLIMIT = Column(DECIMAL(20, 4), doc="持有年限上限")
HOLDINGPERIODUNIT = Column(String(20), doc="持有期限单位")
ISUPLIMITFEE = Column(String(1), doc="是否上限费率")
MEMO = Column(String(4), doc="区间是否包含掩码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SUPPLEMENTARY = Column(String(800), doc="费率补充说明")
TRADINGPLACE = Column(String(40), doc="场所")
TRADINGPLACECODE = Column(DECIMAL(9, 0), doc="投资群体代码")
USED = Column(DECIMAL(1, 0), doc="是否有效")
class CMFTHEMECONCEPT(Base):
__tablename__ = 'CMFTHEMECONCEPT'
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_NAME = Column(String(100), doc="板块名称")
S_INFO_SECTOR = Column(String(16), doc="板块代码")
S_INFO_WINDCODE = Column(String(40), doc="成份万得代码")
class CMFTRADINGSUSPENSION(Base):
__tablename__ = 'CMFTRADINGSUSPENSION'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_DQ_CHANGEREASON = Column(String(400), doc="停牌原因")
S_DQ_RESUMPDATE = Column(String(8), doc="复牌日期")
S_DQ_SUSPENDDATE = Column(String(8), doc="停牌日期")
S_DQ_SUSPENDTIME = Column(String(200), doc="停复牌时间")
S_DQ_SUSPENDTYPE = Column(DECIMAL(9, 0), doc="停牌类型代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SEC_ID = Column(String(40), doc="证券id")
class CMFUNDOPERATEPERIOD(Base):
__tablename__ = 'CMFUNDOPERATEPERIOD'
ANNUALYEILD = Column(DECIMAL(20, 4), doc="实际年化收益率")
ANTICIPATE_ANNUALYEILD = Column(DECIMAL(20, 4), doc="预期年化收益率")
BATCH1 = Column(DECIMAL(5, 0), doc="批次")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OPR_ENDDT = Column(String(8), doc="运作结束日")
OPR_PERIOD = Column(DECIMAL(10, 0), doc="期数")
OPR_STARTDT = Column(String(8), doc="运作起始日")
PCH_ENDDT = Column(String(8), doc="开放申购截止日")
PCH_STARTDT = Column(String(8), doc="开放申购起始日")
REDM_ENDDT = Column(String(8), doc="开放赎回截止日")
REDM_STARTDT = Column(String(8), doc="开放赎回起始日")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CMMFPORTFOLIOPTM(Base):
__tablename__ = 'CMMFPORTFOLIOPTM'
ANN_DT = Column(String(8), doc="公告日期")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
F_PTM_BOTTOM = Column(DECIMAL(20, 4), doc="剩余期上限")
F_PTM_TOP = Column(DECIMAL(20, 4), doc="剩余期下限")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RATIO_ASSERT_NAV = Column(DECIMAL(20, 4), doc="资产占净值比例(%)")
RATIO_LIAB_NAV = Column(DECIMAL(20, 4), doc="负债占净值比例(%)")
REPORT_PERIOD = Column(String(8), doc="报告期")
TYPECODE = Column(String(200), doc="类型")
class CMMQUARTERLYDATA(Base):
__tablename__ = 'CMMQUARTERLYDATA'
ANN_DATE = Column(String(8), doc="公告日期")
AVG_REMAINDER_PERIOD_MAX = Column(DECIMAL(20, 4), doc="报告期内投资组合平均剩余期限最高值")
AVG_REMAINDER_PERIOD_MIN = Column(DECIMAL(20, 4), doc="报告期内投资组合平均剩余期限最低值")
BONDREPO_BALANCE = Column(DECIMAL(20, 4), doc="报告期内债券回购融资余额")
BONDREPO_BALANCE_RATIO = Column(DECIMAL(20, 4), doc="报告期内债券回购融资余额占资产净值的比例(%)")
DEVIATION_DEGREE_AVG_VALUE = Column(DECIMAL(20, 4), doc="报告期内每个工作日偏离度的绝对值的简单平均值(%)")
DEVIATION_DEGREE_FREQUENCY = Column(DECIMAL(20, 4), doc="报告期内偏离度的绝对值在0.25%(含)-0.5%间的次数")
DEVIATION_DEGREE_MAX = Column(DECIMAL(20, 4), doc="报告期内偏离度的最高值(%)")
DEVIATION_DEGREE_MIN = Column(DECIMAL(20, 4), doc="报告期内偏离度的最低值(%)")
END_BONDREPO_BALANCE = Column(DECIMAL(20, 4), doc="报告期末债券回购融资余额")
END_BONDREPO_BALANCE_RATIO = Column(DECIMAL(20, 4), doc="报告期末债券回购融资余额占资产净值的比例(%)")
F_INFO_BGNDATE = Column(String(8), doc="报告期起始日期")
F_INFO_ENDDATE = Column(String(8), doc="报告期截止日期")
FIXED_DEPOSIT = Column(DECIMAL(20, 4), doc="商业银行定期存款")
FLOATING_BOND_AMOUNT = Column(DECIMAL(20, 4), doc="剩余存续期超过397天的浮动利率债券金额")
FLOATING_BOND_AMOUNT_RATIO = Column(DECIMAL(20, 4), doc="剩余存续期超过397天的浮动利率债券占资产净值比例(%)")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CMONEYMARKETDAILYFINCOME(Base):
__tablename__ = 'CMONEYMARKETDAILYFINCOME'
ANN_DATE = Column(String(8), doc="公告日期")
F_ACCUNITNAV = Column(DECIMAL(20, 4), doc="累计单位净值")
F_INCOME_PER_MILLION = Column(DECIMAL(20, 5), doc="万份收益")
F_INFO_BGNDATE = Column(String(8), doc="起始日期")
F_INFO_ENDDATE = Column(String(8), doc="截止日期")
F_INFO_UNITYIELD = Column(DECIMAL(20, 4), doc="单位净值")
F_INFO_YEARLYROE = Column(DECIMAL(20, 4), doc="七日年化收益率")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CMONEYMARKETFINCOME(Base):
__tablename__ = 'CMONEYMARKETFINCOME'
ANN_DATE = Column(String(8), doc="公告日期")
F_INFO_BGNDATE = Column(String(8), doc="起始日期")
F_INFO_ENDDATE = Column(String(8), doc="截止日期")
F_INFO_UNITYIELD = Column(DECIMAL(20, 5), doc="每万份基金单位收益")
F_INFO_YEARLYROE = Column(DECIMAL(20, 4), doc="最近七日收益所折算的年资产收益率")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class CMONEYMARKETFSCARRYOVERM(Base):
__tablename__ = 'CMONEYMARKETFSCARRYOVERM'
CHANGE_DATE = Column(String(8), doc="变动日期")
F_INFO_INCOMESCARRYOVERDTCODE = Column(DECIMAL(9, 0), doc="收益分配份额结转日期类型代码")
F_INFO_INCOMESCARRYOVERM = Column(String(20), doc="收益分配份额结转方式")
F_IS_NEW = Column(DECIMAL(1, 0), doc="是否最新")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_WINDCODE = Column(String(40), doc="基金Wind代码")
class CODEANDSNAME(Base):
__tablename__ = 'CODEANDSNAME'
BEGINDATE = Column(String(8), doc="代码有效起始日期")
ENDDATE = Column(String(8), doc="代码有效截止日期")
IS_COMMON = Column(DECIMAL(1, 0), doc="是否通用代码")
MEMO = Column(String(800), doc="备注")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CODE = Column(String(40), doc="业务代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_SNAME = Column(String(100), doc="业务简称")
SEC_ID = Column(String(40), doc="证券ID")
TYPE_CODE = Column(DECIMAL(9, 0), doc="业务类型代码")
class COMPANYPREVIOUSNAME(Base):
__tablename__ = 'COMPANYPREVIOUSNAME'
ANN_DT = Column(String(8), doc="公告日期")
CHANGE_DT = Column(String(8), doc="变动日期")
CHANGE_REASON = Column(String(100), doc="更名原因")
COMP_NAME = Column(String(200), doc="公司名称")
COMP_NAME_ENG = Column(String(200), doc="公司英文名称")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
class COMPINTRODUCTION(Base):
__tablename__ = 'COMPINTRODUCTION'
ADDRESS = Column(String(200), doc="注册地址")
BRIEFING = Column(String(2000), doc="公司简介")
BUSINESSSCOPE = Column(LONGTEXT, doc="经营范围")
CHAIRMAN = Column(String(100), doc="法人代表")
CITY = Column(String(50), doc="城市")
COMP_ID = Column(String(40), doc="公司ID")
COMP_NAME = Column(String(200), doc="公司名称")
COMP_NAME_ENG = Column(String(100), doc="英文名称")
COMP_PROPERTY = Column(String(100), doc="企业性质")
COMP_SNAME = Column(String(40), doc="公司中文简称")
COMP_SNAMEENG = Column(String(100), doc="英文名称缩写")
COMP_TYPE = Column(String(100), doc="公司类型")
COMPANY_TYPE = Column(String(10), doc="公司类别")
COUNTRY = Column(String(20), doc="国籍")
CURRENCYCODE = Column(String(10), doc="货币代码")
DISCLOSER = Column(String(500), doc="信息披露人")
EMAIL = Column(String(200), doc="电子邮件")
ENDDATE = Column(String(8), doc="公司终止日期")
FAX = Column(String(50), doc="传真")
FOUNDDATE = Column(String(8), doc="成立日期")
IS_LISTED = Column(DECIMAL(1, 0), doc="是否上市公司")
MAIN_BUSINESS = Column(String(1000), doc="主要产品及业务")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OFFICE = Column(String(400), doc="办公地址")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PHONE = Column(String(50), doc="电话")
PRESIDENT = Column(String(100), doc="总经理")
PROVINCE = Column(String(20), doc="省份")
REGCAPITAL = Column(DECIMAL(20, 4), doc="注册资本")
REGISTERNUMBER = Column(String(20), doc="统一社会信用代码")
S_INFO_COMPTYPE = Column(String(10), doc="公司类型")
S_INFO_ORG_CODE = Column(String(30), doc="组织机构代码")
S_INFO_TOTALEMPLOYEES = Column(DECIMAL(20, 0), doc="员工总数(人)")
SOCIAL_CREDIT_CODE = Column(String(30), doc="统一社会信用编码(废弃)")
WEBSITE = Column(String(200), doc="公司网址")
ZIPCODE = Column(String(10), doc="邮编")
class COMPORGANIZATIONCODE(Base):
__tablename__ = 'COMPORGANIZATIONCODE'
COMP_ID = Column(String(40), doc="公司ID")
IS_COMMON = Column(DECIMAL(1, 0), doc="是否通用代码")
MEMO = Column(String(800), doc="备注")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_CODE = Column(String(40), doc="业务代码")
S_SNAME = Column(String(100), doc="业务名称")
TYPE_CODE = Column(DECIMAL(9, 0), doc="业务类型代码")
class COUNTRYANDAREACODE(Base):
__tablename__ = 'COUNTRYANDAREACODE'
CONTINENT = Column(String(20), doc="所属洲")
COUNTRY_CODE_2 = Column(String(10), doc="国家及地区代码(2位)")
COUNTRY_CODE_3 = Column(String(10), doc="国家及地区代码(3位)")
IS_VALID = Column(DECIMAL(5, 0), doc="是否有效")
NAME = Column(String(40), doc="国家及地区名称")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RELEASE_DATE = Column(DateTime, doc="发布日期")
class COUNTRYANDAREACODEZL(Base):
__tablename__ = 'COUNTRYANDAREACODEZL'
CONTINENT = Column(String(20), doc="所属洲")
COUNTRY_CODE_2 = Column(String(10), doc="国家及地区代码(2位)")
COUNTRY_CODE_3 = Column(String(10), doc="国家及地区代码(3位)")
IS_VALID = Column(DECIMAL(5, 0), doc="是否有效")
NAME1 = Column(String(40), doc="国家及地区名称")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
RELEASE_DATE = Column(DateTime, doc="发布日期")
class CPFUNDDESCRIPTION(Base):
__tablename__ = 'CPFUNDDESCRIPTION'
ANN_DT = Column(String(8), doc="公告日期")
CP_PERIOD = Column(DECIMAL(20, 4), doc="保本周期(年)")
END_DT = Column(String(8), doc="保本周期终止日期")
F_INFO_WINDCODE = Column(String(40), doc="Wind代码")
GUARANT_FEE = Column(DECIMAL(20, 4), doc="保证费率(%)")
GUARANTOR = Column(String(200), doc="保证人名称")
GUARANTOR_INFO = Column(String(800), doc="保证人简介")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
START_DT = Column(String(8), doc="保本周期起始日期")
TRIGGER_INFO = Column(String(2000), doc="触发机制说明")
TRIGGER_YIELD = Column(DECIMAL(20, 4), doc="触发收益率(%)")
class CURRENCYCODE(Base):
__tablename__ = 'CURRENCYCODE'
CRNCY_NAME = Column(String(40), doc="货币名称")
CURRENCY_CODE = Column(String(10), doc="货币代码")
LATEST_LOGO = Column(DECIMAL(1, 0), doc="最新标志")
MAIN_CURRENCY_CODE = Column(String(10), doc="主币货币代码")
MAIN_CURRENCY_RATIO = Column(DECIMAL(20, 0), doc="主辅币比例")
MEMO = Column(String(100), doc="备注")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PIP_VALUE = Column(DECIMAL(20, 4), doc="pip value")
class ETFPCHREDM(Base):
__tablename__ = 'ETFPCHREDM'
CONVERSION_DT = Column(String(8), doc="份额折算日")
CONVERSION_RATIO = Column(DECIMAL(20, 8), doc="份额折算比例")
LIST_DT = Column(String(8), doc="上市日期")
LIST_SHARE = Column(DECIMAL(20, 4), doc="上市交易份额")
NETWORKCASHBUYDOWNLIMIT = Column(DECIMAL(20, 4), doc="网上现金认购份额下限(份)")
NETWORKCASHBUYENDDT = Column(String(8), doc="网上现金认购截止日")
NETWORKCASHBUYSTARTDT = Column(String(8), doc="网上现金认购起始日")
NETWORKCASHBUYUPLIMIT = Column(DECIMAL(20, 4), doc="网上现金认购份额上限(份)")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OFFNETWORKBUYENDDT = Column(String(8), doc="网下现金认购截止日")
OFFNETWORKBUYSTARTDT = Column(String(8), doc="网下现金认购起始日")
OFFNETWORKCASHBUYDOWNLIMIT = Column(DECIMAL(20, 4), doc="网下现金认购份额下限(份)")
ONLINE_OFFERING_CODE = Column(String(10), doc="网上现金发售代码")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PCH_CODE = Column(String(10), doc="申购赎回代码")
PCH_NAME = Column(String(40), doc="申购赎回简称")
PCH_START_DT = Column(String(8), doc="申购起始日")
REDM_START_DT = Column(String(8), doc="赎回起始日")
S_INFO_EXCHMARKET = Column(String(20), doc="交易所")
S_INFO_WINDCODE = Column(String(40), doc="基金Wind代码")
class FINANCIALQUALIFICATION(Base):
__tablename__ = 'FINANCIALQUALIFICATION'
ACQUISITION_DATE = Column(String(8), doc="获得日期")
AGENCY_TYPCODE = Column(String(80), doc="机构类型")
FINANCIAL_TYPE = Column(String(100), doc="金融机构资格类型")
FINANCIAL_TYPE_NUM = Column(DECIMAL(9, 0), doc="金融机构资格类型代码")
IS_EFFECTIVE = Column(DECIMAL(5, 0), doc="是否有效")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
ORGANIZATION_NAME = Column(String(100), doc="机构公布名称")
QUALIFICATION_CODE = Column(String(100), doc="资格编码")
REVOKE_DATE = Column(String(8), doc="撤销日期")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_NAME = Column(String(200), doc="公司简称")
class FINDEXPERFORMANCE(Base):
__tablename__ = 'FINDEXPERFORMANCE'
ANNUALYEILD = Column(DECIMAL(20, 6), doc="年化收益率")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PCT_CHG_RECENT1M = Column(DECIMAL(20, 6), doc="最近1月涨跌幅")
PCT_CHG_RECENT1W = Column(DECIMAL(20, 6), doc="最近1周涨跌幅")
PCT_CHG_RECENT1Y = Column(DECIMAL(20, 6), doc="最近1年涨跌幅")
PCT_CHG_RECENT2Y = Column(DECIMAL(20, 6), doc="最近2年涨跌幅")
PCT_CHG_RECENT3M = Column(DECIMAL(20, 6), doc="最近3月涨跌幅")
PCT_CHG_RECENT3Y = Column(DECIMAL(20, 6), doc="最近3年涨跌幅")
PCT_CHG_RECENT4Y = Column(DECIMAL(20, 6), doc="最近4年涨跌幅")
PCT_CHG_RECENT5Y = Column(DECIMAL(20, 6), doc="最近5年涨跌幅")
PCT_CHG_RECENT6M = Column(DECIMAL(20, 6), doc="最近6月涨跌幅")
PCT_CHG_RECENT6Y = Column(DECIMAL(20, 6), doc="最近6年涨跌幅")
PCT_CHG_THISMONTH = Column(DECIMAL(20, 6), doc="本月以来涨跌幅")
PCT_CHG_THISQUARTER = Column(DECIMAL(20, 6), doc="本季以来涨跌幅")
PCT_CHG_THISWEEK = Column(DECIMAL(20, 6), doc="本周以来涨跌幅")
PCT_CHG_THISYEAR = Column(DECIMAL(20, 6), doc="本年以来涨跌幅")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SHARPRATIO_1Y = Column(DECIMAL(20, 6), doc="1年夏普比率")
SHARPRATIO_2Y = Column(DECIMAL(20, 6), doc="2年夏普比率")
SHARPRATIO_3Y = Column(DECIMAL(20, 6), doc="3年夏普比率")
SHARPRATIO_6M = Column(DECIMAL(20, 6), doc="6个月夏普比率")
SI_PCT_CHG = Column(DECIMAL(20, 6), doc="发布以来涨跌幅")
STD_DEV_1Y = Column(DECIMAL(20, 6), doc="1年标准差")
STD_DEV_2Y = Column(DECIMAL(20, 6), doc="2年标准差")
STD_DEV_3Y = Column(DECIMAL(20, 6), doc="3年标准差")
STD_DEV_6M = Column(DECIMAL(20, 6), doc="6个月标准差")
TRADE_DT = Column(String(8), doc="交易日期")
class FUNDCREDITRECORD(Base):
__tablename__ = 'FUNDCREDITRECORD'
ANN_DATE = Column(String(8), doc="公告日期")
BUSINESS_RESTRICTIVE_MEASURES = Column(DECIMAL(9, 0), doc="业务资格限制措施代码")
DEBAR_MEASURES_CODE = Column(DECIMAL(9, 0), doc="市场禁入措施代码")
DETAILED_CONTENT = Column(LONGTEXT, doc="详细内容")
EFFECTIVE_DATE = Column(String(8), doc="生效日期")
EVENT_ID = Column(String(20), doc="事件ID")
INSTITUTION_ID = Column(String(10), doc="处罚时所在机构ID")
INVOLVING_COMP_ID = Column(String(10), doc="涉及公司ID")
IRREGULARITIES = Column(String(1000), doc="违规事项")
IS_EFFECTIVE = Column(DECIMAL(1, 0), doc="是否生效")
LEGAL_BASIS = Column(String(1000), doc="法律依据")
MEASURES_DISPOSITION = Column(String(1000), doc="处分措施")
MEASURES_DISPOSITION_CODE = Column(DECIMAL(9, 0), doc="处分措施代码")
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PENALTY_AMOUNT = Column(DECIMAL(24, 8), doc="处罚金额")
PUNISHMENT_MEASURES_CODE = Column(DECIMAL(9, 0), doc="处罚措施代码")
PUNISHMENT_TIME_CODE = Column(DECIMAL(9, 0), doc="处分期限代码")
REGULATORS_ID = Column(String(10), doc="监管机构ID")
REGULATORY_OBJECT_CODE = Column(DECIMAL(9, 0), doc="监管对象类别代码")
REGULATORY_OBJECT_ID = Column(String(10), doc="监管对象ID")
REGULATORY_OBJECT_NAME = Column(String(100), doc="监管对象名称")
REGULATORY_OBJECT_TYPE = Column(DECIMAL(9, 0), doc="监管对象类型代码")
TYPE_CODE = Column(DECIMAL(9, 0), doc="业务类型代码")
class GLOBALMARKETTRADINGTIME(Base):
__tablename__ = 'GLOBALMARKETTRADINGTIME'
EXCHANGE_ENG_NAME = Column(String(200), doc="交易所英文名称")
EXCHANGE_NAME = Column(String(40), doc="交易所中文名称")
EXCHANGE_SNAME_ENG = Column(String(40), doc="交易所英文简称")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
SECURITIES_TYPE = Column(String(1000), doc="交易品种描述")
TRADING_HOURS = Column(String(500), doc="交易时段")
TRADING_HOURS_2 = Column(String(1000), doc="交易时段(新)")
TRADING_HOURS_CODE = Column(String(5), doc="交易时段编码")
class GLOBALWORKINGDAY(Base):
__tablename__ = 'GLOBALWORKINGDAY'
COUNTRY_CODE = Column(String(10), doc="国家或地区代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
WORKING_DATE = Column(String(8), doc="日期")
class INDEXCONTRASTSECTOR(Base):
__tablename__ = 'INDEXCONTRASTSECTOR'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_INDEXCODE = Column(String(40), doc="指数万得代码")
S_INFO_INDUSTRYCODE = Column(String(16), doc="板块代码")
S_INFO_INDUSTRYCODE2 = Column(String(16), doc="板块代码2")
S_INFO_INDUSTRYNAME = Column(String(50), doc="板块名称")
S_INFO_INDUSTRYNAME_ENG = Column(String(200), doc="板块英文名称")
S_INFO_NAME = Column(String(50), doc="指数简称")
class LOFDESCRIPTION(Base):
__tablename__ = 'LOFDESCRIPTION'
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_LISTBOARDNAME = Column(String(10), doc="上市板")
S_INFO_LISTDATE = Column(String(8), doc="上市日期")
S_INFO_OUTSTANDINGBALANCE = Column(DECIMAL(20, 4), doc="上市交易份额")
S_INFO_WINDCODE = Column(String(40), doc="wind代码")
class LOFPCHREDM(Base):
__tablename__ = 'LOFPCHREDM'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
PCH_CODE = Column(String(20), doc="场内申购赎回基金代码")
PCH_NAME = Column(String(40), doc="场内申购赎回基金简称")
PCH_START_DT = Column(String(8), doc="场内申购起始日")
REDM_START_DT = Column(String(8), doc="场内赎回起始日")
S_INFO_EXCHMARKET = Column(String(20), doc="交易所")
S_INFO_WINDCODE = Column(String(40), doc="基金Wind代码")
SUBSTRIPTION_CODE = Column(String(20), doc="场内认购基金代码")
SUBSTRIPTION_END_DT = Column(String(8), doc="场内认购截止日")
SUBSTRIPTION_NAME = Column(String(40), doc="场内认购基金简称")
SUBSTRIPTION_PRICE = Column(DECIMAL(20, 4), doc="场内认购价格")
SUBSTRIPTION_START_DT = Column(String(8), doc="场内认购起始日")
class RALATEDSECURITIESCODE(Base):
__tablename__ = 'RALATEDSECURITIESCODE'
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_EFFECTIVE_DT = Column(String(8), doc="生效日期")
S_INFO_INVALID_DT = Column(String(8), doc="失效日期")
S_INFO_RALATEDCODE = Column(String(40), doc="关联证券Wind代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_RELATION_TYPCODE = Column(String(10), doc="关系类型代码")
class SHSCCHANNELHOLDINGS(Base):
__tablename__ = 'SHSCCHANNELHOLDINGS'
OBJECT_ID = Column(String(38), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_CODE = Column(String(40), doc="[内部]股份代码")
S_INFO_EXCHMARKETNAME = Column(String(40), doc="交易所英文简称")
S_INFO_WINDCODE = Column(String(40), doc="WIND代码")
S_QUANTITY = Column(DECIMAL(20, 4), doc="中央结算系统持股量")
S_RATIO = Column(DECIMAL(20, 4), doc="[内部]中央结算系统持股量占比")
TRADE_DT = Column(String(8), doc="持股日期")
class SHSCDAILYSTATISTICS(Base):
__tablename__ = 'SHSCDAILYSTATISTICS'
ITEM_CODE = Column(DECIMAL(9, 0), doc="项目代码")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_EXCHMARKET = Column(String(40), doc="交易所英文简称")
TRADE_DT = Column(String(100), doc="日期")
UNIT = Column(String(20), doc="单位")
VALUE = Column(DECIMAL(20, 4), doc="数据")
class WINDCUSTOMCODE(Base):
__tablename__ = 'WINDCUSTOMCODE'
CRNCY_CODE = Column(String(10), doc="币种编号")
CRNCY_NAME = Column(String(40), doc="币种")
EXCHMARKET = Column(String(10), doc="交易所")
OBJECT_ID = Column(String(100), primary_key=True, doc="%s")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
S_INFO_ASHARECODE = Column(String(10), doc="证券ID")
S_INFO_CODE = Column(String(40), doc="交易代码")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_COUNTRYCODE = Column(String(10), doc="国别编号")
S_INFO_COUNTRYNAME = Column(String(100), doc="国别")
S_INFO_ENAME = Column(String(200), doc="证券英文简称")
S_INFO_EXCHMARKET = Column(String(10), doc="交易所编号")
S_INFO_EXCHMARKETNAME = Column(String(40), doc="交易所")
S_INFO_ISINCODE = Column(String(40), doc="ISIN代码")
S_INFO_LOT_SIZE = Column(DECIMAL(20, 4), doc="每手数量")
S_INFO_MIN_PRICE_CHG_UNIT = Column(DECIMAL(24, 8), doc="最小价格变动单位")
S_INFO_NAME = Column(String(50), doc="证券中文简称")
S_INFO_ORG_CODE = Column(String(20), doc="组织机构代码")
S_INFO_PINYIN = Column(String(40), doc="简称拼音")
S_INFO_SECTYPEBCODE = Column(DECIMAL(9, 0), doc="品种大类代码")
S_INFO_SECTYPENAME = Column(String(40), doc="类型名称")
S_INFO_SECTYPESCODE = Column(DECIMAL(9, 0), doc="品种细类代码")
S_INFO_SECURITIESTYPES = Column(String(10), doc="证券类型")
S_INFO_TYPECODE = Column(DECIMAL(9, 0), doc="分类代码")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SECURITY_STATUS = Column(DECIMAL(9, 0), doc="存续状态")
TRADING_HOURS_CODE = Column(String(10), doc="交易时段编码")
class WIND_PDUPDATE_LOG(Base):
__tablename__ = 'WIND_PDUPDATE_LOG'
CURFILE = Column(String(512))
CURFILEDESC = Column(String(1000))
CURFILETIME = Column(DateTime)
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
SERVERFILE = Column(String(512))
SERVERFILETIME = Column(DateTime)
TABLENAME = Column(String(40), primary_key=True)
wind_class_dict = {
'AEQUFROPLEINFOREPPEREND': AEQUFROPLEINFOREPPEREND,
'AINDEXCSI500WEIGHT': AINDEXCSI500WEIGHT,
'AINDEXDESCRIPTION': AINDEXDESCRIPTION,
'AINDEXEODPRICES': AINDEXEODPRICES,
'AINDEXFINANCIALDERIVATIVE': AINDEXFINANCIALDERIVATIVE,
'AINDEXHS300CLOSEWEIGHT': AINDEXHS300CLOSEWEIGHT,
'AINDEXHS300WEIGHT': AINDEXHS300WEIGHT,
'AINDEXINDUSTRIESEODCITICS': AINDEXINDUSTRIESEODCITICS,
'AINDEXMEMBERS': AINDEXMEMBERS,
'AINDEXMEMBERSCITICS': AINDEXMEMBERSCITICS,
'AINDEXMEMBERSCITICS2': AINDEXMEMBERSCITICS2,
'AINDEXMEMBERSCITICS2ZL': AINDEXMEMBERSCITICS2ZL,
'AINDEXMEMBERSCITICS3': AINDEXMEMBERSCITICS3,
'AINDEXMEMBERSCITICSZL': AINDEXMEMBERSCITICSZL,
'ASAREPLANTRADE': ASAREPLANTRADE,
'ASHAREACCOUNTSPAYABLE': ASHAREACCOUNTSPAYABLE,
'ASHAREADMINISTRATION': ASHAREADMINISTRATION,
'ASHAREANNFINANCIALINDICATOR': ASHAREANNFINANCIALINDICATOR,
'ASHAREAUDITOPINION': ASHAREAUDITOPINION,
'ASHAREBALANCESHEET': ASHAREBALANCESHEET,
'ASHAREBANKINDICATOR': ASHAREBANKINDICATOR,
'ASHAREBEGUARANTEED': ASHAREBEGUARANTEED,
'ASHARECALENDAR': ASHARECALENDAR,
'ASHARECAPITALIZATION': ASHARECAPITALIZATION,
'ASHARECAPITALOPERATION': ASHARECAPITALOPERATION,
'ASHARECASHFLOW': ASHARECASHFLOW,
'ASHARECIRCULATINGHOLDERS': ASHARECIRCULATINGHOLDERS,
'ASHARECOCAPITALOPERATION': ASHARECOCAPITALOPERATION,
'ASHARECOMPANYHOLDSHARES': ASHARECOMPANYHOLDSHARES,
'ASHARECONCEPTUALPLATE': ASHARECONCEPTUALPLATE,
'ASHARECREDITORRIGHTS': ASHARECREDITORRIGHTS,
'ASHARECUSTOMER': ASHARECUSTOMER,
'ASHAREDEFENDANT': ASHAREDEFENDANT,
'ASHAREDESCRIPTION': ASHAREDESCRIPTION,
'ASHAREDIRECTOR': ASHAREDIRECTOR,
'ASHAREDIVIDEND': ASHAREDIVIDEND,
'ASHAREEARNINGEST': ASHAREEARNINGEST,
'ASHAREEODPRICES': ASHAREEODPRICES,
'ASHAREEQUFROINFO': ASHAREEQUFROINFO,
'ASHAREEQUITYPLEDGEINFO': ASHAREEQUITYPLEDGEINFO,
'ASHAREEQUITYRELATIONSHIPS': ASHAREEQUITYRELATIONSHIPS,
'ASHAREESOPDESCRIPTION': ASHAREESOPDESCRIPTION,
'ASHAREESOPTRADINGINFO': ASHAREESOPTRADINGINFO,
'ASHAREFINANCIALDERIVATIVE': ASHAREFINANCIALDERIVATIVE,
'ASHAREFINANCIALINDICATOR': ASHAREFINANCIALINDICATOR,
'ASHAREFLOATHOLDER': ASHAREFLOATHOLDER,
'ASHAREFREEFLOAT': ASHAREFREEFLOAT,
'ASHAREGROUP': ASHAREGROUP,
'ASHAREGROUPINFORMATION': ASHAREGROUPINFORMATION,
'ASHAREGUARANTEERELATIONSHIP': ASHAREGUARANTEERELATIONSHIP,
'ASHAREGUARANTEESTATISTICS': ASHAREGUARANTEESTATISTICS,
'ASHAREHOLDER': ASHAREHOLDER,
'ASHAREHOLDERNUMBER': ASHAREHOLDERNUMBER,
'ASHAREHOLDING': ASHAREHOLDING,
'ASHAREIBROKERINDICATOR': ASHAREIBROKERINDICATOR,
'ASHAREILLEGALITY': ASHAREILLEGALITY,
'ASHAREINCDESCRIPTION': ASHAREINCDESCRIPTION,
'ASHAREINCEXECQTYPRI': ASHAREINCEXECQTYPRI,
'ASHAREINCEXERCISEPCT': ASHAREINCEXERCISEPCT,
'ASHAREINCEXERCISEPCTZL': ASHAREINCEXERCISEPCTZL,
'ASHAREINCOME': ASHAREINCOME,
'ASHAREINCQUANTITYDETAILS': ASHAREINCQUANTITYDETAILS,
'ASHAREINCQUANTITYPRICE': ASHAREINCQUANTITYPRICE,
'ASHAREINDUSRATING': ASHAREINDUSRATING,
'ASHAREINDUSTRIESCLASSCITICS': ASHAREINDUSTRIESCLASSCITICS,
'ASHAREINDUSTRIESCLASSCITICSZL': ASHAREINDUSTRIESCLASSCITICSZL,
'ASHAREINDUSTRIESCODE': ASHAREINDUSTRIESCODE,
'ASHAREINSIDEHOLDER': ASHAREINSIDEHOLDER,
'ASHAREINSIDERTRADE': ASHAREINSIDERTRADE,
'ASHAREINSTHOLDERDERDATA': ASHAREINSTHOLDERDERDATA,
'ASHAREINSURANCEINDICATOR': ASHAREINSURANCEINDICATOR,
'ASHAREINTENSITYTREND': ASHAREINTENSITYTREND,
'ASHAREINTENSITYTRENDADJ': ASHAREINTENSITYTRENDADJ,
'ASHAREINVESTMENTPEVC': ASHAREINVESTMENTPEVC,
'ASHAREIPOPRICINGFORECAST': ASHAREIPOPRICINGFORECAST,
'ASHARELONGLOAN': ASHARELONGLOAN,
'ASHAREMAJORHOLDERPLANHOLD': ASHAREMAJORHOLDERPLANHOLD,
'ASHAREMAJORHOLDERPLANHOLDZL': ASHAREMAJORHOLDERPLANHOLDZL,
'ASHAREMANAGEMENT': ASHAREMANAGEMENT,
'ASHAREMANAGEMENTHOLDREWARD': ASHAREMANAGEMENTHOLDREWARD,
'ASHAREMARGINSUBJECT': ASHAREMARGINSUBJECT,
'ASHAREMARGINTRADE': ASHAREMARGINTRADE,
'ASHAREMARGINTRADESUM': ASHAREMARGINTRADESUM,
'ASHAREMECHANISMOWNERSHIP': ASHAREMECHANISMOWNERSHIP,
'ASHAREMERGERSUBJECT': ASHAREMERGERSUBJECT,
'ASHAREMJRHOLDERTRADE': ASHAREMJRHOLDERTRADE,
'ASHAREPEVCINVESTMENT': ASHAREPEVCINVESTMENT,
'ASHAREPLAINTIFF': ASHAREPLAINTIFF,
'ASHAREPLEDGEPROPORTION': ASHAREPLEDGEPROPORTION,
'ASHAREPLEDGETRADE': ASHAREPLEDGETRADE,
'ASHAREPREVIOUSENNAME': ASHAREPREVIOUSENNAME,
'ASHAREPRODUCT': ASHAREPRODUCT,
'ASHAREPROFITEXPRESS': ASHAREPROFITEXPRESS,
'ASHAREPROFITNOTICE': ASHAREPROFITNOTICE,
'ASHAREPROSECUTION': ASHAREPROSECUTION,
'ASHARERECEIVABLES': ASHARERECEIVABLES,
'ASHAREREGINV': ASHAREREGINV,
'ASHARERELATEDPARTYDEBT': ASHARERELATEDPARTYDEBT,
'ASHARERIGHTISSUE': ASHARERIGHTISSUE,
'ASHARESELLSUBJECT': ASHARESELLSUBJECT,
'ASHAREST': ASHAREST,
'ASHARESTAFF': ASHARESTAFF,
'ASHARESTAFFSTRUCTURE': ASHARESTAFFSTRUCTURE,
'ASHARESTIBHOLDERVOTE': ASHARESTIBHOLDERVOTE,
'ASHARESTOCKRATING': ASHARESTOCKRATING,
'ASHARESUPERVISOR': ASHARESUPERVISOR,
'ASHARESUPPLIER': ASHARESUPPLIER,
'ASHARETRADINGSUSPENSION': ASHARETRADINGSUSPENSION,
'ASHARETYPECODE': ASHARETYPECODE,
'CFUNDBANKACCOUNT': CFUNDBANKACCOUNT,
'CFUNDCHANGEWINDCODE': CFUNDCHANGEWINDCODE,
'CFUNDCODEANDSNAME': CFUNDCODEANDSNAME,
'CFUNDCOMPANYPREVIOUSNAME': CFUNDCOMPANYPREVIOUSNAME,
'CFUNDFACTIONALSTYLE': CFUNDFACTIONALSTYLE,
'CFUNDHOLDRESTRICTEDCIRCULATION': CFUNDHOLDRESTRICTEDCIRCULATION,
'CFUNDINDEXMEMBERS': CFUNDINDEXMEMBERS,
'CFUNDINDEXTABLE': CFUNDINDEXTABLE,
'CFUNDINDUSTRIESCODE': CFUNDINDUSTRIESCODE,
'CFUNDINTRODUCTION': CFUNDINTRODUCTION,
'CFUNDMANAGEMENT': CFUNDMANAGEMENT,
'CFUNDPCHREDM': CFUNDPCHREDM,
'CFUNDPORTFOLIOCHANGES': CFUNDPORTFOLIOCHANGES,
'CFUNDPREVIOUSNAME': CFUNDPREVIOUSNAME,
'CFUNDRALATEDSECURITIESCODE': CFUNDRALATEDSECURITIESCODE,
'CFUNDRATESENSITIVE': CFUNDRATESENSITIVE,
'CFUNDSTYLECOEFFICIENT': CFUNDSTYLECOEFFICIENT,
'CFUNDSTYLETHRESHOLD': CFUNDSTYLETHRESHOLD,
'CFUNDTACODE': CFUNDTACODE,
'CFUNDTYPECODE': CFUNDTYPECODE,
'CFUNDWINDCUSTOMCODE': CFUNDWINDCUSTOMCODE,
'CFUNDWINDINDEXCOMPONENT': CFUNDWINDINDEXCOMPONENT,
'CFUNDWINDINDEXMEMBERS': CFUNDWINDINDEXMEMBERS,
'CHANGEWINDCODE': CHANGEWINDCODE,
'CHINACLOSEDFUNDEODPRICE': CHINACLOSEDFUNDEODPRICE,
'CHINAFEEDERFUND': CHINAFEEDERFUND,
'CHINAGRADINGFUND': CHINAGRADINGFUND,
'CHINAMFMPERFORMANCE': CHINAMFMPERFORMANCE,
'CHINAMFPERFORMANCE': CHINAMFPERFORMANCE,
'CHINAMUTUALFUNDASSETPORTFOLIO': CHINAMUTUALFUNDASSETPORTFOLIO,
'CHINAMUTUALFUNDBENCHMARK': CHINAMUTUALFUNDBENCHMARK,
'CHINAMUTUALFUNDBENCHMARKEOD': CHINAMUTUALFUNDBENCHMARKEOD,
'CHINAMUTUALFUNDBONDPORTFOLIO': CHINAMUTUALFUNDBONDPORTFOLIO,
'CHINAMUTUALFUNDDESCRIPTION': CHINAMUTUALFUNDDESCRIPTION,
'CHINAMUTUALFUNDFLOATSHARE': CHINAMUTUALFUNDFLOATSHARE,
'CHINAMUTUALFUNDINDPORTFOLIO': CHINAMUTUALFUNDINDPORTFOLIO,
'CHINAMUTUALFUNDMANAGER': CHINAMUTUALFUNDMANAGER,
'CHINAMUTUALFUNDNAV': CHINAMUTUALFUNDNAV,
'CHINAMUTUALFUNDPCHREDM': CHINAMUTUALFUNDPCHREDM,
'CHINAMUTUALFUNDPOSESTIMATION': CHINAMUTUALFUNDPOSESTIMATION,
'CHINAMUTUALFUNDREPNAVPER': CHINAMUTUALFUNDREPNAVPER,
'CHINAMUTUALFUNDSEATTRADING': CHINAMUTUALFUNDSEATTRADING,
'CHINAMUTUALFUNDSECTOR': CHINAMUTUALFUNDSECTOR,
'CHINAMUTUALFUNDSHARE': CHINAMUTUALFUNDSHARE,
'CHINAMUTUALFUNDSTOCKPORTFOLIO': CHINAMUTUALFUNDSTOCKPORTFOLIO,
'CHINAMUTUALFUNDSUSPENDPCHREDM': CHINAMUTUALFUNDSUSPENDPCHREDM,
'CHINAMUTUALFUNDTRACKINGINDEX': CHINAMUTUALFUNDTRACKINGINDEX,
'CLOSEDFUNDPCHREDM': CLOSEDFUNDPCHREDM,
'CMFAIPINFO': CMFAIPINFO,
'CMFCODEANDSNAME': CMFCODEANDSNAME,
'CMFCONSEPTION': CMFCONSEPTION,
'CMFDESCCHANGE': CMFDESCCHANGE,
'CMFFAIRVALUECHANGEPROFIT': CMFFAIRVALUECHANGEPROFIT,
'CMFFIXEDINVESTMENTRATE': CMFFIXEDINVESTMENTRATE,
'CMFHOLDER': CMFHOLDER,
'CMFHOLDERSTRUCTURE': CMFHOLDERSTRUCTURE,
'CMFHOLDINGRATIOANOMALY': CMFHOLDINGRATIOANOMALY,
'CMFINDEXDESCRIPTION': CMFINDEXDESCRIPTION,
'CMFINDEXEOD': CMFINDEXEOD,
'CMFINDUSTRYPLATE': CMFINDUSTRYPLATE,
'CMFIOPVNAV': CMFIOPVNAV,
'CMFNAVOPERATIONRECORD': CMFNAVOPERATIONRECORD,
'CMFOTHERPORTFOLIO': CMFOTHERPORTFOLIO,
'CMFPREFERENTIALFEE': CMFPREFERENTIALFEE,
'CMFPROPORTIONOFINVEOBJ': CMFPROPORTIONOFINVEOBJ,
'CMFRISKLEVEL': CMFRISKLEVEL,
'CMFSECCLASS': CMFSECCLASS,
'CMFSELLINGAGENTS': CMFSELLINGAGENTS,
'CMFSUBREDFEE': CMFSUBREDFEE,
'CMFTHEMECONCEPT': CMFTHEMECONCEPT,
'CMFTRADINGSUSPENSION': CMFTRADINGSUSPENSION,
'CMFUNDOPERATEPERIOD': CMFUNDOPERATEPERIOD,
'CMMFPORTFOLIOPTM': CMMFPORTFOLIOPTM,
'CMMQUARTERLYDATA': CMMQUARTERLYDATA,
'CMONEYMARKETDAILYFINCOME': CMONEYMARKETDAILYFINCOME,
'CMONEYMARKETFINCOME': CMONEYMARKETFINCOME,
'CMONEYMARKETFSCARRYOVERM': CMONEYMARKETFSCARRYOVERM,
'CODEANDSNAME': CODEANDSNAME,
'COMPANYPREVIOUSNAME': COMPANYPREVIOUSNAME,
'COMPINTRODUCTION': COMPINTRODUCTION,
'COMPORGANIZATIONCODE': COMPORGANIZATIONCODE,
'COUNTRYANDAREACODE': COUNTRYANDAREACODE,
'COUNTRYANDAREACODEZL': COUNTRYANDAREACODEZL,
'CPFUNDDESCRIPTION': CPFUNDDESCRIPTION,
'CURRENCYCODE': CURRENCYCODE,
'ETFPCHREDM': ETFPCHREDM,
'FINANCIALQUALIFICATION': FINANCIALQUALIFICATION,
'FINDEXPERFORMANCE': FINDEXPERFORMANCE,
'FUNDCREDITRECORD': FUNDCREDITRECORD,
'GLOBALMARKETTRADINGTIME': GLOBALMARKETTRADINGTIME,
'GLOBALWORKINGDAY': GLOBALWORKINGDAY,
'INDEXCONTRASTSECTOR': INDEXCONTRASTSECTOR,
'LOFDESCRIPTION': LOFDESCRIPTION,
'LOFPCHREDM': LOFPCHREDM,
'RALATEDSECURITIESCODE': RALATEDSECURITIESCODE,
'SHSCCHANNELHOLDINGS': SHSCCHANNELHOLDINGS,
'SHSCDAILYSTATISTICS': SHSCDAILYSTATISTICS,
'WINDCUSTOMCODE': WINDCUSTOMCODE,
'WIND_PDUPDATE_LOG': WIND_PDUPDATE_LOG,
}
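# Usage sketch (added illustration, not part of the original module; the session
# and the LATEST_LOGO == 1 convention are assumptions based on the column docs
# above). wind_class_dict resolves a table name to its mapped class at runtime,
# so callers can query tables generically.
def _example_wind_query(session, table_name='CURRENCYCODE'):
    """Look up a mapped class by table name and run a simple query against it."""
    model = wind_class_dict[table_name]
    # LATEST_LOGO is documented above as the "latest" flag; 1 is assumed current.
    return session.query(model).filter(model.LATEST_LOGO == 1).limit(10).all()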
| 46.881918
| 4,793
| 0.692083
|
c368d0ebed7950d44f296a0d56e04e47f46bbed6
| 3,937
|
py
|
Python
|
qmcpy/integrand/european_option.py
|
jungtaekkim/QMCSoftware
|
4518d26b06ef737797d5e522cb61d9f7b516d74e
|
[
"Apache-2.0"
] | null | null | null |
qmcpy/integrand/european_option.py
|
jungtaekkim/QMCSoftware
|
4518d26b06ef737797d5e522cb61d9f7b516d74e
|
[
"Apache-2.0"
] | null | null | null |
qmcpy/integrand/european_option.py
|
jungtaekkim/QMCSoftware
|
4518d26b06ef737797d5e522cb61d9f7b516d74e
|
[
"Apache-2.0"
] | null | null | null |
from ._integrand import Integrand
from ..true_measure import BrownianMotion
from ..discrete_distribution import Sobol
from ..util import ParameterError
from numpy import *
from scipy.stats import norm
class EuropeanOption(Integrand):
"""
European financial option.
>>> eo = EuropeanOption(Sobol(4,seed=7),call_put='put')
>>> eo
EuropeanOption (Integrand Object)
volatility 2^(-1)
call_put put
start_price 30
strike_price 35
interest_rate 0
>>> x = eo.discrete_distrib.gen_samples(2**12)
>>> y = eo.f(x)
>>> y.mean()
9.210...
>>> eo = EuropeanOption(BrownianMotion(Sobol(4,seed=7),drift=1),call_put='put')
>>> x = eo.discrete_distrib.gen_samples(2**12)
>>> y = eo.f(x)
>>> y.mean()
9.220...
"""
parameters = ['volatility', 'call_put', 'start_price', 'strike_price', 'interest_rate']
def __init__(self, sampler, volatility=0.5, start_price=30, strike_price=35,
interest_rate=0, t_final=1, call_put='call'):
"""
Args:
sampler (DiscreteDistribution/TrueMeasure): A
discrete distribution from which to transform samples or a
true measure by which to compose a transform
volatility (float): sigma, the volatility of the asset
start_price (float): S(0), the asset value at t=0
            strike_price (float): K, the strike price of the call/put option
interest_rate (float): r, the annual interest rate
t_final (float): exercise time
call_put (str): 'call' or 'put' option
"""
self.t_final = t_final
self.true_measure = BrownianMotion(sampler,t_final=self.t_final)
self.volatility = float(volatility)
self.start_price = float(start_price)
self.strike_price = float(strike_price)
self.interest_rate = float(interest_rate)
self.call_put = call_put.lower()
if self.call_put not in ['call','put']:
raise ParameterError("call_put must be either 'call' or 'put'")
super(EuropeanOption,self).__init__()
def g(self, x):
""" See abstract method. """
self.s = self.start_price * exp(
(self.interest_rate - self.volatility ** 2 / 2) *
self.true_measure.time_vec + self.volatility * x)
        for xx, yy in zip(*where(self.s <= 0)):  # if the stock hits 0 or below, zero out the rest of the path
self.s[xx,yy:] = 0
if self.call_put == 'call':
y_raw = maximum(self.s[:,-1] - self.strike_price, 0)
else: # put
y_raw = maximum(self.strike_price - self.s[:,-1], 0)
y_adj = y_raw * exp(-self.interest_rate * self.t_final)
return y_adj
def get_exact_value(self):
"""
Get the fair price of a European call/put option.
Return:
float: fair price
"""
denom = self.volatility * sqrt(self.t_final)
decay = self.strike_price * exp(-self.interest_rate * self.t_final)
if self.call_put == 'call':
term1 = log(self.start_price / self.strike_price) + \
(self.interest_rate + self.volatility**2/2) * self.t_final
term2 = log(self.start_price / self.strike_price) + \
(self.interest_rate - self.volatility**2/2) * self.t_final
fp = self.start_price * norm.cdf(term1/denom) - decay * norm.cdf(term2/denom)
elif self.call_put == 'put':
term1 = log(self.strike_price / self.start_price) - \
(self.interest_rate - self.volatility**2/2) * self.t_final
term2 = log(self.strike_price / self.start_price) - \
(self.interest_rate + self.volatility**2/2) * self.t_final
fp = decay * norm.cdf(term1/denom) - self.start_price * norm.cdf(term2/denom)
return fp
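# Usage sketch (added illustration, not part of the original file): compare the
# QMC sample-mean estimate of the option price against the closed-form
# Black-Scholes value from get_exact_value(); the two should be close.
def _example_price_check(n=2 ** 12):
    eo = EuropeanOption(Sobol(4, seed=7), call_put='put')
    x = eo.discrete_distrib.gen_samples(n)
    qmc_estimate = eo.f(x).mean()  # ~9.21 per the doctest above
    return qmc_estimate, eo.get_exact_value()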
| 41.442105
| 91
| 0.589789
|
d8e87ea321023c2b703febb9df20368457e875bf
| 595
|
py
|
Python
|
API/Webinar-Dec-2015/st1.py
|
VIRL-Open/virl-utils
|
4de6c8237a7b1b6d0af4280f95ec9439b9b6d831
|
[
"0BSD"
] | 56
|
2015-01-05T22:38:13.000Z
|
2020-06-09T05:37:59.000Z
|
API/Webinar-Dec-2015/st1.py
|
VIRL-Open/virl-utils
|
4de6c8237a7b1b6d0af4280f95ec9439b9b6d831
|
[
"0BSD"
] | 6
|
2016-01-11T22:32:20.000Z
|
2016-08-02T01:56:01.000Z
|
API/Webinar-Dec-2015/st1.py
|
VIRL-Open/virl-utils
|
4de6c8237a7b1b6d0af4280f95ec9439b9b6d831
|
[
"0BSD"
] | 45
|
2015-03-24T20:17:11.000Z
|
2021-08-20T01:36:05.000Z
|
#!/usr/bin/env python
import os, requests, sys
def stop_topo(toponame):
virl_host = "10.87.94.20"
username = password = "guest"
url = "http://%s:19399/simengine/rest/stop/%s" % (virl_host, toponame)
headers = {'content-type': 'text/xml'}
result = requests.get(url, auth=(username, password), headers=headers)
    print(result.text)
def main():
if len(sys.argv) != 2:
        print("%s: usage: st1.py <simulation-name>" % sys.argv[0])
return 1
else:
stop_topo(str(sys.argv[1]).strip())
return 0
if __name__ == '__main__':
sys.exit(main())
| 22.037037
| 77
| 0.636975
|
6191d65a193fdca18404dba061e86f2102224dab
| 3,443
|
py
|
Python
|
sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/model_validator.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/model_validator.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/model_validator.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from typing import Tuple
from azure.ai.ml.entities import OnlineDeployment
from azure.ai.ml.entities._assets import Model
from azure.ai.ml._operations.model_operations import ModelOperations
from azure.ai.ml._local_endpoints.errors import RequiredLocalArtifactsNotFoundError
from azure.ai.ml._artifacts._artifact_utilities import download_artifact
from azure.ai.ml._utils._arm_id_utils import parse_prefixed_name_version, is_ARM_id_for_resource
from azure.ai.ml._utils._storage_utils import AzureMLDatastorePathUri
class ModelValidator:
    def get_model_artifacts(
        self, endpoint_name: str, deployment: OnlineDeployment, model_operations: ModelOperations, download_path: str
    ) -> Tuple[str, str, str]:
"""Validates and returns model artifacts from deployment specification.
:param endpoint_name: name of endpoint which this deployment is linked to
:type endpoint_name: str
:param deployment: deployment to validate
:type deployment: OnlineDeployment entity
        :return: (model name, model version, the local directory of the model artifact)
        :rtype: (str, str, str)
:raises: azure.ai.ml._local_endpoints.errors.RequiredLocalArtifactsNotFoundError
:raises: azure.ai.ml._local_endpoints.errors.CloudArtifactsNotSupportedError
"""
# Validate model for local endpoint
if self._model_contains_cloud_artifacts(deployment=deployment):
return self._get_cloud_model_artifacts(
model_operations=model_operations,
model=deployment.model,
download_path=download_path,
)
if not self._local_model_is_valid(deployment=deployment):
raise RequiredLocalArtifactsNotFoundError(
endpoint_name=endpoint_name,
required_artifact="model.path",
required_artifact_type=str,
deployment_name=deployment.name,
)
return (
deployment.model.name,
deployment.model.version,
Path(deployment._base_path, deployment.model.path).resolve().parent,
)
def _local_model_is_valid(self, deployment: OnlineDeployment):
return deployment.model and isinstance(deployment.model, Model) and deployment.model.path
def _model_contains_cloud_artifacts(self, deployment: OnlineDeployment):
# If the deployment.model is a string, then it is the cloud model name or full arm ID
return isinstance(deployment.model, str)
    def _get_cloud_model_artifacts(self, model_operations: ModelOperations, model: str, download_path: str) -> Tuple[str, str, str]:
name, version = parse_prefixed_name_version(model)
model_asset = model_operations.get(name=name, version=version)
model_uri_path = AzureMLDatastorePathUri(model_asset.path)
path = Path(model_uri_path.path)
starts_with = path if path.is_dir() else path.parent
return (
name,
version,
download_artifact(
starts_with=starts_with,
destination=download_path,
datastore_operation=model_operations._datastore_operation,
datastore_name=model_uri_path.datastore,
),
)
| 45.302632
| 117
| 0.67964
|
91e67e1f2cedac674a356d3e467ef9755fa0decb
| 1,471
|
py
|
Python
|
python/python3-100-examples/example71.py
|
ii6uu99/ipynb
|
d924a6926838ca5e563620cd324368a07d2c2521
|
[
"MIT"
] | 1
|
2021-01-27T09:01:33.000Z
|
2021-01-27T09:01:33.000Z
|
python/python3-100-examples/example71.py
|
ii6uu99/ipynb
|
d924a6926838ca5e563620cd324368a07d2c2521
|
[
"MIT"
] | null | null | null |
python/python3-100-examples/example71.py
|
ii6uu99/ipynb
|
d924a6926838ca5e563620cd324368a07d2c2521
|
[
"MIT"
] | 1
|
2021-01-26T13:22:21.000Z
|
2021-01-26T13:22:21.000Z
|
#!/usr/bin/python3
__author__ = "yang.dd"
"""
example 071
"""
class Student(object):
def __init__(self):
self.__num = 0
self.__name = ""
self.__score = [0, 0, 0, 0]
def get_num(self):
return self.__num
def get_name(self):
return self.__name
def get_score(self):
return self.__score
def set_name(self, name):
self.__name = name
def set_num(self, num):
try:
num = int(num)
self.__num = num
        except ValueError:
            print("Please input an int")
def set_score(self, score_1, score_2, score_3, score_4):
self.__score[0] = score_1
self.__score[1] = score_2
self.__score[2] = score_3
self.__score[3] = score_4
def input_stu(n):
student_list = []
for i in range(n):
s = Student()
        s.set_num(input("Enter student ID: "))
        s.set_name(input("Enter name: "))
        s.set_score(input("Enter score 1: "), input("Enter score 2: "), input("Enter score 3: "), input("Enter score 4: "))
student_list.append(s)
return student_list
def output_stu(student_list):
for i in range(len(student_list)):
s = student_list[i]
print("编号:%s\t姓名:%s\t成绩[%s\t%s\t%s\t%s]" % (
s.get_num(), s.get_name(), s.get_score()[0], s.get_score()[1], s.get_score()[2], s.get_score()[3]))
if __name__ == '__main__':
output_stu(input_stu(1))
| 23.725806
| 112
| 0.538409
|
e5157b7ca98582f888575c2eb13cc57c78577b4a
| 2,454
|
py
|
Python
|
reaction_completer/periodic_table.py
|
CederGroupHub/ReactionCompleter
|
6b20a9a4ffdf19e0d51c99901d104d8f737ce762
|
[
"MIT"
] | null | null | null |
reaction_completer/periodic_table.py
|
CederGroupHub/ReactionCompleter
|
6b20a9a4ffdf19e0d51c99901d104d8f737ce762
|
[
"MIT"
] | null | null | null |
reaction_completer/periodic_table.py
|
CederGroupHub/ReactionCompleter
|
6b20a9a4ffdf19e0d51c99901d104d8f737ce762
|
[
"MIT"
] | null | null | null |
import json
import operator
import os
import warnings
from functools import reduce
__author__ = 'Haoyan Huo'
__maintainer__ = 'Haoyan Huo'
__email__ = 'haoyan.huo@lbl.gov'
__all__ = ['NON_VOLATILE_ELEMENTS', 'ELEMENTS', 'PT', 'PT_LIST']
NON_VOLATILE_ELEMENTS = {
'Li', 'Be',
'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl',
'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As',
'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te',
'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf',
'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At',
'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db',
'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts'
}
ELEMENTS = set(
'H|He|'
'Li|Be|B|C|N|O|F|Ne|'
'Na|Mg|Al|Si|P|S|Cl|Ar|'
'K|Ca|Sc|Ti|V|Cr|Mn|Fe|Co|Ni|Cu|Zn|Ga|Ge|As|Se|Br|Kr|'
'Rb|Sr|Y|Zr|Nb|Mo|Tc|Ru|Rh|Pd|Ag|Cd|In|Sn|Sb|Te|I|Xe|'
'Cs|Ba|La|Ce|Pr|Nd|Pm|Sm|Eu|Gd|Tb|Dy|Ho|Er|Tm|Yb|Lu|Hf|Ta|W|Re|Os|Ir|Pt|Au|Hg|Tl|Pb|Bi|Po|At|Rn|'
'Fr|Ra|Ac|Th|Pa|U|Np|Pu|Am|Cm|Bk|Cf|Es|Fm|Md|No|Lr|Rf|Db|Sg|Bh|Hs|Mt|Ds|Rg'.split('|')
)
def _patch_pt(pt):
"""
Fix value issues in the periodic table data downloaded from
https://github.com/andrejewski/periodic-table/blob/master/data.json
:param pt: List of element data
:return: Updated, fixed periodic table data
"""
keys = reduce(operator.iand, [x.keys() for x in pt])
missing_keys = set()
for key in keys:
if any(key not in y for y in pt):
            warnings.warn('Dropping key: %s because not all elements have this key.' % key)
missing_keys.add(key)
numeric_keys = [
"electronegativity", "atomicRadius", "ionRadius", "vanDelWaalsRadius", "ionizationEnergy",
"electronAffinity", "meltingPoint", "boilingPoint", "density"
]
for el in pt:
for key in missing_keys:
del el[key]
for key in numeric_keys:
if not isinstance(el[key], int) and not isinstance(el[key], float):
el[key] = None
return pt
PT = {}
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pt.json')) as f:
PT_LIST = _patch_pt(json.load(f))
for element in PT_LIST:
PT[element['symbol']] = element
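# Usage sketch (added illustration, not part of the original file): PT maps an
# element symbol to its record, so property lookups are plain dictionary access.
# 'density' and 'meltingPoint' are among the numeric keys patched above;
# unparseable values were set to None by _patch_pt.
def _example_element_lookup(symbol='Fe'):
    element_data = PT[symbol]
    return element_data['density'], element_data['meltingPoint']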
| 35.057143
| 116
| 0.548085
|
042eb8abe4bae6af9db73a79c531612a55759cad
| 3,485
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/lysinibacillusspac3.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/lysinibacillusspac3.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/lysinibacillusspac3.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Lysinibacillus sp. AC-3.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LysinibacillusSpAc3(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Lysinibacillus sp. AC-3 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Lysinibacillus sp. AC-3 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LysinibacillusSpAc3",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
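# Usage sketch (added illustration, not part of the generated module): the first
# call downloads the STRING graph and caches it under cache_path; later calls
# reuse the preprocessed files.
def _example_retrieval():
    graph = LysinibacillusSpAc3(directed=False)
    return graph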
| 33.190476
| 223
| 0.676327
|
de120862941b4442c01b5c2dfea2a42f10294c8f
| 2,106
|
py
|
Python
|
torchvision/ops/stochastic_depth.py
|
ZJUGuoShuai/vision
|
a9940fe4b2b63bd82a2f853616e00fd0bd112f9a
|
[
"BSD-3-Clause"
] | 3
|
2021-10-30T10:13:40.000Z
|
2021-12-12T10:26:14.000Z
|
torchvision/ops/stochastic_depth.py
|
ZJUGuoShuai/vision
|
a9940fe4b2b63bd82a2f853616e00fd0bd112f9a
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/ops/stochastic_depth.py
|
ZJUGuoShuai/vision
|
a9940fe4b2b63bd82a2f853616e00fd0bd112f9a
|
[
"BSD-3-Clause"
] | 1
|
2020-01-10T12:50:14.000Z
|
2020-01-10T12:50:14.000Z
|
import torch
import torch.fx
from torch import nn, Tensor
def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
"""
Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
<https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
branches of residual architectures.
Args:
        input (Tensor[N, ...]): The input tensor of arbitrary dimensions with the first one
being its batch i.e. a batch with ``N`` rows.
p (float): probability of the input to be zeroed.
mode (str): ``"batch"`` or ``"row"``.
``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
randomly selected rows from the batch.
        training: apply stochastic depth if it is ``True``. Default: ``True``
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
if p < 0.0 or p > 1.0:
raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
if mode not in ["batch", "row"]:
raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
if not training or p == 0.0:
return input
survival_rate = 1.0 - p
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate).div_(survival_rate)
return input * noise
torch.fx.wrap("stochastic_depth")
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
super().__init__()
self.p = p
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return stochastic_depth(input, self.p, self.mode, self.training)
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += "p=" + str(self.p)
tmpstr += ", mode=" + str(self.mode)
tmpstr += ")"
return tmpstr
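# Usage sketch (added illustration, not part of torchvision): apply "row" mode
# inside a toy residual block so that, during training, the residual branch is
# dropped for a random subset of samples and rescaled by 1/(1 - p).
class _ToyResidualBlock(nn.Module):
    def __init__(self, channels: int, p: float = 0.2) -> None:
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.stochastic_depth = StochasticDepth(p, mode="row")

    def forward(self, input: Tensor) -> Tensor:
        # Identity path plus a stochastically dropped residual branch.
        return input + self.stochastic_depth(self.conv(input))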
| 33.428571
| 92
| 0.597341
|
1599e37754b91f84732a7d9b9ebd879f27daa749
| 125
|
py
|
Python
|
conf/__init__.py
|
LuoxinY/v2ray_subscribe
|
438863ec944f30fae29d159ebc0d1c14021c0887
|
[
"MIT"
] | 4
|
2020-04-11T12:52:13.000Z
|
2020-07-08T06:38:08.000Z
|
conf/__init__.py
|
LuoxinY/v2ray_subscribe
|
438863ec944f30fae29d159ebc0d1c14021c0887
|
[
"MIT"
] | null | null | null |
conf/__init__.py
|
LuoxinY/v2ray_subscribe
|
438863ec944f30fae29d159ebc0d1c14021c0887
|
[
"MIT"
] | 2
|
2020-07-27T13:27:24.000Z
|
2020-10-01T04:11:31.000Z
|
from .global_variable import VariableManager
from .global_variable import GlobalVariable
global_variable = GlobalVariable()
| 25
| 44
| 0.864
|
ded6b033f807e17649e169db514645ba8e36708e
| 6,463
|
py
|
Python
|
engineauth/middleware.py
|
alecdotico/engineauth
|
def523f6c0d48f346e552b6638e6f3a6a1717733
|
[
"Apache-2.0"
] | null | null | null |
engineauth/middleware.py
|
alecdotico/engineauth
|
def523f6c0d48f346e552b6638e6f3a6a1717733
|
[
"Apache-2.0"
] | null | null | null |
engineauth/middleware.py
|
alecdotico/engineauth
|
def523f6c0d48f346e552b6638e6f3a6a1717733
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from engineauth import models
from engineauth import utils
from engineauth.config import load_config
import re
import traceback
from webob import Response
from webob import Request
class EngineAuthResponse(Response):
def _save_session(self):
session = self.request.session
# Compare the hash that we set in load_session to the current one.
# We only save the session and cookie if this value has changed.
if self.request.session_hash == session.hash():
return session
session.put()
# If we have a user_id we want to updated the
# session to use the user_id as the key.
if session.user_id is not None:
session_id = session.key.id()
if session_id != session.user_id:
session = models.Session.upgrade_to_user_session(
session_id, session.user_id)
self.set_cookie('_eauth', session.serialize())
return self
def _save_user(self):
pass
class EngineAuthRequest(Request):
ResponseClass = EngineAuthResponse
def _load_session(self):
value = self.cookies.get('_eauth')
session = None
if value:
session = models.Session.get_by_value(value)
if session is not None:
# Create a hash for later comparison,
# to determine if a put() is required
session_hash = session.hash()
else:
session = models.Session.create()
# set this to False to ensure a cookie
# is saved later in the response.
session_hash = '0'
self.session = session
self.session_hash = session_hash
return self
def _get_user_class(self):
try:
return utils.import_class(self._config['user_model'])
except Exception:
return models.User
def _load_user(self):
if self.session is not None and self.session.user_id:
self.user = self._get_user_class().get_by_id(int(self.session.user_id))
if self.user is None:
# TODO: If the user_id from the session returns no user,
# then remove it.
pass
else:
self.user = None
return self
def _load_user_by_profile(self, profile):
# if the user is logged in update that user with the profile details
if self.user:
self.user.add_profile(profile)
# else get or create a user based on the profile
else:
self.user = self._get_user_class().get_or_create_by_profile(profile)
# Add user to session
self.session.user_id = self.user.get_id()
load_user_by_profile = _load_user_by_profile
def _add_message(self, message, level=None, key='_messages'):
if not self.session.data.get(key):
self.session.data[key] = []
return self.session.data[key].append({
'message': message, 'level': level})
add_message = _add_message
def _get_messages(self, key='_messages'):
try:
return self.session.data.pop(key)
except KeyError:
pass
get_messages = _get_messages
def _set_redirect_back(self):
""" Save a valid url to redirect back after OAuth dance. """
next_uri = self.referer
# uri is valid if its domain matches the current server name
uri_is_valid = re.match(r'(https?://)?' + self.server_name + '.*', next_uri) if next_uri else False
if next_uri is not None and self._config['redirect_back'] and uri_is_valid:
self.session.data['_redirect_uri'] = next_uri
set_redirect_uri = _set_redirect_back
def _get_redirect_uri(self):
try:
return self.session.data.pop('_redirect_uri').encode('utf-8')
except KeyError:
return self._config['success_uri']
get_redirect_uri = _get_redirect_uri
def _set_globals(self, environ):
# environ['ea.config'] = req.config
environ['ea.session'] = self.session
environ['ea.user'] = self.user
class AuthMiddleware(object):
def __init__(self, app, config=None):
self.app = app
self._config = load_config(config)
self._base_uri = self._config['base_uri']
if not self._base_uri.endswith('/'):
self._base_uri = "%s/" % self._base_uri
self._url_parse_re = re.compile(r'%s([^\s/]+)/*(\S*)' %
(self._base_uri))
def __call__(self, environ, start_response):
# If the request is to the admin, return
if environ['PATH_INFO'].startswith('/_ah/'):
return self.app(environ, start_response)
# load session
req = EngineAuthRequest(environ)
req._config = self._config
req._load_session()
req._load_user()
resp = None
# If the requesting url is for engineauth load the strategy
if environ['PATH_INFO'].startswith(self._base_uri):
if req._config['redirect_back']:
req._set_redirect_back()
# extract provider and additional params from the url
provider, provider_params = self._url_parse_re.match(
req.path_info).group(1, 2)
if provider:
req.provider = provider
req.provider_params = provider_params
# load the desired strategy class
strategy_class = self._load_strategy(provider)
resp = req.get_response(strategy_class(self.app, self._config))
if resp.request is None:
# TODO: determine why this is necessary.
resp.request = req
if resp is None:
resp = req.get_response(self.app)
# Save session, return response
resp._save_session()
return resp(environ, start_response)
def _load_strategy(self, provider):
try:
strategy_location = self._config[
'provider.{0}'.format(provider)]['class_path']
return utils.import_class(strategy_location)
        except Exception:
            traceback.print_exc()
            raise Exception("You must provide a class path for the {0} "
                            "strategy. Add a 'class_path' key to the "
                            "'provider.{0}' config dict".format(provider))
| 37.795322
| 107
| 0.604673
|
3f0236fae82ed9057af8aa36a56648c3573010d9
| 902
|
py
|
Python
|
src/posts/urls.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | null | null | null |
src/posts/urls.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | 1
|
2020-06-05T22:09:06.000Z
|
2020-06-05T22:09:06.000Z
|
src/posts/urls.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | null | null | null |
# posts/urls.py
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter

from posts.views import UserViewSet, PostViewSet, CommentViewSet, ImageViewSet, UserPostViewSet

# Top-level routes: /users/ and /posts/
# (note: DRF 3.9+ renamed the 'base_name' argument to 'basename')
router = SimpleRouter()
router.register('users', UserViewSet, base_name='user')
router.register('posts', PostViewSet, base_name='post')

# Nested routes: /users/{user_pk}/posts/
users_router = NestedSimpleRouter(router, 'users', lookup='user')
users_router.register('posts', UserPostViewSet, base_name='user-post')

# Nested routes: /posts/{post_pk}/comments/ and /posts/{post_pk}/images/
posts_router = NestedSimpleRouter(router, 'posts', lookup='post')
posts_router.register('comments', CommentViewSet, base_name='comment')
posts_router.register('images', ImageViewSet, base_name='image')

urlpatterns = [
    path('', include(router.urls)),
    path('', include(users_router.urls)),
    path('', include(posts_router.urls)),
]
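
# Sketch, not part of the original file: the route names the registrations
# above generate. The nested kwargs ('user_pk', 'post_pk') come from the
# lookup='user' and lookup='post' arguments to NestedSimpleRouter.
from django.urls import reverse

reverse('user-list')                                        # /users/
reverse('user-post-list', kwargs={'user_pk': 1})            # /users/1/posts/
reverse('comment-detail', kwargs={'post_pk': 1, 'pk': 2})   # /posts/1/comments/2/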
avg_line_length: 36.08 | max_line_length: 96 | alphanum_fraction: 0.757206

hexsha: 80c754ebc3bf20fd80661e590d326ee6a0494c9e | size: 93 | ext: py | lang: Python
repo_path: plugins/slack_alert/__init__.py | repo_name: pkropf/mqttAlerts | repo_head_hexsha: 757ae286adeea90d39e3cc1c5be5027f43aa4a40 | licenses: ["MIT"] (identical across the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null

name = 'slack_alert'


def setup(self):
    # Plugin initialization hook; a stub in this revision.
    pass


def alert(self, topic, condition):
    # Called when a monitored MQTT topic trips a condition; a stub in this
    # revision. The module-level functions take 'self', so the host plugin
    # loader presumably passes the plugin object in itself.
    pass
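
# Hypothetical sketch, not part of the repository, of what a filled-in
# alert() might do. It assumes a Slack incoming webhook; WEBHOOK_URL is a
# placeholder, and the host is assumed to call alert() with an MQTT topic
# string and a condition description.
import requests

WEBHOOK_URL = "https://hooks.slack.com/services/XXX/YYY/ZZZ"

def alert_sketch(self, topic, condition):
    # Post a short message describing which topic tripped which condition.
    requests.post(WEBHOOK_URL,
                  json={"text": "mqtt alert on %s: %s" % (topic, condition)})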
avg_line_length: 11.625 | max_line_length: 34 | alphanum_fraction: 0.655914

hexsha: f0d64947e6bfb3dcf67f179346b1378454574772 | size: 658 | ext: py | lang: Python
repo_path: examples/allow-and-deny/allow-and-deny.py | repo_name: kylelaker/iam-floyd | repo_head_hexsha: cf71cd3cad556b1c2b831bf22ed3d9e8ba892569 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks fields)
max_stars_count: 360 (2020-06-03T03:29:18.000Z – 2022-03-31T04:58:13.000Z) | max_issues_count: 75 (2020-06-21T08:15:58.000Z – 2022-03-25T15:03:04.000Z) | max_forks_count: 13 (2020-06-03T16:12:45.000Z – 2022-03-31T05:05:48.000Z)

import importlib
import inspect
import os
import sys

import iam_floyd as statement

# Make the shared python test helper importable relative to this example.
currentdir = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
helperDir = '%s/../../helper/python' % currentdir
sys.path.insert(0, helperDir)

test = importlib.import_module('python_test')
out = getattr(test, 'out')
deploy = getattr(test, 'deploy')


def statements():
    # doc-start
    s1 = statement.Ec2() \
        .allow() \
        .to_start_instances()

    s2 = statement.Ec2() \
        .deny() \
        .to_stop_instances()
    # doc-end
    return [s1, s2]


stmts = statements()  # renamed from 'all' to avoid shadowing the builtin
out(stmts)
deploy(stmts)
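
# For orientation only, not part of the example: the rough IAM JSON shape the
# two statements above serialize to, per the iam-floyd docs; shown as Python
# dicts, and the exact rendering may differ.
expected = [
    {"Effect": "Allow", "Action": ["ec2:StartInstances"]},
    {"Effect": "Deny", "Action": ["ec2:StopInstances"]},
]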
avg_line_length: 19.939394 | max_line_length: 49 | alphanum_fraction: 0.621581

hexsha: 2e5f75d128d780e22e7420a3b5de672c0b2af509 | size: 133 | ext: py | lang: Python
repo_path: enthought/numerical_modeling/numeric_context/a_numeric_context.py | repo_name: enthought/etsproxy | repo_head_hexsha: 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks fields)
max_stars_count: 3 (2016-12-09T06:05:18.000Z – 2018-03-01T13:00:29.000Z) | max_issues_count: 1 (2020-12-02T00:51:32.000Z – 2020-12-02T08:48:55.000Z) | max_forks_count: null

# proxy module: re-exports the relocated blockcanvas implementation under the old path
from __future__ import absolute_import
from blockcanvas.numerical_modeling.numeric_context.a_numeric_context import *
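
# Sketch of the effect: with this shim on the path, a legacy import such as
#   import enthought.numerical_modeling.numeric_context.a_numeric_context
# keeps working and resolves to the relocated blockcanvas implementation.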
avg_line_length: 33.25 | max_line_length: 78 | alphanum_fraction: 0.879699

hexsha: 759220a193bd3e42fd6b8e4e3af61699264159ea | size: 1449 | ext: py | lang: Python
repo_path: BasicOperations/04_Matplotlib/08_mouseEvent_Drag.py | repo_name: UpSea/midProjects | repo_head_hexsha: ed6086e74f68b1b89f725abe0b270e67cf8993a8 | licenses: ["MIT"] (identical across the stars/issues/forks fields)
max_stars_count: 1 (2018-07-02T13:54:49.000Z – 2018-07-02T13:54:49.000Z) | max_issues_count: null | max_forks_count: 3 (2016-05-28T15:13:02.000Z – 2021-04-10T06:04:25.000Z)

from numpy.random import rand, randint
from matplotlib.patches import RegularPolygon
import matplotlib.pyplot as plt
import numpy as np


class PatchMover(object):
    """Drag-and-drop support for the patches of an Axes."""

    def __init__(self, ax):
        self.ax = ax
        self.selected_patch = None
        self.start_mouse_pos = None
        self.start_patch_pos = None
        fig = ax.figure
        fig.canvas.mpl_connect('button_press_event', self.on_press)
        fig.canvas.mpl_connect('button_release_event', self.on_release)
        fig.canvas.mpl_connect('motion_notify_event', self.on_motion)

    def on_press(self, event):
        if event.xdata is None:  # click landed outside the axes
            return
        # Iterate topmost-first so the patch drawn last wins.
        for patch in reversed(self.ax.patches):
            if patch.contains_point((event.x, event.y)):
                self.selected_patch = patch
                self.start_mouse_pos = np.array([event.xdata, event.ydata])
                self.start_patch_pos = patch.xy
                break

    def on_motion(self, event):
        if self.selected_patch is not None and event.xdata is not None:
            pos = np.array([event.xdata, event.ydata])
            self.selected_patch.xy = self.start_patch_pos + pos - self.start_mouse_pos
            self.ax.figure.canvas.draw()

    def on_release(self, event):
        self.selected_patch = None


fig, ax = plt.subplots()
ax.set_aspect("equal")
for i in range(10):
    poly = RegularPolygon(rand(2), randint(3, 10),
                          radius=rand() * 0.1 + 0.1, facecolor=rand(3))
    ax.add_patch(poly)
ax.relim()
ax.autoscale()
pm = PatchMover(ax)
plt.show()
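
# Sketch, not in the original script: mpl_connect() returns a connection id,
# which fig.canvas.mpl_disconnect() accepts, so the mover could be made
# detachable by keeping those ids around.
cid = fig.canvas.mpl_connect('button_press_event', pm.on_press)
fig.canvas.mpl_disconnect(cid)  # detaches that handler again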
avg_line_length: 32.931818 | max_line_length: 87 | alphanum_fraction: 0.650104

hexsha: fe2e1da0c5d3020d1f674adfb4224c0fd8da5a1a | size: 419 | ext: py | lang: Python
repo_path: src/napari_labeller/__init__.py | repo_name: Hekstra-Lab/napari-labeller | repo_head_hexsha: 74913dce72c773df2ec94e1cb3798dd40fedf219 | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks fields)
max_stars_count: null | max_issues_count: 1 (2021-12-03T21:26:27.000Z – 2021-12-03T21:26:27.000Z) | max_forks_count: null

try:
    from ._version import version as __version__
except ImportError:
    # _version.py is generated at build time; fall back when running from source.
    __version__ = "unknown"

__author__ = "Ian Hunt-Isaak and John Russell"

from ._dock_widget import napari_experimental_provide_dock_widget
from ._function import napari_experimental_provide_function

__all__ = [
    "__version__",
    "__author__",
    "napari_experimental_provide_function",
    "napari_experimental_provide_dock_widget",
]
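
# Hypothetical sketch of the kind of _function.py this package imports from,
# using napari's first-generation (pre-npe2) plugin hooks; the
# label_brightest function is invented for illustration.
import numpy as np
from napari_plugin_engine import napari_hook_implementation


def label_brightest(image: "napari.types.ImageData") -> "napari.types.LabelsData":
    # Toy example: mark pixels above the mean as label 1.
    return (image > image.mean()).astype(np.int32)


@napari_hook_implementation
def napari_experimental_provide_function():
    return [label_brightest]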
avg_line_length: 26.1875 | max_line_length: 65 | alphanum_fraction: 0.789976

hexsha: e39c8a90b485ee434bde0e8bd1cf95738cfe5468 | size: 1981 | ext: py | lang: Python
repo_path: sandbox/dokmaraton_list.py | repo_name: thejoltjoker/python | repo_head_hexsha: 4517065e64d83947648e2aad206ac4ec786b166e | licenses: ["MIT"] (identical across the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dokmaraton_list.py
Scrape an IMDb list and write one CSV row per film to dokmaraton.csv,
with a spreadsheet-style =IMAGE() formula for each poster.
"""
import csv
import math
import string

import requests
from bs4 import BeautifulSoup


def main():
    """Scrape the list page and dump the films to CSV."""
    list_url = "https://www.imdb.com/list/ls054856346/"
    response = requests.get(list_url)
    soup = BeautifulSoup(response.text, 'html.parser')

    films = []
    for n, item in enumerate(soup.find_all("div", "lister-item")):
        # Rating
        rating = ""
        if item.find("span", "ipl-rating-star__rating"):
            rating = item.find("span", "ipl-rating-star__rating").text

        # Genre
        genre = ""
        if item.find("span", "genre"):
            genre = item.find("span", "genre").text.strip()

        # Runtime: keep only the digits, e.g. "104 min" -> 104. Hours and
        # minutes are computed outside the if so they exist even when the
        # runtime span is missing.
        runtime = 0
        if item.find("span", "runtime"):
            runtime = int("".join(
                x for x in item.find("span", "runtime").text if x in string.digits))
        hours = math.floor(runtime / 60)
        minutes = runtime - hours * 60

        # Poster as a spreadsheet =IMAGE() formula
        # TODO
        poster_url = item.find("div", "lister-item-image").img["loadlate"]
        print(poster_url)
        film_poster = f'=IMAGE("{poster_url}")'

        # Put it all together
        film = {
            "Title": item.h3.a.text,
            "Rating": rating,
            "Genre": genre,
            "Hours": hours,
            "Minutes": minutes,
            "Runtime": runtime,
            "IMDB": f"https://www.imdb.com{item.h3.a['href']}",
            "Poster": film_poster,
        }
        print(n, film["Title"])
        films.append(film)

    with open("dokmaraton.csv", mode="w", newline="") as dok_file:
        fieldnames = ["Title", "Rating", "Genre", "Hours", "Minutes",
                      "Runtime", "IMDB", "Poster"]
        dok_writer = csv.DictWriter(dok_file, delimiter=",", fieldnames=fieldnames)
        dok_writer.writeheader()
        for film in films:
            dok_writer.writerow(film)


if __name__ == '__main__':
    main()
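
# Sketch, not in the original script: the original imported pandas without
# using it, so presumably the CSV was meant to be inspected along these lines.
import pandas as pd

df = pd.read_csv("dokmaraton.csv")
print(df.sort_values("Runtime", ascending=False).head())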
avg_line_length: 29.132353 | max_line_length: 104 | alphanum_fraction: 0.557294