Dataset schema (one record per source file; "⌀" marks columns that may be null):

| column | dtype | observed range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

In every record shown below, the max_stars_, max_issues_, and max_forks_ variants of repo_path, repo_name, repo_head_hexsha, and repo_licenses carry identical values, so each record lists them once (as repo, path, and licenses).
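
Before the records themselves, a minimal sketch of how a dump with this schema might be streamed and filtered with the `datasets` library. The dataset identifier below is a placeholder, not the real source of this dump; the column names are taken from the schema above:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder identifier: substitute the dataset this dump actually comes from.
ds = load_dataset("some-org/some-python-code-dataset", split="train", streaming=True)

for row in ds:
    # Columns match the schema above: ext, size, alphanum_fraction, content, ...
    if row["ext"] == "py" and row["alphanum_fraction"] > 0.6 and row["size"] < 50_000:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```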

hexsha: 67dbe390a996bfb509184679c470396d2c54aa7c | size: 1,170 | ext: py | lang: Python
repo: Kiougar/datapie @ 90b40ac673da1a21d8cef59701de7e82e3258c0a | path: examples/example.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import random
import string
import gevent
from datapie import Mine
from datapie import Miner
class MyMiner1(Miner):
def process(self, data: bytes):
return data.upper()
class MyMiner2(Miner):
def process(self, data: bytes):
return data.lower()
class MyMine(Mine):
@staticmethod
def random_word(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
def get_data(self):
gevent.sleep(3)
return (self.random_word(10).lower() + '\r\n').encode()
if __name__ == '__main__':
mine = MyMine(('127.0.0.1', 16000))
miner1 = MyMiner1(('127.0.0.1', 16001), mine.address)
miner2 = MyMiner2(('127.0.0.1', 16002), miner1.address)
print('Starting MyMiner2 on port 16002')
miner2.start()
# delay the miner by 10 seconds to check that miner2 can connect to it successfully
gevent.sleep(10)
print('Starting MyMiner1 on port 16001')
miner1.start()
# delay the mine by 20 seconds to check that miner can connect to it successfully even if it was started later on
gevent.sleep(10)
print('Starting MyMine on port 16000...')
mine.serve_forever()

avg_line_length: 25.434783 | max_line_length: 117 | alphanum_fraction: 0.670085

hexsha: 48378780cb94ae24380fa4050079f711adecead0 | size: 7,605 | ext: py | lang: Python
repo: mariusgheorghies/python @ 68ac7e168963d8b5a81dc493b1973d29e903a15b | path: kubernetes/client/models/networking_v1beta1_http_ingress_path.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class NetworkingV1beta1HTTPIngressPath(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'backend': 'NetworkingV1beta1IngressBackend',
'path': 'str',
'path_type': 'str'
}
attribute_map = {
'backend': 'backend',
'path': 'path',
'path_type': 'pathType'
}
def __init__(self, backend=None, path=None, path_type=None, local_vars_configuration=None): # noqa: E501
"""NetworkingV1beta1HTTPIngressPath - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._backend = None
self._path = None
self._path_type = None
self.discriminator = None
self.backend = backend
if path is not None:
self.path = path
if path_type is not None:
self.path_type = path_type
@property
def backend(self):
"""Gets the backend of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:return: The backend of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:rtype: NetworkingV1beta1IngressBackend
"""
return self._backend
@backend.setter
def backend(self, backend):
"""Sets the backend of this NetworkingV1beta1HTTPIngressPath.
:param backend: The backend of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:type: NetworkingV1beta1IngressBackend
"""
if self.local_vars_configuration.client_side_validation and backend is None: # noqa: E501
raise ValueError("Invalid value for `backend`, must not be `None`") # noqa: E501
self._backend = backend
@property
def path(self):
"""Gets the path of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched. # noqa: E501
:return: The path of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this NetworkingV1beta1HTTPIngressPath.
Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched. # noqa: E501
:param path: The path of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:type: str
"""
self._path = path
@property
def path_type(self):
"""Gets the path_type of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is done on a path element by element basis. A path element refers is the list of labels in the path split by the '/' separator. A request is a match for path p if every p is an element-wise prefix of p of the request path. Note that if the last element of the path is a substring of the last element in request path, it is not a match (e.g. /foo/bar matches /foo/bar/baz, but does not match /foo/barbaz). * ImplementationSpecific: Interpretation of the Path matching is up to the IngressClass. Implementations can treat this as a separate PathType or treat it identically to Prefix or Exact path types. Implementations are required to support all path types. Defaults to ImplementationSpecific. # noqa: E501
:return: The path_type of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:rtype: str
"""
return self._path_type
@path_type.setter
def path_type(self, path_type):
"""Sets the path_type of this NetworkingV1beta1HTTPIngressPath.
PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is done on a path element by element basis. A path element refers is the list of labels in the path split by the '/' separator. A request is a match for path p if every p is an element-wise prefix of p of the request path. Note that if the last element of the path is a substring of the last element in request path, it is not a match (e.g. /foo/bar matches /foo/bar/baz, but does not match /foo/barbaz). * ImplementationSpecific: Interpretation of the Path matching is up to the IngressClass. Implementations can treat this as a separate PathType or treat it identically to Prefix or Exact path types. Implementations are required to support all path types. Defaults to ImplementationSpecific. # noqa: E501
:param path_type: The path_type of this NetworkingV1beta1HTTPIngressPath. # noqa: E501
:type: str
"""
self._path_type = path_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkingV1beta1HTTPIngressPath):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NetworkingV1beta1HTTPIngressPath):
return True
return self.to_dict() != other.to_dict()

avg_line_length: 42.724719 | max_line_length: 945 | alphanum_fraction: 0.657857
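
A minimal usage sketch for the generated model above, assuming a kubernetes client build that still ships the networking.k8s.io/v1beta1 models (the import path mirrors the file path in this record). Client-side validation is disabled only to keep the sketch self-contained; a real ingress path would also set `backend` to a NetworkingV1beta1IngressBackend:

```python
from kubernetes.client.configuration import Configuration
from kubernetes.client.models.networking_v1beta1_http_ingress_path import (
    NetworkingV1beta1HTTPIngressPath,
)

# Disable client-side validation so the example does not need a backend object.
cfg = Configuration()
cfg.client_side_validation = False

ingress_path = NetworkingV1beta1HTTPIngressPath(
    path="/api",
    path_type="Prefix",  # Exact, Prefix, or ImplementationSpecific
    local_vars_configuration=cfg,
)
print(ingress_path.to_dict())  # {'backend': None, 'path': '/api', 'path_type': 'Prefix'}
```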

hexsha: b8ef574bc13ada8862051c37c147b4e196c3261a | size: 1,554 | ext: py | lang: Python
repo: agners/hassio @ 42d993575ba94159046112d5ab16fac82ff5fe57 | path: supervisor/resolution/evaluations/source_mods.py | licenses: ["Apache-2.0"]
max_stars_count: 584 (2020-01-31T18:53:10.000Z to 2022-03-29T21:12:15.000Z) | max_issues_count: 1,056 (2020-01-30T09:59:44.000Z to 2022-03-31T10:15:32.000Z) | max_forks_count: 295 (2020-02-03T11:30:42.000Z to 2022-03-31T18:53:14.000Z)
content:
"""Evaluation class for Content Trust."""
import logging
from pathlib import Path
from ...const import CoreState
from ...coresys import CoreSys
from ...exceptions import CodeNotaryError, CodeNotaryUntrusted
from ..const import UnsupportedReason
from .base import EvaluateBase
_SUPERVISOR_SOURCE = Path("/usr/src/supervisor")
_LOGGER: logging.Logger = logging.getLogger(__name__)
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateSourceMods(coresys)
class EvaluateSourceMods(EvaluateBase):
"""Evaluate supervisor source modifications."""
@property
def reason(self) -> UnsupportedReason:
"""Return a UnsupportedReason enum."""
return UnsupportedReason.SOURCE_MODS
@property
def on_failure(self) -> str:
"""Return a string that is printed when self.evaluate is False."""
        return "System detected unauthorized source code modifications."
@property
def states(self) -> list[CoreState]:
"""Return a list of valid states when this evaluation can run."""
return [CoreState.RUNNING]
    async def evaluate(self) -> bool:
"""Run evaluation."""
if not self.sys_security.content_trust:
_LOGGER.warning("Disabled content-trust, skipping evaluation")
            return False
try:
await self.sys_security.verify_own_content(path=_SUPERVISOR_SOURCE)
except CodeNotaryUntrusted:
return True
except CodeNotaryError:
pass
return False

avg_line_length: 29.884615 | max_line_length: 79 | alphanum_fraction: 0.69112
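
A minimal driver sketch for the evaluation above, showing only the call pattern; `coresys` is a placeholder for a real Supervisor CoreSys instance, which this excerpt does not construct:

```python
import asyncio

from supervisor.resolution.evaluations.source_mods import setup

async def check_source_mods(coresys) -> None:
    # setup() and EvaluateSourceMods.evaluate() are defined in the file above;
    # evaluate() returns True when CodeNotary reports the source tree as untrusted.
    evaluation = setup(coresys)
    if await evaluation.evaluate():
        print(f"Unsupported: {evaluation.reason}")

# asyncio.run(check_source_mods(coresys))  # needs a running Supervisor environment
```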

hexsha: f57eee7f4f9383ba1c8c87b36a6aa94b11d444a5 | size: 14,080 | ext: py | lang: Python
repo: jamescalam/haystack @ 374155fd5c2d990cf51573a6da50314c3a07f926 | path: haystack/nodes/connector/crawler.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2022-02-17T05:08:53.000Z to 2022-02-17T05:08:53.000Z)
content:
from typing import List, Optional, Dict, Tuple, Union
import re
import sys
import json
import logging
from pathlib import Path
from urllib.parse import urlparse
try:
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
except (ImportError, ModuleNotFoundError) as ie:
from haystack.utils.import_utils import _optional_component_not_installed
_optional_component_not_installed(__name__, "crawler", ie)
from haystack.nodes.base import BaseComponent
from haystack.schema import Document
logger = logging.getLogger(__name__)
class Crawler(BaseComponent):
"""
Crawl texts from a website so that we can use them later in Haystack as a corpus for search / question answering etc.
**Example:**
```python
| from haystack.nodes.connector import Crawler
|
| crawler = Crawler(output_dir="crawled_files")
| # crawl Haystack docs, i.e. all pages that include haystack.deepset.ai/overview/
| docs = crawler.crawl(urls=["https://haystack.deepset.ai/overview/get-started"],
| filter_urls= ["haystack\.deepset\.ai\/overview\/"])
```
"""
outgoing_edges = 1
def __init__(
self,
output_dir: str,
urls: Optional[List[str]] = None,
crawler_depth: int = 1,
filter_urls: Optional[List] = None,
overwrite_existing_files=True,
id_hash_keys: Optional[List[str]] = None,
):
"""
Init object with basic params for crawling (can be overwritten later).
:param output_dir: Path for the directory to store files
:param urls: List of http(s) address(es) (can also be supplied later when calling crawl())
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:param id_hash_keys: Generate the document id from a custom list of strings that refer to the document's
attributes. If you want to ensure you don't have duplicate documents in your DocumentStore but texts are
not unique, you can modify the metadata and pass e.g. `"meta"` to this field (e.g. [`"content"`, `"meta"`]).
In this case the id will be generated by using the content and the defined metadata.
"""
super().__init__()
IN_COLAB = "google.colab" in sys.modules
options = webdriver.chrome.options.Options()
options.add_argument("--headless")
if IN_COLAB:
try:
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
self.driver = webdriver.Chrome("chromedriver", options=options)
except:
raise Exception(
"""
\'chromium-driver\' needs to be installed manually when running colab. Follow the below given commands:
!apt-get update
!apt install chromium-driver
!cp /usr/lib/chromium-browser/chromedriver /usr/bin
If it has already been installed, please check if it has been copied to the right directory i.e. to \'/usr/bin\'"""
)
else:
logger.info("'chrome-driver' will be automatically installed.")
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
self.urls = urls
self.output_dir = output_dir
self.crawler_depth = crawler_depth
self.filter_urls = filter_urls
self.overwrite_existing_files = overwrite_existing_files
self.id_hash_keys = id_hash_keys
def crawl(
self,
output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None,
id_hash_keys: Optional[List[str]] = None,
) -> List[Path]:
"""
        Crawl URL(s), extract the text from the HTML, create a Haystack Document object out of it and save it (one JSON
file per URL, including text and basic meta data).
You can optionally specify via `filter_urls` to only crawl URLs that match a certain pattern.
All parameters are optional here and only meant to overwrite instance attributes at runtime.
If no parameters are provided to this method, the instance attributes that were passed during __init__ will be used.
:param output_dir: Path for the directory to store files
:param urls: List of http addresses or single http address
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:param id_hash_keys: Generate the document id from a custom list of strings that refer to the document's
attributes. If you want to ensure you don't have duplicate documents in your DocumentStore but texts are
not unique, you can modify the metadata and pass e.g. `"meta"` to this field (e.g. [`"content"`, `"meta"`]).
In this case the id will be generated by using the content and the defined metadata.
:return: List of paths where the crawled webpages got stored
"""
# use passed params or fallback to instance attributes
if id_hash_keys is None:
id_hash_keys = self.id_hash_keys
urls = urls or self.urls
if urls is None:
raise ValueError("Got no urls to crawl. Set `urls` to a list of URLs in __init__(), crawl() or run(). `")
output_dir = output_dir or self.output_dir
filter_urls = filter_urls or self.filter_urls
if overwrite_existing_files is None:
overwrite_existing_files = self.overwrite_existing_files
if crawler_depth is None:
crawler_depth = self.crawler_depth
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir(parents=True)
file_paths: list = []
is_not_empty = len(list(output_dir.rglob("*"))) > 0
if is_not_empty and not overwrite_existing_files:
logger.info(f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data.")
else:
logger.info(f"Fetching from {urls} to `{output_dir}`")
# Start by writing out the initial list of urls
if filter_urls:
pattern = re.compile("|".join(filter_urls))
for url in urls:
if pattern.search(url):
file_paths += self._write_to_files([url], output_dir=output_dir)
else:
file_paths += self._write_to_files(urls, output_dir=output_dir)
# follow one level of sublinks if requested
if crawler_depth == 1:
sub_links: Dict[str, List] = {}
for url_ in urls:
already_found_links: List = list(sum(list(sub_links.values()), []))
sub_links[url_] = list(
self._extract_sublinks_from_url(
base_url=url_, filter_urls=filter_urls, already_found_links=already_found_links
)
)
for url, extracted_sublink in sub_links.items():
file_paths += self._write_to_files(
extracted_sublink, output_dir=output_dir, base_url=url, id_hash_keys=id_hash_keys
)
return file_paths
def _write_to_files(
self, urls: List[str], output_dir: Path, base_url: str = None, id_hash_keys: Optional[List[str]] = None
) -> List[Path]:
paths = []
for link in urls:
logger.info(f"writing contents from `{link}`")
self.driver.get(link)
el = self.driver.find_element_by_tag_name("body")
text = el.text
link_split_values = link.replace("https://", "").split("/")
file_name = f"{'_'.join(link_split_values)}.json"
file_path = output_dir / file_name
data = {}
data["meta"] = {"url": link}
if base_url:
data["meta"]["base_url"] = base_url
data["content"] = text
document = Document.from_dict(data, id_hash_keys=id_hash_keys)
with open(file_path, "w", encoding="utf-8") as f:
json.dump(document.to_dict(), f)
paths.append(file_path)
return paths
def run( # type: ignore
self,
output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None,
return_documents: Optional[bool] = False,
id_hash_keys: Optional[List[str]] = None,
) -> Tuple[Dict, str]:
"""
Method to be executed when the Crawler is used as a Node within a Haystack pipeline.
:param output_dir: Path for the directory to store files
:param urls: List of http addresses or single http address
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:param return_documents: Return json files content
:param id_hash_keys: Generate the document id from a custom list of strings that refer to the document's
attributes. If you want to ensure you don't have duplicate documents in your DocumentStore but texts are
not unique, you can modify the metadata and pass e.g. `"meta"` to this field (e.g. [`"content"`, `"meta"`]).
In this case the id will be generated by using the content and the defined metadata.
:return: Tuple({"paths": List of filepaths, ...}, Name of output edge)
"""
file_paths = self.crawl(
urls=urls,
output_dir=output_dir,
crawler_depth=crawler_depth,
filter_urls=filter_urls,
overwrite_existing_files=overwrite_existing_files,
)
if return_documents:
crawled_data = []
for _file in file_paths:
with open(_file.absolute(), "r") as read_file:
crawled_data.append(Document.from_dict(json.load(read_file), id_hash_keys=id_hash_keys))
results = {"documents": crawled_data}
else:
results = {"paths": file_paths}
return results, "output_1"
def run_batch( # type: ignore
self,
output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None,
return_documents: Optional[bool] = False,
id_hash_keys: Optional[List[str]] = None,
):
return self.run(
output_dir=output_dir,
urls=urls,
crawler_depth=crawler_depth,
filter_urls=filter_urls,
overwrite_existing_files=overwrite_existing_files,
return_documents=return_documents,
id_hash_keys=id_hash_keys,
)
@staticmethod
def _is_internal_url(base_url: str, sub_link: str) -> bool:
base_url_ = urlparse(base_url)
sub_link_ = urlparse(sub_link)
return base_url_.scheme == sub_link_.scheme and base_url_.netloc == sub_link_.netloc
@staticmethod
def _is_inpage_navigation(base_url: str, sub_link: str) -> bool:
base_url_ = urlparse(base_url)
sub_link_ = urlparse(sub_link)
return base_url_.path == sub_link_.path and base_url_.netloc == sub_link_.netloc
def _extract_sublinks_from_url(
self, base_url: str, filter_urls: Optional[List] = None, already_found_links: List = None
) -> set:
if filter_urls:
filter_pattern = re.compile("|".join(filter_urls))
self.driver.get(base_url)
a_elements = self.driver.find_elements_by_xpath("//a[@href]")
sub_links = set()
for i in a_elements:
sub_link = i.get_attribute("href")
if not (already_found_links and sub_link in already_found_links):
if self._is_internal_url(base_url=base_url, sub_link=sub_link) and (
not self._is_inpage_navigation(base_url=base_url, sub_link=sub_link)
):
if filter_urls:
if filter_pattern.search(sub_link):
sub_links.add(sub_link)
else:
sub_links.add(sub_link)
return sub_links

avg_line_length: 45.863192 | max_line_length: 124 | alphanum_fraction: 0.624574
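
A short usage sketch for the Crawler node above, assuming Chrome and a matching chromedriver can be installed on the machine (the constructor pulls one via webdriver-manager). The URL is the one from the class docstring, and `crawler_depth=0` restricts the run to the initial page:

```python
from haystack.nodes.connector import Crawler

crawler = Crawler(output_dir="crawled_files", crawler_depth=0)

# run() is the pipeline entry point; with return_documents=True the written JSON
# files are loaded back as haystack Document objects instead of returning paths.
results, edge = crawler.run(
    urls=["https://haystack.deepset.ai/overview/get-started"],
    return_documents=True,
)
print(edge)                       # "output_1"
print(len(results["documents"]))  # number of pages crawled
```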

hexsha: 492f2f648924287129d3d064bc2b15ddd201d55a | size: 32,057 | ext: py | lang: Python
repo: marcoramirezmx/cpython @ 8b31a11a698cb5aa9b439b349c8de4e388846f73 | path: Lib/idlelib/idle_test/test_config.py | licenses: ["CNRI-Python-GPL-Compatible"]
max_stars_count: 4 (2019-09-12T02:35:07.000Z to 2022-01-19T23:04:45.000Z) | max_issues_count: 3 (2020-03-15T21:17:00.000Z to 2020-03-15T22:50:40.000Z) | max_forks_count: 1 (2019-08-23T20:54:20.000Z to 2019-08-23T20:54:20.000Z)
content:
"""Test config, coverage 93%.
(100% for IdleConfParser, IdleUserConfParser*, ConfigChanges).
* Exception is OSError clause in Save method.
Much of IdleConf is also exercised by ConfigDialog and test_configdialog.
"""
from idlelib import config
import sys
import os
import tempfile
from test.support import captured_stderr, findfile
import unittest
from unittest import mock
import idlelib
from idlelib.idle_test.mock_idle import Func
# Tests should not depend on fortuitous user configurations.
# They must not affect actual user .cfg files.
# Replace user parsers with empty parsers that cannot be saved
# due to getting '' as the filename when created.
idleConf = config.idleConf
usercfg = idleConf.userCfg
testcfg = {}
usermain = testcfg['main'] = config.IdleUserConfParser('')
userhigh = testcfg['highlight'] = config.IdleUserConfParser('')
userkeys = testcfg['keys'] = config.IdleUserConfParser('')
userextn = testcfg['extensions'] = config.IdleUserConfParser('')
def setUpModule():
idleConf.userCfg = testcfg
idlelib.testing = True
def tearDownModule():
idleConf.userCfg = usercfg
idlelib.testing = False
class IdleConfParserTest(unittest.TestCase):
"""Test that IdleConfParser works"""
config = """
[one]
one = false
two = true
three = 10
[two]
one = a string
two = true
three = false
"""
def test_get(self):
parser = config.IdleConfParser('')
parser.read_string(self.config)
eq = self.assertEqual
# Test with type argument.
self.assertIs(parser.Get('one', 'one', type='bool'), False)
self.assertIs(parser.Get('one', 'two', type='bool'), True)
eq(parser.Get('one', 'three', type='int'), 10)
eq(parser.Get('two', 'one'), 'a string')
self.assertIs(parser.Get('two', 'two', type='bool'), True)
self.assertIs(parser.Get('two', 'three', type='bool'), False)
# Test without type should fallback to string.
eq(parser.Get('two', 'two'), 'true')
eq(parser.Get('two', 'three'), 'false')
        # If the option does not exist, Get should return None or the default.
self.assertIsNone(parser.Get('not', 'exist'))
eq(parser.Get('not', 'exist', default='DEFAULT'), 'DEFAULT')
def test_get_option_list(self):
parser = config.IdleConfParser('')
parser.read_string(self.config)
get_list = parser.GetOptionList
self.assertCountEqual(get_list('one'), ['one', 'two', 'three'])
self.assertCountEqual(get_list('two'), ['one', 'two', 'three'])
self.assertEqual(get_list('not exist'), [])
def test_load_nothing(self):
parser = config.IdleConfParser('')
parser.Load()
self.assertEqual(parser.sections(), [])
def test_load_file(self):
# Borrow test/cfgparser.1 from test_configparser.
config_path = findfile('cfgparser.1')
parser = config.IdleConfParser(config_path)
parser.Load()
self.assertEqual(parser.Get('Foo Bar', 'foo'), 'newbar')
self.assertEqual(parser.GetOptionList('Foo Bar'), ['foo'])
class IdleUserConfParserTest(unittest.TestCase):
"""Test that IdleUserConfParser works"""
def new_parser(self, path=''):
return config.IdleUserConfParser(path)
def test_set_option(self):
parser = self.new_parser()
parser.add_section('Foo')
# Setting new option in existing section should return True.
self.assertTrue(parser.SetOption('Foo', 'bar', 'true'))
# Setting existing option with same value should return False.
self.assertFalse(parser.SetOption('Foo', 'bar', 'true'))
        # Setting existing option with new value should return True.
self.assertTrue(parser.SetOption('Foo', 'bar', 'false'))
self.assertEqual(parser.Get('Foo', 'bar'), 'false')
# Setting option in new section should create section and return True.
self.assertTrue(parser.SetOption('Bar', 'bar', 'true'))
self.assertCountEqual(parser.sections(), ['Bar', 'Foo'])
self.assertEqual(parser.Get('Bar', 'bar'), 'true')
def test_remove_option(self):
parser = self.new_parser()
parser.AddSection('Foo')
parser.SetOption('Foo', 'bar', 'true')
self.assertTrue(parser.RemoveOption('Foo', 'bar'))
self.assertFalse(parser.RemoveOption('Foo', 'bar'))
self.assertFalse(parser.RemoveOption('Not', 'Exist'))
def test_add_section(self):
parser = self.new_parser()
self.assertEqual(parser.sections(), [])
# Should not add duplicate section.
# Configparser raises DuplicateError, IdleParser not.
parser.AddSection('Foo')
parser.AddSection('Foo')
parser.AddSection('Bar')
self.assertCountEqual(parser.sections(), ['Bar', 'Foo'])
def test_remove_empty_sections(self):
parser = self.new_parser()
parser.AddSection('Foo')
parser.AddSection('Bar')
parser.SetOption('Idle', 'name', 'val')
self.assertCountEqual(parser.sections(), ['Bar', 'Foo', 'Idle'])
parser.RemoveEmptySections()
self.assertEqual(parser.sections(), ['Idle'])
def test_is_empty(self):
parser = self.new_parser()
parser.AddSection('Foo')
parser.AddSection('Bar')
self.assertTrue(parser.IsEmpty())
self.assertEqual(parser.sections(), [])
parser.SetOption('Foo', 'bar', 'false')
parser.AddSection('Bar')
self.assertFalse(parser.IsEmpty())
self.assertCountEqual(parser.sections(), ['Foo'])
def test_save(self):
with tempfile.TemporaryDirectory() as tdir:
path = os.path.join(tdir, 'test.cfg')
parser = self.new_parser(path)
parser.AddSection('Foo')
parser.SetOption('Foo', 'bar', 'true')
# Should save to path when config is not empty.
self.assertFalse(os.path.exists(path))
parser.Save()
self.assertTrue(os.path.exists(path))
# Should remove the file from disk when config is empty.
parser.remove_section('Foo')
parser.Save()
self.assertFalse(os.path.exists(path))
class IdleConfTest(unittest.TestCase):
"""Test for idleConf"""
@classmethod
def setUpClass(cls):
cls.config_string = {}
conf = config.IdleConf(_utest=True)
if __name__ != '__main__':
idle_dir = os.path.dirname(__file__)
else:
idle_dir = os.path.abspath(sys.path[0])
for ctype in conf.config_types:
config_path = os.path.join(idle_dir, '../config-%s.def' % ctype)
with open(config_path, 'r') as f:
cls.config_string[ctype] = f.read()
cls.orig_warn = config._warn
config._warn = Func()
@classmethod
def tearDownClass(cls):
config._warn = cls.orig_warn
def new_config(self, _utest=False):
return config.IdleConf(_utest=_utest)
def mock_config(self):
"""Return a mocked idleConf
Both default and user config used the same config-*.def
"""
conf = config.IdleConf(_utest=True)
for ctype in conf.config_types:
conf.defaultCfg[ctype] = config.IdleConfParser('')
conf.defaultCfg[ctype].read_string(self.config_string[ctype])
conf.userCfg[ctype] = config.IdleUserConfParser('')
conf.userCfg[ctype].read_string(self.config_string[ctype])
return conf
@unittest.skipIf(sys.platform.startswith('win'), 'this is test for unix system')
def test_get_user_cfg_dir_unix(self):
"Test to get user config directory under unix"
conf = self.new_config(_utest=True)
# Check normal way should success
with mock.patch('os.path.expanduser', return_value='/home/foo'):
with mock.patch('os.path.exists', return_value=True):
self.assertEqual(conf.GetUserCfgDir(), '/home/foo/.idlerc')
# Check os.getcwd should success
with mock.patch('os.path.expanduser', return_value='~'):
with mock.patch('os.getcwd', return_value='/home/foo/cpython'):
with mock.patch('os.mkdir'):
self.assertEqual(conf.GetUserCfgDir(),
'/home/foo/cpython/.idlerc')
# Check user dir not exists and created failed should raise SystemExit
with mock.patch('os.path.join', return_value='/path/not/exists'):
with self.assertRaises(SystemExit):
with self.assertRaises(FileNotFoundError):
conf.GetUserCfgDir()
@unittest.skipIf(not sys.platform.startswith('win'), 'this is test for Windows system')
def test_get_user_cfg_dir_windows(self):
"Test to get user config directory under Windows"
conf = self.new_config(_utest=True)
# Check normal way should success
with mock.patch('os.path.expanduser', return_value='C:\\foo'):
with mock.patch('os.path.exists', return_value=True):
self.assertEqual(conf.GetUserCfgDir(), 'C:\\foo\\.idlerc')
# Check os.getcwd should success
with mock.patch('os.path.expanduser', return_value='~'):
with mock.patch('os.getcwd', return_value='C:\\foo\\cpython'):
with mock.patch('os.mkdir'):
self.assertEqual(conf.GetUserCfgDir(),
'C:\\foo\\cpython\\.idlerc')
# Check user dir not exists and created failed should raise SystemExit
with mock.patch('os.path.join', return_value='/path/not/exists'):
with self.assertRaises(SystemExit):
with self.assertRaises(FileNotFoundError):
conf.GetUserCfgDir()
def test_create_config_handlers(self):
conf = self.new_config(_utest=True)
# Mock out idle_dir
idle_dir = '/home/foo'
with mock.patch.dict({'__name__': '__foo__'}):
with mock.patch('os.path.dirname', return_value=idle_dir):
conf.CreateConfigHandlers()
# Check keys are equal
self.assertCountEqual(conf.defaultCfg.keys(), conf.config_types)
self.assertCountEqual(conf.userCfg.keys(), conf.config_types)
# Check conf parser are correct type
for default_parser in conf.defaultCfg.values():
self.assertIsInstance(default_parser, config.IdleConfParser)
for user_parser in conf.userCfg.values():
self.assertIsInstance(user_parser, config.IdleUserConfParser)
# Check config path are correct
for config_type, parser in conf.defaultCfg.items():
self.assertEqual(parser.file,
os.path.join(idle_dir, 'config-%s.def' % config_type))
for config_type, parser in conf.userCfg.items():
self.assertEqual(parser.file,
os.path.join(conf.userdir, 'config-%s.cfg' % config_type))
def test_load_cfg_files(self):
conf = self.new_config(_utest=True)
# Borrow test/cfgparser.1 from test_configparser.
config_path = findfile('cfgparser.1')
conf.defaultCfg['foo'] = config.IdleConfParser(config_path)
conf.userCfg['foo'] = config.IdleUserConfParser(config_path)
# Load all config from path
conf.LoadCfgFiles()
eq = self.assertEqual
# Check defaultCfg is loaded
eq(conf.defaultCfg['foo'].Get('Foo Bar', 'foo'), 'newbar')
eq(conf.defaultCfg['foo'].GetOptionList('Foo Bar'), ['foo'])
# Check userCfg is loaded
eq(conf.userCfg['foo'].Get('Foo Bar', 'foo'), 'newbar')
eq(conf.userCfg['foo'].GetOptionList('Foo Bar'), ['foo'])
def test_save_user_cfg_files(self):
conf = self.mock_config()
with mock.patch('idlelib.config.IdleUserConfParser.Save') as m:
conf.SaveUserCfgFiles()
self.assertEqual(m.call_count, len(conf.userCfg))
def test_get_option(self):
conf = self.mock_config()
eq = self.assertEqual
eq(conf.GetOption('main', 'EditorWindow', 'width'), '80')
eq(conf.GetOption('main', 'EditorWindow', 'width', type='int'), 80)
with mock.patch('idlelib.config._warn') as _warn:
eq(conf.GetOption('main', 'EditorWindow', 'font', type='int'), None)
eq(conf.GetOption('main', 'EditorWindow', 'NotExists'), None)
eq(conf.GetOption('main', 'EditorWindow', 'NotExists', default='NE'), 'NE')
eq(_warn.call_count, 4)
def test_set_option(self):
conf = self.mock_config()
conf.SetOption('main', 'Foo', 'bar', 'newbar')
self.assertEqual(conf.GetOption('main', 'Foo', 'bar'), 'newbar')
def test_get_section_list(self):
conf = self.mock_config()
self.assertCountEqual(
conf.GetSectionList('default', 'main'),
['General', 'EditorWindow', 'PyShell', 'Indent', 'Theme',
'Keys', 'History', 'HelpFiles'])
self.assertCountEqual(
conf.GetSectionList('user', 'main'),
['General', 'EditorWindow', 'PyShell', 'Indent', 'Theme',
'Keys', 'History', 'HelpFiles'])
with self.assertRaises(config.InvalidConfigSet):
conf.GetSectionList('foobar', 'main')
with self.assertRaises(config.InvalidConfigType):
conf.GetSectionList('default', 'notexists')
def test_get_highlight(self):
conf = self.mock_config()
eq = self.assertEqual
eq(conf.GetHighlight('IDLE Classic', 'normal'), {'foreground': '#000000',
'background': '#ffffff'})
# Test cursor (this background should be normal-background)
eq(conf.GetHighlight('IDLE Classic', 'cursor'), {'foreground': 'black',
'background': '#ffffff'})
# Test get user themes
conf.SetOption('highlight', 'Foobar', 'normal-foreground', '#747474')
conf.SetOption('highlight', 'Foobar', 'normal-background', '#171717')
with mock.patch('idlelib.config._warn'):
eq(conf.GetHighlight('Foobar', 'normal'), {'foreground': '#747474',
'background': '#171717'})
def test_get_theme_dict(self):
"XXX: NOT YET DONE"
conf = self.mock_config()
# These two should be the same
self.assertEqual(
conf.GetThemeDict('default', 'IDLE Classic'),
conf.GetThemeDict('user', 'IDLE Classic'))
with self.assertRaises(config.InvalidTheme):
conf.GetThemeDict('bad', 'IDLE Classic')
def test_get_current_theme_and_keys(self):
conf = self.mock_config()
self.assertEqual(conf.CurrentTheme(), conf.current_colors_and_keys('Theme'))
self.assertEqual(conf.CurrentKeys(), conf.current_colors_and_keys('Keys'))
def test_current_colors_and_keys(self):
conf = self.mock_config()
self.assertEqual(conf.current_colors_and_keys('Theme'), 'IDLE Classic')
def test_default_keys(self):
current_platform = sys.platform
conf = self.new_config(_utest=True)
sys.platform = 'win32'
self.assertEqual(conf.default_keys(), 'IDLE Classic Windows')
sys.platform = 'darwin'
self.assertEqual(conf.default_keys(), 'IDLE Classic OSX')
sys.platform = 'some-linux'
self.assertEqual(conf.default_keys(), 'IDLE Modern Unix')
# Restore platform
sys.platform = current_platform
def test_get_extensions(self):
userextn.read_string('''
[ZzDummy]
enable = True
[DISABLE]
enable = False
''')
eq = self.assertEqual
iGE = idleConf.GetExtensions
eq(iGE(shell_only=True), [])
eq(iGE(), ['ZzDummy'])
eq(iGE(editor_only=True), ['ZzDummy'])
eq(iGE(active_only=False), ['ZzDummy', 'DISABLE'])
eq(iGE(active_only=False, editor_only=True), ['ZzDummy', 'DISABLE'])
userextn.remove_section('ZzDummy')
userextn.remove_section('DISABLE')
def test_remove_key_bind_names(self):
conf = self.mock_config()
self.assertCountEqual(
conf.RemoveKeyBindNames(conf.GetSectionList('default', 'extensions')),
['AutoComplete', 'CodeContext', 'FormatParagraph', 'ParenMatch', 'ZzDummy'])
def test_get_extn_name_for_event(self):
userextn.read_string('''
[ZzDummy]
enable = True
''')
eq = self.assertEqual
eq(idleConf.GetExtnNameForEvent('z-in'), 'ZzDummy')
eq(idleConf.GetExtnNameForEvent('z-out'), None)
userextn.remove_section('ZzDummy')
def test_get_extension_keys(self):
userextn.read_string('''
[ZzDummy]
enable = True
''')
self.assertEqual(idleConf.GetExtensionKeys('ZzDummy'),
{'<<z-in>>': ['<Control-Shift-KeyRelease-Insert>']})
userextn.remove_section('ZzDummy')
# need option key test
## key = ['<Option-Key-2>'] if sys.platform == 'darwin' else ['<Alt-Key-2>']
## eq(conf.GetExtensionKeys('ZoomHeight'), {'<<zoom-height>>': key})
def test_get_extension_bindings(self):
userextn.read_string('''
[ZzDummy]
enable = True
''')
eq = self.assertEqual
iGEB = idleConf.GetExtensionBindings
eq(iGEB('NotExists'), {})
expect = {'<<z-in>>': ['<Control-Shift-KeyRelease-Insert>'],
'<<z-out>>': ['<Control-Shift-KeyRelease-Delete>']}
eq(iGEB('ZzDummy'), expect)
userextn.remove_section('ZzDummy')
def test_get_keybinding(self):
conf = self.mock_config()
eq = self.assertEqual
eq(conf.GetKeyBinding('IDLE Modern Unix', '<<copy>>'),
['<Control-Shift-Key-C>', '<Control-Key-Insert>'])
eq(conf.GetKeyBinding('IDLE Classic Unix', '<<copy>>'),
['<Alt-Key-w>', '<Meta-Key-w>'])
eq(conf.GetKeyBinding('IDLE Classic Windows', '<<copy>>'),
['<Control-Key-c>', '<Control-Key-C>'])
eq(conf.GetKeyBinding('IDLE Classic Mac', '<<copy>>'), ['<Command-Key-c>'])
eq(conf.GetKeyBinding('IDLE Classic OSX', '<<copy>>'), ['<Command-Key-c>'])
# Test keybinding not exists
eq(conf.GetKeyBinding('NOT EXISTS', '<<copy>>'), [])
eq(conf.GetKeyBinding('IDLE Modern Unix', 'NOT EXISTS'), [])
def test_get_current_keyset(self):
current_platform = sys.platform
conf = self.mock_config()
# Ensure that platform isn't darwin
sys.platform = 'some-linux'
self.assertEqual(conf.GetCurrentKeySet(), conf.GetKeySet(conf.CurrentKeys()))
# This should not be the same, since replace <Alt- to <Option-.
# Above depended on config-extensions.def having Alt keys,
# which is no longer true.
# sys.platform = 'darwin'
# self.assertNotEqual(conf.GetCurrentKeySet(), conf.GetKeySet(conf.CurrentKeys()))
# Restore platform
sys.platform = current_platform
def test_get_keyset(self):
conf = self.mock_config()
# Conflict with key set, should be disable to ''
conf.defaultCfg['extensions'].add_section('Foobar')
conf.defaultCfg['extensions'].add_section('Foobar_cfgBindings')
conf.defaultCfg['extensions'].set('Foobar', 'enable', 'True')
conf.defaultCfg['extensions'].set('Foobar_cfgBindings', 'newfoo', '<Key-F3>')
self.assertEqual(conf.GetKeySet('IDLE Modern Unix')['<<newfoo>>'], '')
def test_is_core_binding(self):
# XXX: Should move out the core keys to config file or other place
conf = self.mock_config()
self.assertTrue(conf.IsCoreBinding('copy'))
self.assertTrue(conf.IsCoreBinding('cut'))
self.assertTrue(conf.IsCoreBinding('del-word-right'))
self.assertFalse(conf.IsCoreBinding('not-exists'))
def test_extra_help_source_list(self):
# Test GetExtraHelpSourceList and GetAllExtraHelpSourcesList in same
# place to prevent prepare input data twice.
conf = self.mock_config()
# Test default with no extra help source
self.assertEqual(conf.GetExtraHelpSourceList('default'), [])
self.assertEqual(conf.GetExtraHelpSourceList('user'), [])
with self.assertRaises(config.InvalidConfigSet):
self.assertEqual(conf.GetExtraHelpSourceList('bad'), [])
self.assertCountEqual(
conf.GetAllExtraHelpSourcesList(),
conf.GetExtraHelpSourceList('default') + conf.GetExtraHelpSourceList('user'))
# Add help source to user config
conf.userCfg['main'].SetOption('HelpFiles', '4', 'Python;https://python.org') # This is bad input
conf.userCfg['main'].SetOption('HelpFiles', '3', 'Python:https://python.org') # This is bad input
conf.userCfg['main'].SetOption('HelpFiles', '2', 'Pillow;https://pillow.readthedocs.io/en/latest/')
conf.userCfg['main'].SetOption('HelpFiles', '1', 'IDLE;C:/Programs/Python36/Lib/idlelib/help.html')
self.assertEqual(conf.GetExtraHelpSourceList('user'),
[('IDLE', 'C:/Programs/Python36/Lib/idlelib/help.html', '1'),
('Pillow', 'https://pillow.readthedocs.io/en/latest/', '2'),
('Python', 'https://python.org', '4')])
self.assertCountEqual(
conf.GetAllExtraHelpSourcesList(),
conf.GetExtraHelpSourceList('default') + conf.GetExtraHelpSourceList('user'))
def test_get_font(self):
from test.support import requires
from tkinter import Tk
from tkinter.font import Font
conf = self.mock_config()
requires('gui')
root = Tk()
root.withdraw()
f = Font.actual(Font(name='TkFixedFont', exists=True, root=root))
self.assertEqual(
conf.GetFont(root, 'main', 'EditorWindow'),
(f['family'], 10 if f['size'] <= 0 else f['size'], f['weight']))
# Cleanup root
root.destroy()
del root
def test_get_core_keys(self):
conf = self.mock_config()
eq = self.assertEqual
eq(conf.GetCoreKeys()['<<center-insert>>'], ['<Control-l>'])
eq(conf.GetCoreKeys()['<<copy>>'], ['<Control-c>', '<Control-C>'])
eq(conf.GetCoreKeys()['<<history-next>>'], ['<Alt-n>'])
eq(conf.GetCoreKeys('IDLE Classic Windows')['<<center-insert>>'],
['<Control-Key-l>', '<Control-Key-L>'])
eq(conf.GetCoreKeys('IDLE Classic OSX')['<<copy>>'], ['<Command-Key-c>'])
eq(conf.GetCoreKeys('IDLE Classic Unix')['<<history-next>>'],
['<Alt-Key-n>', '<Meta-Key-n>'])
eq(conf.GetCoreKeys('IDLE Modern Unix')['<<history-next>>'],
['<Alt-Key-n>', '<Meta-Key-n>'])
class CurrentColorKeysTest(unittest.TestCase):
""" Test colorkeys function with user config [Theme] and [Keys] patterns.
colorkeys = config.IdleConf.current_colors_and_keys
Test all patterns written by IDLE and some errors
    Item 'default' should really be 'builtin' (versus 'custom').
"""
colorkeys = idleConf.current_colors_and_keys
default_theme = 'IDLE Classic'
default_keys = idleConf.default_keys()
def test_old_builtin_theme(self):
# On initial installation, user main is blank.
self.assertEqual(self.colorkeys('Theme'), self.default_theme)
# For old default, name2 must be blank.
usermain.read_string('''
[Theme]
default = True
''')
# IDLE omits 'name' for default old builtin theme.
self.assertEqual(self.colorkeys('Theme'), self.default_theme)
# IDLE adds 'name' for non-default old builtin theme.
usermain['Theme']['name'] = 'IDLE New'
self.assertEqual(self.colorkeys('Theme'), 'IDLE New')
# Erroneous non-default old builtin reverts to default.
usermain['Theme']['name'] = 'non-existent'
self.assertEqual(self.colorkeys('Theme'), self.default_theme)
usermain.remove_section('Theme')
def test_new_builtin_theme(self):
# IDLE writes name2 for new builtins.
usermain.read_string('''
[Theme]
default = True
name2 = IDLE Dark
''')
self.assertEqual(self.colorkeys('Theme'), 'IDLE Dark')
# Leftover 'name', not removed, is ignored.
usermain['Theme']['name'] = 'IDLE New'
self.assertEqual(self.colorkeys('Theme'), 'IDLE Dark')
# Erroneous non-default new builtin reverts to default.
usermain['Theme']['name2'] = 'non-existent'
self.assertEqual(self.colorkeys('Theme'), self.default_theme)
usermain.remove_section('Theme')
def test_user_override_theme(self):
# Erroneous custom name (no definition) reverts to default.
usermain.read_string('''
[Theme]
default = False
name = Custom Dark
''')
self.assertEqual(self.colorkeys('Theme'), self.default_theme)
# Custom name is valid with matching Section name.
userhigh.read_string('[Custom Dark]\na=b')
self.assertEqual(self.colorkeys('Theme'), 'Custom Dark')
# Name2 is ignored.
usermain['Theme']['name2'] = 'non-existent'
self.assertEqual(self.colorkeys('Theme'), 'Custom Dark')
usermain.remove_section('Theme')
userhigh.remove_section('Custom Dark')
def test_old_builtin_keys(self):
# On initial installation, user main is blank.
self.assertEqual(self.colorkeys('Keys'), self.default_keys)
# For old default, name2 must be blank, name is always used.
usermain.read_string('''
[Keys]
default = True
name = IDLE Classic Unix
''')
self.assertEqual(self.colorkeys('Keys'), 'IDLE Classic Unix')
# Erroneous non-default old builtin reverts to default.
usermain['Keys']['name'] = 'non-existent'
self.assertEqual(self.colorkeys('Keys'), self.default_keys)
usermain.remove_section('Keys')
def test_new_builtin_keys(self):
# IDLE writes name2 for new builtins.
usermain.read_string('''
[Keys]
default = True
name2 = IDLE Modern Unix
''')
self.assertEqual(self.colorkeys('Keys'), 'IDLE Modern Unix')
# Leftover 'name', not removed, is ignored.
usermain['Keys']['name'] = 'IDLE Classic Unix'
self.assertEqual(self.colorkeys('Keys'), 'IDLE Modern Unix')
# Erroneous non-default new builtin reverts to default.
usermain['Keys']['name2'] = 'non-existent'
self.assertEqual(self.colorkeys('Keys'), self.default_keys)
usermain.remove_section('Keys')
def test_user_override_keys(self):
# Erroneous custom name (no definition) reverts to default.
usermain.read_string('''
[Keys]
default = False
name = Custom Keys
''')
self.assertEqual(self.colorkeys('Keys'), self.default_keys)
# Custom name is valid with matching Section name.
userkeys.read_string('[Custom Keys]\na=b')
self.assertEqual(self.colorkeys('Keys'), 'Custom Keys')
# Name2 is ignored.
usermain['Keys']['name2'] = 'non-existent'
self.assertEqual(self.colorkeys('Keys'), 'Custom Keys')
usermain.remove_section('Keys')
userkeys.remove_section('Custom Keys')
class ChangesTest(unittest.TestCase):
empty = {'main':{}, 'highlight':{}, 'keys':{}, 'extensions':{}}
def load(self): # Test_add_option verifies that this works.
changes = self.changes
changes.add_option('main', 'Msec', 'mitem', 'mval')
changes.add_option('highlight', 'Hsec', 'hitem', 'hval')
changes.add_option('keys', 'Ksec', 'kitem', 'kval')
return changes
loaded = {'main': {'Msec': {'mitem': 'mval'}},
'highlight': {'Hsec': {'hitem': 'hval'}},
'keys': {'Ksec': {'kitem':'kval'}},
'extensions': {}}
def setUp(self):
self.changes = config.ConfigChanges()
def test_init(self):
self.assertEqual(self.changes, self.empty)
def test_add_option(self):
changes = self.load()
self.assertEqual(changes, self.loaded)
changes.add_option('main', 'Msec', 'mitem', 'mval')
self.assertEqual(changes, self.loaded)
def test_save_option(self): # Static function does not touch changes.
save_option = self.changes.save_option
self.assertTrue(save_option('main', 'Indent', 'what', '0'))
self.assertFalse(save_option('main', 'Indent', 'what', '0'))
self.assertEqual(usermain['Indent']['what'], '0')
self.assertTrue(save_option('main', 'Indent', 'use-spaces', '0'))
self.assertEqual(usermain['Indent']['use-spaces'], '0')
self.assertTrue(save_option('main', 'Indent', 'use-spaces', '1'))
self.assertFalse(usermain.has_option('Indent', 'use-spaces'))
usermain.remove_section('Indent')
def test_save_added(self):
changes = self.load()
self.assertTrue(changes.save_all())
self.assertEqual(usermain['Msec']['mitem'], 'mval')
self.assertEqual(userhigh['Hsec']['hitem'], 'hval')
self.assertEqual(userkeys['Ksec']['kitem'], 'kval')
changes.add_option('main', 'Msec', 'mitem', 'mval')
self.assertFalse(changes.save_all())
usermain.remove_section('Msec')
userhigh.remove_section('Hsec')
userkeys.remove_section('Ksec')
def test_save_help(self):
# Any change to HelpFiles overwrites entire section.
changes = self.changes
changes.save_option('main', 'HelpFiles', 'IDLE', 'idledoc')
changes.add_option('main', 'HelpFiles', 'ELDI', 'codeldi')
changes.save_all()
self.assertFalse(usermain.has_option('HelpFiles', 'IDLE'))
self.assertTrue(usermain.has_option('HelpFiles', 'ELDI'))
def test_save_default(self): # Cover 2nd and 3rd false branches.
changes = self.changes
changes.add_option('main', 'Indent', 'use-spaces', '1')
# save_option returns False; cfg_type_changed remains False.
# TODO: test that save_all calls usercfg Saves.
def test_delete_section(self):
changes = self.load()
changes.delete_section('main', 'fake') # Test no exception.
self.assertEqual(changes, self.loaded) # Test nothing deleted.
for cfgtype, section in (('main', 'Msec'), ('keys', 'Ksec')):
testcfg[cfgtype].SetOption(section, 'name', 'value')
changes.delete_section(cfgtype, section)
with self.assertRaises(KeyError):
changes[cfgtype][section] # Test section gone from changes
testcfg[cfgtype][section] # and from mock userCfg.
# TODO test for save call.
def test_clear(self):
changes = self.load()
changes.clear()
self.assertEqual(changes, self.empty)
class WarningTest(unittest.TestCase):
def test_warn(self):
Equal = self.assertEqual
config._warned = set()
with captured_stderr() as stderr:
config._warn('warning', 'key')
Equal(config._warned, {('warning','key')})
Equal(stderr.getvalue(), 'warning'+'\n')
with captured_stderr() as stderr:
config._warn('warning', 'key')
Equal(stderr.getvalue(), '')
with captured_stderr() as stderr:
config._warn('warn2', 'yek')
Equal(config._warned, {('warning','key'), ('warn2','yek')})
Equal(stderr.getvalue(), 'warn2'+'\n')
if __name__ == '__main__':
unittest.main(verbosity=2)

avg_line_length: 39.772953 | max_line_length: 107 | alphanum_fraction: 0.611411

hexsha: 98583d0605afbfc50ff518480116acabd9aef06d | size: 72 | ext: py | lang: Python
repo: Z223I/deephyper @ 4fd1054dc22f15197567bdd93c6e7a95a614b8e2 | path: deephyper/benchmark/hps/autosklearn1/__init__.py | licenses: ["BSD-3-Clause"]
max_stars_count: 2 (2020-08-26T09:15:27.000Z to 2020-08-26T09:19:13.000Z) | max_issues_count: null | max_forks_count: 1 (2021-08-31T13:47:27.000Z to 2021-08-31T13:47:27.000Z)
content:
from deephyper.search.hps.automl.classifier.autosklearn1 import Problem

avg_line_length: 36 | max_line_length: 71 | alphanum_fraction: 0.875

hexsha: d76c334259adb5ccf7e19eca37f01f80c2ce5145 | size: 11,341 | ext: py | lang: Python
repo: d-amien-b/simple-getwordpress @ da90d515a0aa837b633d50db4d91d22b031c04a2 | path: Collections-a-installer/community-general-2.4.0/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py | licenses: ["MIT"]
max_stars_count: 22 (2021-07-16T08:11:22.000Z to 2022-03-31T07:15:34.000Z) | max_issues_count: 12 (2020-02-21T07:24:52.000Z to 2020-04-14T09:54:32.000Z) | max_forks_count: 39 (2021-07-05T02:31:42.000Z to 2022-03-31T02:46:03.000Z)
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
module: manageiq_alert_profiles
short_description: Configuration of alert profiles for ManageIQ
extends_documentation_fragment:
- community.general.manageiq
author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
description:
- The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
options:
state:
type: str
description:
- absent - alert profile should not exist,
- present - alert profile should exist,
choices: ['absent', 'present']
default: 'present'
name:
type: str
description:
- The unique alert profile name in ManageIQ.
- Required when state is "absent" or "present".
resource_type:
type: str
description:
- The resource type for the alert profile in ManageIQ. Required when state is "present".
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
'ExtManagementSystem', 'MiddlewareServer']
alerts:
type: list
elements: str
description:
- List of alert descriptions to assign to this profile.
- Required if state is "present"
notes:
type: str
description:
- Optional notes for this profile
'''
EXAMPLES = '''
- name: Add an alert profile to ManageIQ
community.general.manageiq_alert_profiles:
state: present
name: Test profile
resource_type: ContainerNode
alerts:
- Test Alert 01
- Test Alert 02
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete an alert profile from ManageIQ
community.general.manageiq_alert_profiles:
state: absent
name: Test profile
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQAlertProfiles(object):
""" Object to execute alert profile management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)
def get_profiles(self):
""" Get all alert profiles from ManageIQ
"""
try:
response = self.client.get(self.url + '?expand=alert_definitions,resources')
except Exception as e:
self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
return response.get('resources') or []
def get_alerts(self, alert_descriptions):
""" Get a list of alert hrefs from a list of alert descriptions
"""
alerts = []
for alert_description in alert_descriptions:
alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
description=alert_description)
alerts.append(alert['href'])
return alerts
def add_profile(self, profile):
""" Add a new alert profile to ManageIQ
"""
# find all alerts to add to the profile
# we do this first to fail early if one is missing.
alerts = self.get_alerts(profile['alerts'])
# build the profile dict to send to the server
profile_dict = dict(name=profile['name'],
description=profile['name'],
mode=profile['resource_type'])
if profile['notes']:
profile_dict['set_data'] = dict(notes=profile['notes'])
# send it to the server
try:
result = self.client.post(self.url, resource=profile_dict, action="create")
except Exception as e:
self.module.fail_json(msg="Creating profile failed {error}".format(error=e))
# now that it has been created, we can assign the alerts
self.assign_or_unassign(result['results'][0], alerts, "assign")
msg = "Profile {name} created successfully"
msg = msg.format(name=profile['name'])
return dict(changed=True, msg=msg)
def delete_profile(self, profile):
""" Delete an alert profile from ManageIQ
"""
try:
self.client.post(profile['href'], action="delete")
except Exception as e:
self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
msg = "Successfully deleted profile {name}".format(name=profile['name'])
return dict(changed=True, msg=msg)
def get_alert_href(self, alert):
""" Get an absolute href for an alert
"""
return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
def assign_or_unassign(self, profile, resources, action):
""" Assign or unassign alerts to profile, and validate the result.
"""
alerts = [dict(href=href) for href in resources]
subcollection_url = profile['href'] + '/alert_definitions'
try:
result = self.client.post(subcollection_url, resources=alerts, action=action)
if len(result['results']) != len(alerts):
msg = "Failed to {action} alerts to profile '{name}'," +\
"expected {expected} alerts to be {action}ed," +\
"but only {changed} were {action}ed"
msg = msg.format(action=action,
name=profile['name'],
expected=len(alerts),
                                 changed=len(result['results']))
self.module.fail_json(msg=msg)
except Exception as e:
msg = "Failed to {action} alerts to profile '{name}': {error}"
msg = msg.format(action=action, name=profile['name'], error=e)
self.module.fail_json(msg=msg)
return result['results']
def update_profile(self, old_profile, desired_profile):
""" Update alert profile in ManageIQ
"""
changed = False
# we need to use client.get to query the alert definitions
old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
# figure out which alerts we need to assign / unassign
# alerts listed by the user:
desired_alerts = set(self.get_alerts(desired_profile['alerts']))
# alert which currently exist in the profile
if 'alert_definitions' in old_profile:
# we use get_alert_href to have a direct href to the alert
existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
else:
# no alerts in this profile
existing_alerts = set()
to_add = list(desired_alerts - existing_alerts)
to_remove = list(existing_alerts - desired_alerts)
# assign / unassign the alerts, if needed
if to_remove:
self.assign_or_unassign(old_profile, to_remove, "unassign")
changed = True
if to_add:
self.assign_or_unassign(old_profile, to_add, "assign")
changed = True
# update other properties
profile_dict = dict()
if old_profile['mode'] != desired_profile['resource_type']:
# mode needs to be updated
profile_dict['mode'] = desired_profile['resource_type']
# check if notes need to be updated
old_notes = old_profile.get('set_data', {}).get('notes')
if desired_profile['notes'] != old_notes:
profile_dict['set_data'] = dict(notes=desired_profile['notes'])
if profile_dict:
# if we have any updated values
changed = True
try:
result = self.client.post(old_profile['href'],
resource=profile_dict,
action="edit")
except Exception as e:
msg = "Updating profile '{name}' failed: {error}"
msg = msg.format(name=old_profile['name'], error=e)
                self.module.fail_json(msg=msg)
if changed:
msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
else:
msg = "No update needed for profile {name}".format(name=desired_profile['name'])
return dict(changed=changed, msg=msg)
def main():
argument_spec = dict(
name=dict(type='str'),
resource_type=dict(type='str', choices=['Vm',
'ContainerNode',
'MiqServer',
'Host',
'Storage',
'EmsCluster',
'ExtManagementSystem',
'MiddlewareServer']),
alerts=dict(type='list', elements='str'),
notes=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(argument_spec=argument_spec,
required_if=[('state', 'present', ['name', 'resource_type']),
('state', 'absent', ['name'])])
state = module.params['state']
name = module.params['name']
manageiq = ManageIQ(module)
manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
name=name)
# we need to add or update the alert profile
if state == "present":
if not existing_profile:
# a profile with this name doesn't exist yet, let's create it
res_args = manageiq_alert_profiles.add_profile(module.params)
else:
# a profile with this name exists, we might need to update it
res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
# this alert profile should not exist
if state == "absent":
# if we have an alert profile with this name, delete it
if existing_profile:
res_args = manageiq_alert_profiles.delete_profile(existing_profile)
else:
# This alert profile does not exist in ManageIQ, and that's okay
msg = "Alert profile '{name}' does not exist in ManageIQ"
msg = msg.format(name=name)
res_args = dict(changed=False, msg=msg)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
| 37.062092
| 112
| 0.598713
|
82a7fbf95bbc5e487ddb985c160b641999d25d17
| 5,089
|
py
|
Python
|
sdk/python/pulumi_gcp/folder/iam_policy.py
|
23doors/pulumi-gcp
|
ded01b199f95b164884266ea3e6f8206c8231270
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-12-20T22:08:20.000Z
|
2019-12-20T22:08:20.000Z
|
sdk/python/pulumi_gcp/folder/iam_policy.py
|
pellizzetti/pulumi-gcp
|
fad74dd55a0cf7723f73046bb0e6fcbfd948ba84
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/folder/iam_policy.py
|
pellizzetti/pulumi-gcp
|
fad74dd55a0cf7723f73046bb0e6fcbfd948ba84
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class IAMPolicy(pulumi.CustomResource):
etag: pulumi.Output[str]
"""
(Computed) The etag of the folder's IAM policy. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other.
"""
folder: pulumi.Output[str]
"""
The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
"""
policy_data: pulumi.Output[str]
"""
The `organizations.getIAMPolicy` data source that represents
the IAM policy that will be applied to the folder. This policy overrides any existing
policy applied to the folder.
"""
def __init__(__self__, resource_name, opts=None, folder=None, policy_data=None, __props__=None, __name__=None, __opts__=None):
"""
Allows creation and management of the IAM policy for an existing Google Cloud
Platform folder.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] folder: The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
:param pulumi.Input[str] policy_data: The `organizations.getIAMPolicy` data source that represents
the IAM policy that will be applied to the folder. This policy overrides any existing
policy applied to the folder.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/folder_iam_policy.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if folder is None:
raise TypeError("Missing required property 'folder'")
__props__['folder'] = folder
if policy_data is None:
raise TypeError("Missing required property 'policy_data'")
__props__['policy_data'] = policy_data
__props__['etag'] = None
super(IAMPolicy, __self__).__init__(
'gcp:folder/iAMPolicy:IAMPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, etag=None, folder=None, policy_data=None):
"""
Get an existing IAMPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: (Computed) The etag of the folder's IAM policy. `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other.
:param pulumi.Input[str] folder: The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
:param pulumi.Input[str] policy_data: The `organizations.getIAMPolicy` data source that represents
the IAM policy that will be applied to the folder. This policy overrides any existing
policy applied to the folder.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/folder_iam_policy.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["etag"] = etag
__props__["folder"] = folder
__props__["policy_data"] = policy_data
return IAMPolicy(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
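# Usage sketch (illustrative only, not generated output; the folder id and the
# binding below are placeholders, and `organizations.get_iam_policy` is assumed
# to be available in this version of pulumi_gcp):
#
#   import pulumi_gcp as gcp
#   admin = gcp.organizations.get_iam_policy(bindings=[{
#       "role": "roles/editor",
#       "members": ["user:jane@example.com"],
#   }])
#   folder_policy = IAMPolicy("folder-admin",
#                             folder="folders/1234567",
#                             policy_data=admin.policy_data)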
| 50.386139
| 223
| 0.682452
|
1d67d850be4b51f73bca365a64b186565eba8b50
| 4,668
|
py
|
Python
|
amqp/amqp_handler.py
|
jlundy2/service-auto-analyzer
|
91dbb7155eff84877b29a327d68491467befa168
|
[
"Apache-2.0"
] | null | null | null |
amqp/amqp_handler.py
|
jlundy2/service-auto-analyzer
|
91dbb7155eff84877b29a327d68491467befa168
|
[
"Apache-2.0"
] | null | null | null |
amqp/amqp_handler.py
|
jlundy2/service-auto-analyzer
|
91dbb7155eff84877b29a327d68491467befa168
|
[
"Apache-2.0"
] | null | null | null |
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import json
import pika
import commons.launch_objects as launch_objects
logger = logging.getLogger("analyzerApp.amqpHandler")
def prepare_launches(launches):
"""Function for deserializing array of launches"""
return [launch_objects.Launch(**launch) for launch in launches]
def prepare_search_logs(search_data):
"""Function for deserializing search logs object"""
return launch_objects.SearchLogs(**search_data)
def prepare_launch_info(launch_info):
"""Function for deserializing search logs object"""
return launch_objects.LaunchInfoForClustering(**launch_info)
def prepare_clean_index(clean_index):
"""Function for deserializing clean index object"""
return launch_objects.CleanIndex(**clean_index)
def prepare_delete_index(body):
"""Function for deserializing index id object"""
return int(body)
def prepare_test_item_info(test_item_info):
"""Function for deserializing test item info for suggestions"""
return launch_objects.TestItemInfo(**test_item_info)
def prepare_search_response_data(response):
"""Function for serializing response from search request"""
return json.dumps(response)
def prepare_analyze_response_data(response):
"""Function for serializing response from analyze request"""
return json.dumps([resp.dict() for resp in response])
def prepare_index_response_data(response):
"""Function for serializing response from index request
and other objects, which are pydantic objects"""
return response.json()
def output_result(response):
"""Function for serializing int object"""
return str(response)
def handle_amqp_request(channel, method, props, body,
request_handler, prepare_data_func=prepare_launches,
prepare_response_data=prepare_search_response_data):
"""Function for handling amqp reuqest: index, search and analyze"""
logger.debug("Started processing %s method %s props", method, props)
logger.debug("Started processing data %s", body)
try:
launches = json.loads(body, strict=False)
except Exception as err:
logger.error("Failed to load json from body")
logger.error(err)
return False
try:
launches = prepare_data_func(launches)
except Exception as err:
logger.error("Failed to transform body into objects")
logger.error(err)
return False
try:
response = request_handler(launches)
except Exception as err:
logger.error("Failed to process launches")
logger.error(err)
return False
try:
response_body = prepare_response_data(response)
except Exception as err:
logger.error("Failed to dump launches result")
logger.error(err)
return False
try:
channel.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(
correlation_id=props.correlation_id,
content_type="application/json"),
mandatory=False,
body=response_body)
except Exception as err:
logger.error("Failed to publish result")
logger.error(err)
logger.debug("Finished processing %s method", method)
return True
def handle_inner_amqp_request(channel, method, props, body, request_handler):
"""Function for handling inner amqp reuqests"""
logger.debug("Started processing %s method %s props", method, props)
logger.debug("Started processing data %s", body)
try:
stats_info = json.loads(body, strict=False)
except Exception as err:
logger.error("Failed to load json from body")
logger.error(err)
return False
try:
request_handler(stats_info)
except Exception as err:
logger.error("Failed to process stats info")
logger.error(err)
return False
logger.debug("Finished processing %s method", method)
return True
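# Wiring sketch (not part of the service; the queue name and my_search_handler
# are hypothetical, only the pika calls shown here are assumed to exist):
#
#   import pika
#   connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
#   channel = connection.channel()
#   channel.basic_consume(
#       queue="analyzer.search",
#       on_message_callback=lambda ch, method, props, body: handle_amqp_request(
#           ch, method, props, body,
#           request_handler=my_search_handler,
#           prepare_data_func=prepare_search_logs,
#           prepare_response_data=prepare_search_response_data))
#   channel.start_consuming()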
| 33.342857
| 77
| 0.688303
|
b92e18fff271f07e1263f58bdd542dc2bed6a507
| 4,481
|
py
|
Python
|
setup.py
|
venustar1228/cactus-blockchain
|
a2fec724eb19489898ffa306d4203de11edb94b0
|
[
"Apache-2.0"
] | 20
|
2021-07-16T18:08:13.000Z
|
2022-03-20T02:38:39.000Z
|
setup.py
|
Cactus-Network/cactus-blockchain
|
9eef13171dff764bd0549de1479d775272e16bcc
|
[
"Apache-2.0"
] | 29
|
2021-07-17T00:38:18.000Z
|
2022-03-29T19:11:48.000Z
|
setup.py
|
venustar1228/cactus-blockchain
|
a2fec724eb19489898ffa306d4203de11edb94b0
|
[
"Apache-2.0"
] | 21
|
2021-07-17T02:18:57.000Z
|
2022-03-15T08:26:56.000Z
|
from setuptools import setup
dependencies = [
"multidict==5.1.0", # Avoid 5.2.0 due to Avast
"blspy==1.0.6", # Signature library
"chiavdf==1.0.3", # timelord and vdf verification
"chiabip158==1.0", # bip158-style wallet filters
"chiapos==1.0.6", # proof of space
"clvm==0.9.7",
"clvm_rs==0.1.15",
"clvm_tools==0.4.3",
"aiohttp==3.7.4", # HTTP server for full node rpc
"aiosqlite==0.17.0", # asyncio wrapper for sqlite, to store blocks
"bitstring==3.1.9", # Binary data management library
"colorama==0.4.4", # Colorizes terminal output
"colorlog==5.0.1", # Adds color to logs
"concurrent-log-handler==0.9.19", # Concurrently log and rotate logs
"cryptography==3.4.7", # Python cryptography library for TLS - keyring conflict
"fasteners==0.16.3", # For interprocess file locking
"keyring==23.0.1", # Store keys in MacOS Keychain, Windows Credential Locker
"keyrings.cryptfile==1.3.4", # Secure storage for keys on Linux (Will be replaced)
# "keyrings.cryptfile==1.3.8", # Secure storage for keys on Linux (Will be replaced)
# See https://github.com/frispete/keyrings.cryptfile/issues/15
"PyYAML==5.4.1", # Used for config file format
"setproctitle==1.2.2", # Gives the cactus processes readable names
"sortedcontainers==2.4.0", # For maintaining sorted mempools
"websockets==8.1.0", # For use in wallet RPC and electron UI
"click==7.1.2", # For the CLI
"dnspythonchia==2.2.0", # Query DNS seeds
"watchdog==2.1.6", # Filesystem event watching - watches keyring.yaml
]
upnp_dependencies = [
"miniupnpc==2.2.2", # Allows users to open ports on their router
]
dev_dependencies = [
"pytest",
"pytest-asyncio",
"flake8",
"mypy",
"black",
"aiohttp_cors", # For blackd
"ipython", # For asyncio debugging
"types-setuptools",
]
kwargs = dict(
name="cactus-blockchain",
description="Cactus blockchain full node, farmer, timelord, and wallet.",
url="https://cactus-network.net/",
license="Apache License",
python_requires=">=3.7, <4",
keywords="cactus blockchain node",
install_requires=dependencies,
setup_requires=["setuptools_scm"],
extras_require=dict(
uvloop=["uvloop"],
dev=dev_dependencies,
upnp=upnp_dependencies,
),
packages=[
"build_scripts",
"cactus",
"cactus.cmds",
"cactus.clvm",
"cactus.consensus",
"cactus.daemon",
"cactus.full_node",
"cactus.timelord",
"cactus.farmer",
"cactus.harvester",
"cactus.introducer",
"cactus.plotters",
"cactus.plotting",
"cactus.pools",
"cactus.protocols",
"cactus.rpc",
"cactus.server",
"cactus.simulator",
"cactus.types.blockchain_format",
"cactus.types",
"cactus.util",
"cactus.wallet",
"cactus.wallet.puzzles",
"cactus.wallet.rl_wallet",
"cactus.wallet.cc_wallet",
"cactus.wallet.did_wallet",
"cactus.wallet.settings",
"cactus.wallet.trading",
"cactus.wallet.util",
"cactus.ssl",
"mozilla-ca",
],
entry_points={
"console_scripts": [
"cactus = cactus.cmds.cactus:main",
"cactus_wallet = cactus.server.start_wallet:main",
"cactus_full_node = cactus.server.start_full_node:main",
"cactus_harvester = cactus.server.start_harvester:main",
"cactus_farmer = cactus.server.start_farmer:main",
"cactus_introducer = cactus.server.start_introducer:main",
"cactus_timelord = cactus.server.start_timelord:main",
"cactus_timelord_launcher = cactus.timelord.timelord_launcher:main",
"cactus_full_node_simulator = cactus.simulator.start_simulator:main",
]
},
package_data={
"cactus": ["pyinstaller.spec"],
"": ["*.clvm", "*.clvm.hex", "*.clib", "*.clinc", "*.clsp", "py.typed"],
"cactus.util": ["initial-*.yaml", "english.txt"],
"cactus.ssl": ["cactus_ca.crt", "cactus_ca.key", "dst_root_ca.pem"],
"mozilla-ca": ["cacert.pem"],
},
use_scm_version={"fallback_version": "unknown-no-.git-directory"},
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
if __name__ == "__main__":
setup(**kwargs) # type: ignore
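# Install sketch (run from the repository root; the extras come from the
# extras_require mapping above):
#   pip install -e .          # base install
#   pip install -e .[dev]     # with dev tooling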
| 36.137097
| 90
| 0.616603
|
ded2a946dec860b4dcda0280a65fb9f7bf0c38ea
| 604
|
py
|
Python
|
core/urls.py
|
mahanfarzaneh2000/Freelara
|
803cd0e75c5c03ee23ed6dea5202f3e6a7af4864
|
[
"Apache-2.0"
] | null | null | null |
core/urls.py
|
mahanfarzaneh2000/Freelara
|
803cd0e75c5c03ee23ed6dea5202f3e6a7af4864
|
[
"Apache-2.0"
] | null | null | null |
core/urls.py
|
mahanfarzaneh2000/Freelara
|
803cd0e75c5c03ee23ed6dea5202f3e6a7af4864
|
[
"Apache-2.0"
] | 1
|
2021-04-11T09:59:54.000Z
|
2021-04-11T09:59:54.000Z
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path("", include("pages.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls")),
path("gigs/", include("gigs.urls")),
path("orders/", include("orders.urls")),
path("dashboard/", include("dashboard.urls")),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 33.555556
| 82
| 0.713576
|
b3d60fa4ba092306444da6213353b51853c25317
| 12,109
|
py
|
Python
|
syzygy/build/app_verifier.py
|
nzeh/syzygy
|
3573e3d458dbb4285753c28a7cb42ced739f9f55
|
[
"Apache-2.0"
] | 343
|
2015-01-07T05:58:44.000Z
|
2022-03-15T14:55:21.000Z
|
syzygy/build/app_verifier.py
|
nzeh/syzygy-nzeh
|
3757e53f850644721284073de318e218224dd411
|
[
"Apache-2.0"
] | 61
|
2015-03-19T18:20:21.000Z
|
2019-10-23T12:58:23.000Z
|
syzygy/build/app_verifier.py
|
nzeh/syzygy-nzeh
|
3757e53f850644721284073de318e218224dd411
|
[
"Apache-2.0"
] | 66
|
2015-01-20T15:35:05.000Z
|
2021-11-25T16:49:41.000Z
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for running a unittest under Application Verifier."""
import logging
import optparse
import os
import re
import subprocess
import sys
import verifier
_THIRD_PARTY = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'third_party'))
sys.path.append(_THIRD_PARTY)
import colorama
_LOGGER = logging.getLogger(os.path.basename(__file__))
# A list of per-test Application Verifier checks to not run.
_DISABLED_CHECKS = {
'agent_common_unittests.exe': [
# We have a test that deliberately causes an exception which is caught and
# handled by the code under test. However, AV propogates this exception and
# launches a modal dialog window, which causes the test to timeout.
'Exceptions'
],
}
# A list of per-test Application Verifier exceptions.
_EXCEPTIONS = {
'basic_block_entry_unittests.exe': [
# This leak occurs due to a leaky global variable in ScopedHandle.
('Error', 'Leak', 2304, '', '.*::BasicBlockEntryTest::UnloadDll'),
# This leak occurs due to a leaky global lock in ScopedHandle.
('Error', 'Locks', 513, '', '.*::BasicBlockEntryTest::UnloadDll'),
# This is a known (semi-intentional) leak of the TLS index and the last
# active thread's TLS data on module unload.
('Error', 'TLS', 848, '', '.*::BasicBlockEntryTest::UnloadDll'),
],
'coverage_unittests.exe': [
# This leak occurs due to a leaky global variable in ScopedHandle.
('Error', 'Leak', 2304, '', '.*::CoverageClientTest::UnloadDll'),
# This leak occurs only in Debug, which leaks a thread local variable
# used to check thread restrictions.
('Error', 'TLS', 848, '', '.*::CoverageClientTest::UnloadDll'),
],
'instrument_unittests.exe': [
    # The ASAN runtime ends up freeing a heap while holding its critical
# section.
('Error', 'Locks', 513, '', '.*::PELibUnitTest::CheckTestDll'),
# This leak occurs due to a leaky global lock in ScopedHandle.
('Error', 'Locks', 514, '', '.*::PELibUnitTest::CheckTestDll'),
# This leak occurs only in Debug, which leaks a thread local variable
# used to check thread restrictions.
('Error', 'TLS', 848, '', '.*::PELibUnitTest::CheckTestDll'),
],
'memprof_unittests.exe': [
# This leak occurs due to a leaky global variable in ScopedHandle.
('Error', 'Leak', 2304, '', '.*::MemoryProfilerTest::UnloadDll'),
# This leak occurs due to a leaky global lock in ScopedHandle.
('Error', 'Locks', 513, '', '.*::MemoryProfilerTest::UnloadDll'),
# This leak occurs only in Debug, which leaks a thread local variable
# used to check thread restrictions.
('Error', 'TLS', 848, '', '.*::MemoryProfilerTest::UnloadDll'),
],
'parse_unittests.exe': [
# This leak occurs due to a leaky global variable in ScopedHandle.
('Error', 'Leak', 2304, '', '.*::ParseEngineRpcTest::UnloadCallTraceDll'),
# This leak occurs only in Debug, which leaks a thread local variable
# used to check thread restrictions.
('Error', 'TLS', 848, '', '.*::ParseEngineRpcTest::UnloadCallTraceDll'),
],
'profile_unittests.exe': [
# This leak occurs due to a leaky global variable in ScopedHandle.
('Error', 'Leak', 2304, '', '.*::ProfilerTest::UnloadDll'),
('Error', 'Leak', 2305, '', '.*::ProfilerTest::UnloadDll'),
# This leak occurs due to a leaky global lock in ScopedHandle.
('Error', 'Locks', 513, '', 'agent::profiler::.*::ProfilerTest::UnloadDll'),
# This leak occurs only in Debug, which leaks a thread local variable
# used to check thread restrictions.
('Error', 'TLS', 848, '', 'agent::profiler::.*::ProfilerTest::UnloadDll'),
],
'refinery_unittests.exe': [
# These are due to the syzyasan rtl (used in the instrumented test dll)
# which relies on the leaky PathService.
('Error', 'Leak', 2304, '',
'refinery::PdbCrawlerVTableTest_TestGetVFTableRVAs_Test'),
('Error', 'Locks', 513, '',
'refinery::PdbCrawlerVTableTest_TestGetVFTableRVAs_Test'),
('Error', 'TLS', 848, '',
'refinery::PdbCrawlerVTableTest_TestGetVFTableRVAs_Test'),
],
}
# A list of Application Verifier exceptions applicable to all tests.
_GLOBAL_EXCEPTIONS = [
# Symsrv related errors.
('Error', 'Leak', 2304, 'dbghelp', '^SymGetFileLineOffsets64$'),
('Error', 'Locks', 513, 'dbghelp', '^SymGetFileLineOffsets64$'),
('Error', 'Locks', 529, 'dbghelp', '^SymGetFileLineOffsets64$'),
]
# A list of unittests that should not be run under the application verifier at
# all.
_BLACK_LIST = [
# These can't be run under AppVerifier because we end up double hooking the
# operating system heap function, leading to nonsense.
'integration_tests.exe',
'integration_tests_4g.exe',
'syzyasan_rtl_unittests.exe',
# AppVerifier triggers memory hoggage in this test due to parsing complex
# symbols (numerous allocations) and a page heap being used. This leads to an
# OOM, and subsequent system misunderstanding and barfing of hex stack trace.
'refinery_stack_unittests.exe',
]
class Error(Exception):
"""Base class used for exceptions thrown in this module."""
pass
def Colorize(text):
"""Colorizes the given app verifier output with ANSI color codes."""
fore = colorama.Fore
style = colorama.Style
def _ColorizeLine(line):
line = re.sub('^(Error.*:)(.*)',
style.BRIGHT + fore.RED + '\\1' + fore.YELLOW + '\\2' +
style.RESET_ALL,
line)
line = re.sub('^(Warning:)(.*)',
style.BRIGHT + fore.YELLOW + '\\1' + style.RESET_ALL + '\\2',
line)
return line
return '\n'.join([_ColorizeLine(line) for line in text.split('\n')])
def FilterExceptions(image_name, errors):
"""Filter out the Application Verifier errors that have exceptions."""
exceptions = _EXCEPTIONS.get(image_name, [])
exceptions.extend(_GLOBAL_EXCEPTIONS)
def _HasNoException(error):
# Iterate over all the exceptions.
for (severity, layer, stopcode, module_regexp, symbol_regexp) in exceptions:
# And see if they match, first by type.
if (error.severity == severity and
error.layer == layer and
error.stopcode == stopcode):
# And then by regexpr match to the trace symbols.
for trace in error.trace:
module_matches = True
if module_regexp:
module_matches = (
trace.module and re.match(module_regexp, trace.module))
symbol_matches = True
if symbol_regexp:
symbol_matches = (
trace.symbol and re.match(symbol_regexp, trace.symbol))
if module_matches and symbol_matches:
return False
return True
  filtered_errors = list(filter(_HasNoException, errors))
error_count = len(filtered_errors)
filtered_count = len(errors) - error_count
if error_count:
suffix = '' if error_count == 1 else 's'
filtered_errors.append(
'Error: Encountered %d AppVerifier exception%s for %s.' %
(error_count, suffix, image_name))
if filtered_count:
suffix1 = '' if filtered_count == 1 else 's'
suffix2 = '' if len(exceptions) == 1 else 's'
filtered_errors.append(
'Warning: Filtered %d AppVerifier exception%s for %s using %d rule%s.' %
(filtered_count, suffix1, image_name, len(exceptions), suffix2))
return (error_count, filtered_errors)
def _RunUnderAppVerifier(command):
runner = verifier.AppverifierTestRunner(False)
image_path = os.path.abspath(command[0])
image_name = os.path.basename(image_path)
disabled_checks = _DISABLED_CHECKS.get(image_name, [])
if not os.path.isfile(image_path):
raise Error('Path not found: %s' % image_path)
# Set up the verifier configuration.
runner.SetImageDefaults(image_name, disabled_checks=disabled_checks)
runner.ClearImageLogs(image_name)
# Run the executable. We disable exception catching as it interferes with
# Application Verifier.
command = [image_path] + command[1:] + ['--gtest_catch_exceptions=0']
_LOGGER.info('Running %s.', command)
popen = subprocess.Popen(command)
(dummy_stdout, dummy_stderr) = popen.communicate()
# Process the AppVerifier logs, filtering exceptions.
app_verifier_errors = runner.ProcessLogs(image_name)
(error_count, app_verifier_errors) = FilterExceptions(
image_name, app_verifier_errors)
# Generate warnings for error categories that were disabled.
for check in disabled_checks:
app_verifier_errors.append(
'Warning: Disabled AppVerifier %s checks.' % check)
# Output all warnings and errors.
for error in app_verifier_errors:
msg = Colorize(str(error) + '\n')
sys.stderr.write(msg)
# Clear the verifier settings for the image.
runner.ClearImageLogs(image_name)
runner.ResetImage(image_name)
if popen.returncode:
_LOGGER.error('%s failed with return code %d.', image_name,
popen.returncode)
if error_count:
suffix = '' if error_count == 1 else 's'
_LOGGER.error('%s failed AppVerifier test with %d exception%s.',
image_name, error_count, suffix)
if popen.returncode:
return popen.returncode
return error_count
def _RunNormally(command):
# We reset the image settings so that AppVerifier isn't left incidentally
# configured.
runner = verifier.AppverifierTestRunner(False)
image_path = os.path.abspath(command[0])
image_name = os.path.basename(image_path)
runner.ClearImageLogs(image_name)
runner.ResetImage(image_name)
image_path = os.path.abspath(command[0])
command = [image_path] + command[1:]
_LOGGER.info('Running %s outside of AppVerifier.' % command)
popen = subprocess.Popen(command)
(dummy_stdout, dummy_stderr) = popen.communicate()
# To be consistent with _RunUnderAppVerifier we output warnings at the end.
sys.stderr.write(Colorize(
'Warning: AppVerifier was disabled for this test.\n'))
return popen.returncode
_USAGE = '%prog [options] APPLICATION -- [application options]'
def _IsBlacklisted(command):
image_base = os.path.basename(command[0])
if image_base in _BLACK_LIST:
_LOGGER.info('Executable is blacklisted: %s.' % image_base)
return True
return False
def _ParseArgs():
parser = optparse.OptionParser(usage=_USAGE)
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False,
help='Enable verbose logging.')
parser.add_option('--on-waterfall', dest='on_waterfall',
action='store_true', default=False,
help='Indicate that we are running on the waterfall.')
(opts, args) = parser.parse_args()
if not len(args):
parser.error('You must specify an application.')
if opts.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
return (opts, args)
def Main():
colorama.init()
(opts, args) = _ParseArgs()
if _IsBlacklisted(args):
return_code = _RunNormally(args)
else:
return_code = _RunUnderAppVerifier(args)
if return_code and opts.on_waterfall:
command = [args[0]] + ['--'] + args[1:]
command = 'python build\\app_verifier.py %s' % ' '.join(command)
sys.stderr.write('To reproduce this error locally run the following '
'command from the Syzygy root directory:\n')
sys.stderr.write(command + '\n')
sys.exit(return_code)
if __name__ == '__main__':
Main()
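# Example invocation (a sketch; the test binary name is taken from the tables
# above and the arguments after '--' are forwarded to the application):
#   python build\app_verifier.py agent_common_unittests.exe -- --gtest_filter=SomeSuite.*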
| 36.472892
| 80
| 0.680733
|
f67d19e97c87ad73766d0cdf150e1ce5f321b006
| 870
|
py
|
Python
|
triple_store_submitter/consts.py
|
ds-wizard/triple-store-submission-service
|
5c0403895bab0133745a75c39a069202fcb7b463
|
[
"Apache-2.0"
] | null | null | null |
triple_store_submitter/consts.py
|
ds-wizard/triple-store-submission-service
|
5c0403895bab0133745a75c39a069202fcb7b463
|
[
"Apache-2.0"
] | null | null | null |
triple_store_submitter/consts.py
|
ds-wizard/triple-store-submission-service
|
5c0403895bab0133745a75c39a069202fcb7b463
|
[
"Apache-2.0"
] | null | null | null |
PACKAGE_NAME = 'triple_store_submitter'
NICE_NAME = 'DSW Triple Store Submission Service'
PACKAGE_VERSION = '1.2.0'
ENV_CONFIG = 'SUBMISSION_CONFIG'
_DEFAULT_BUILT_AT = 'BUILT_AT'
BUILT_AT = '--BUILT_AT--'
_DEFAULT_VERSION = 'VERSION'
VERSION = '--VERSION--'
DEFAULT_ENCODING = 'utf-8'
COMMENT_INSTRUCTION_DELIMITER = ':'
COMMENT_PRE_QUERY_PREFIX = '#> pre-query:'
COMMENT_POST_QUERY_PREFIX = '#> post-query:'
class BuildInfo:
name = NICE_NAME
built_at = BUILT_AT if BUILT_AT != f'--{_DEFAULT_BUILT_AT}--' else 'unknown'
version = VERSION if VERSION != f'--{_DEFAULT_VERSION}--' else 'unknown'
package_version = PACKAGE_VERSION
@classmethod
def obj(cls):
return {
'name': cls.name,
'package_version': cls.package_version,
'version': cls.version,
'built_at': cls.built_at,
}
| 25.588235
| 80
| 0.667816
|
c64e6d680caad71ea6e4f17dc00339c229af2b2c
| 356
|
py
|
Python
|
script_runner/celery.py
|
cscanlin/munger-builder
|
adb95ae31b8fc15fd914fee469be94895b285a6d
|
[
"MIT"
] | 9
|
2015-11-06T10:37:37.000Z
|
2021-02-24T03:59:39.000Z
|
script_runner/celery.py
|
cscanlin/munger-builder
|
adb95ae31b8fc15fd914fee469be94895b285a6d
|
[
"MIT"
] | null | null | null |
script_runner/celery.py
|
cscanlin/munger-builder
|
adb95ae31b8fc15fd914fee469be94895b285a6d
|
[
"MIT"
] | 3
|
2015-12-21T17:40:44.000Z
|
2018-05-15T13:46:15.000Z
|
# from __future__ import absolute_import
#
# import os
#
# from celery import Celery
#
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'munger_builder.settings')
#
# from django.conf import settings
#
# app = Celery('munger_builder')
# app.config_from_object('django.conf:settings')
# app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 25.428571
| 77
| 0.75
|
cea8dff4a0583445cedb532b43ecf25ba7ba0376
| 729
|
py
|
Python
|
src/idx_proba_dict.py
|
chimerast/scdv-docker
|
c833c02f5f63ca9cc3e1ccc45b75e597fc5503a8
|
[
"Apache-2.0"
] | null | null | null |
src/idx_proba_dict.py
|
chimerast/scdv-docker
|
c833c02f5f63ca9cc3e1ccc45b75e597fc5503a8
|
[
"Apache-2.0"
] | null | null | null |
src/idx_proba_dict.py
|
chimerast/scdv-docker
|
c833c02f5f63ca9cc3e1ccc45b75e597fc5503a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import signal
import pickle
import gensim
from sklearn.mixture import GaussianMixture
signal.signal(signal.SIGINT, signal.SIG_DFL)
num_clusters = int(sys.argv[1])
wikiFile = sys.argv[2]
baseFile = os.path.splitext(wikiFile)[0]
modelFile = baseFile + '.vec'
probaFile = baseFile + '.proba'
model = gensim.models.KeyedVectors.load_word2vec_format(modelFile, binary=False)
clf = GaussianMixture(n_components=num_clusters, covariance_type='tied', init_params='kmeans', max_iter=50)
clf.fit(model.vectors)
idx_proba = clf.predict_proba(model.vectors)
idx_proba_dict = dict(zip(model.index2word, idx_proba))
pickle.dump(idx_proba_dict, open(probaFile, 'wb'))
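# Example invocation (file names are placeholders; a matching <base>.vec
# word2vec text file must already exist next to the corpus file):
#   python3 idx_proba_dict.py 60 data/jawiki.txt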
| 25.137931
| 107
| 0.772291
|
9ab0bcc417b16fe1d81d2f91b97c3f6c25fba5b7
| 23,158
|
py
|
Python
|
silx/gui/plot/StatsWidget.py
|
payno/silx
|
13301e61627f98fa837008250ac74a0627a7a560
|
[
"CC0-1.0",
"MIT"
] | 1
|
2016-10-26T11:05:46.000Z
|
2016-10-26T11:05:46.000Z
|
silx/gui/plot/StatsWidget.py
|
payno/silx
|
13301e61627f98fa837008250ac74a0627a7a560
|
[
"CC0-1.0",
"MIT"
] | 1
|
2018-05-03T12:39:27.000Z
|
2018-05-03T12:39:27.000Z
|
silx/gui/plot/StatsWidget.py
|
payno/silx
|
13301e61627f98fa837008250ac74a0627a7a560
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
Module containing widgets displaying stats from items of a plot.
"""
__authors__ = ["H. Payno"]
__license__ = "MIT"
__date__ = "24/07/2018"
import functools
import logging
import numpy
from collections import OrderedDict
import silx.utils.weakref
from silx.gui import qt
from silx.gui import icons
from silx.gui.plot.items.curve import Curve as CurveItem
from silx.gui.plot.items.histogram import Histogram as HistogramItem
from silx.gui.plot.items.image import ImageBase as ImageItem
from silx.gui.plot.items.scatter import Scatter as ScatterItem
from silx.gui.plot import stats as statsmdl
from silx.gui.widgets.TableWidget import TableWidget
from silx.gui.plot.stats.statshandler import StatsHandler, StatFormatter
logger = logging.getLogger(__name__)
class StatsWidget(qt.QWidget):
"""
Widget displaying a set of :class:`Stat` to be displayed on a
    :class:`StatsTable` and to be applied to items contained in the :class:`Plot`
Also contains options to:
* compute statistics on all the data or on visible data only
* show statistics of all items or only the active one
:param parent: Qt parent
:param plot: the plot containing items on which we want statistics.
"""
sigVisibilityChanged = qt.Signal(bool)
NUMBER_FORMAT = '{0:.3f}'
class OptionsWidget(qt.QToolBar):
def __init__(self, parent=None):
qt.QToolBar.__init__(self, parent)
self.setIconSize(qt.QSize(16, 16))
action = qt.QAction(self)
action.setIcon(icons.getQIcon("stats-active-items"))
action.setText("Active items only")
action.setToolTip("Display stats for active items only.")
action.setCheckable(True)
action.setChecked(True)
self.__displayActiveItems = action
action = qt.QAction(self)
action.setIcon(icons.getQIcon("stats-whole-items"))
action.setText("All items")
action.setToolTip("Display stats for all available items.")
action.setCheckable(True)
self.__displayWholeItems = action
action = qt.QAction(self)
action.setIcon(icons.getQIcon("stats-visible-data"))
action.setText("Use the visible data range")
action.setToolTip("Use the visible data range.<br/>"
"If activated the data is filtered to only use"
"visible data of the plot."
"The filtering is a data sub-sampling."
"No interpolation is made to fit data to"
"boundaries.")
action.setCheckable(True)
self.__useVisibleData = action
action = qt.QAction(self)
action.setIcon(icons.getQIcon("stats-whole-data"))
action.setText("Use the full data range")
action.setToolTip("Use the full data range.")
action.setCheckable(True)
action.setChecked(True)
self.__useWholeData = action
self.addAction(self.__displayWholeItems)
self.addAction(self.__displayActiveItems)
self.addSeparator()
self.addAction(self.__useVisibleData)
self.addAction(self.__useWholeData)
self.itemSelection = qt.QActionGroup(self)
self.itemSelection.setExclusive(True)
self.itemSelection.addAction(self.__displayActiveItems)
self.itemSelection.addAction(self.__displayWholeItems)
self.dataRangeSelection = qt.QActionGroup(self)
self.dataRangeSelection.setExclusive(True)
self.dataRangeSelection.addAction(self.__useWholeData)
self.dataRangeSelection.addAction(self.__useVisibleData)
def isActiveItemMode(self):
return self.itemSelection.checkedAction() is self.__displayActiveItems
def isVisibleDataRangeMode(self):
return self.dataRangeSelection.checkedAction() is self.__useVisibleData
def __init__(self, parent=None, plot=None, stats=None):
qt.QWidget.__init__(self, parent)
self.setLayout(qt.QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self._options = self.OptionsWidget(parent=self)
self.layout().addWidget(self._options)
self._statsTable = StatsTable(parent=self, plot=plot)
self.setStats = self._statsTable.setStats
self.setStats(stats)
self.layout().addWidget(self._statsTable)
self.setPlot = self._statsTable.setPlot
self._options.itemSelection.triggered.connect(
self._optSelectionChanged)
self._options.dataRangeSelection.triggered.connect(
self._optDataRangeChanged)
self._optSelectionChanged()
self._optDataRangeChanged()
self.setDisplayOnlyActiveItem = self._statsTable.setDisplayOnlyActiveItem
self.setStatsOnVisibleData = self._statsTable.setStatsOnVisibleData
def showEvent(self, event):
self.sigVisibilityChanged.emit(True)
qt.QWidget.showEvent(self, event)
def hideEvent(self, event):
self.sigVisibilityChanged.emit(False)
qt.QWidget.hideEvent(self, event)
def _optSelectionChanged(self, action=None):
self._statsTable.setDisplayOnlyActiveItem(self._options.isActiveItemMode())
def _optDataRangeChanged(self, action=None):
self._statsTable.setStatsOnVisibleData(self._options.isVisibleDataRangeMode())
class BasicStatsWidget(StatsWidget):
"""
Widget defining a simple set of :class:`Stat` to be displayed on a
:class:`StatsWidget`.
:param parent: Qt parent
:param plot: the plot containing items on which we want statistics.
"""
STATS = StatsHandler((
(statsmdl.StatMin(), StatFormatter()),
statsmdl.StatCoordMin(),
(statsmdl.StatMax(), StatFormatter()),
statsmdl.StatCoordMax(),
(('std', numpy.std), StatFormatter()),
(('mean', numpy.mean), StatFormatter()),
statsmdl.StatCOM()
))
def __init__(self, parent=None, plot=None):
StatsWidget.__init__(self, parent=parent, plot=plot, stats=self.STATS)
class StatsTable(TableWidget):
"""
    TableWidget displaying, for each curve contained in the Plot, some
information:
* legend
* minimal value
* maximal value
* standard deviation (std)
:param parent: The widget's parent.
:param plot: :class:`.PlotWidget` instance on which to operate
"""
COMPATIBLE_KINDS = {
'curve': CurveItem,
'image': ImageItem,
'scatter': ScatterItem,
'histogram': HistogramItem
}
COMPATIBLE_ITEMS = tuple(COMPATIBLE_KINDS.values())
def __init__(self, parent=None, plot=None):
TableWidget.__init__(self, parent)
"""Next freeID for the curve"""
self.plot = None
self._displayOnlyActItem = False
self._statsOnVisibleData = False
self._lgdAndKindToItems = {}
"""Associate to a tuple(legend, kind) the items legend"""
self.callbackImage = None
self.callbackScatter = None
self.callbackCurve = None
"""Associate the curve legend to his first item"""
self._statsHandler = None
self._legendsSet = []
"""list of legends actually displayed"""
self._resetColumns()
self.setColumnCount(len(self._columns))
self.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
self.setPlot(plot)
self.setSortingEnabled(True)
def _resetColumns(self):
self._columns_index = OrderedDict([('legend', 0), ('kind', 1)])
self._columns = self._columns_index.keys()
self.setColumnCount(len(self._columns))
def setStats(self, statsHandler):
"""
        :param statsHandler: Set the statistics to be displayed and how to
                             format them.
        :type statsHandler: :class:`StatsHandler`
"""
_statsHandler = statsHandler
if statsHandler is None:
_statsHandler = StatsHandler(statFormatters=())
if isinstance(_statsHandler, (list, tuple)):
_statsHandler = StatsHandler(_statsHandler)
assert isinstance(_statsHandler, StatsHandler)
self._resetColumns()
self.clear()
for statName, stat in list(_statsHandler.stats.items()):
assert isinstance(stat, statsmdl.StatBase)
self._columns_index[statName] = len(self._columns_index)
self._statsHandler = _statsHandler
self._columns = self._columns_index.keys()
self.setColumnCount(len(self._columns))
self._updateItemObserve()
self._updateAllStats()
def getStatsHandler(self):
return self._statsHandler
def _updateAllStats(self):
for (legend, kind) in self._lgdAndKindToItems:
self._updateStats(legend, kind)
@staticmethod
def _getKind(myItem):
if isinstance(myItem, CurveItem):
return 'curve'
elif isinstance(myItem, ImageItem):
return 'image'
elif isinstance(myItem, ScatterItem):
return 'scatter'
elif isinstance(myItem, HistogramItem):
return 'histogram'
else:
return None
def setPlot(self, plot):
"""
Define the plot to interact with
:param plot: the plot containing the items on which statistics are
applied
        :type plot: :class:`.PlotWidget`
"""
if self.plot:
self._dealWithPlotConnection(create=False)
self.plot = plot
self.clear()
if self.plot:
self._dealWithPlotConnection(create=True)
self._updateItemObserve()
def _updateItemObserve(self):
if self.plot:
self.clear()
if self._displayOnlyActItem is True:
activeCurve = self.plot.getActiveCurve(just_legend=False)
activeScatter = self.plot._getActiveItem(kind='scatter',
just_legend=False)
activeImage = self.plot.getActiveImage(just_legend=False)
if activeCurve:
self._addItem(activeCurve)
if activeImage:
self._addItem(activeImage)
if activeScatter:
self._addItem(activeScatter)
else:
[self._addItem(curve) for curve in self.plot.getAllCurves()]
[self._addItem(image) for image in self.plot.getAllImages()]
scatters = self.plot._getItems(kind='scatter',
just_legend=False,
withhidden=True)
[self._addItem(scatter) for scatter in scatters]
histograms = self.plot._getItems(kind='histogram',
just_legend=False,
withhidden=True)
[self._addItem(histogram) for histogram in histograms]
def _dealWithPlotConnection(self, create=True):
"""
Manage connection to plot signals
        Note: connections on items are managed by the _removeItem function
"""
if self.plot is None:
return
if self._displayOnlyActItem:
if create is True:
if self.callbackImage is None:
self.callbackImage = functools.partial(self._activeItemChanged, 'image')
self.callbackScatter = functools.partial(self._activeItemChanged, 'scatter')
self.callbackCurve = functools.partial(self._activeItemChanged, 'curve')
self.plot.sigActiveImageChanged.connect(self.callbackImage)
self.plot.sigActiveScatterChanged.connect(self.callbackScatter)
self.plot.sigActiveCurveChanged.connect(self.callbackCurve)
else:
if self.callbackImage is not None:
self.plot.sigActiveImageChanged.disconnect(self.callbackImage)
self.plot.sigActiveScatterChanged.disconnect(self.callbackScatter)
self.plot.sigActiveCurveChanged.disconnect(self.callbackCurve)
self.callbackImage = None
self.callbackScatter = None
self.callbackCurve = None
else:
if create is True:
self.plot.sigContentChanged.connect(self._plotContentChanged)
else:
self.plot.sigContentChanged.disconnect(self._plotContentChanged)
if create is True:
self.plot.sigPlotSignal.connect(self._zoomPlotChanged)
else:
self.plot.sigPlotSignal.disconnect(self._zoomPlotChanged)
def clear(self):
"""
Clear all existing items
"""
lgdsAndKinds = list(self._lgdAndKindToItems.keys())
for lgdAndKind in lgdsAndKinds:
self._removeItem(legend=lgdAndKind[0], kind=lgdAndKind[1])
self._lgdAndKindToItems = {}
qt.QTableWidget.clear(self)
self.setRowCount(0)
        # It has to be called before accessing the header items
self.setHorizontalHeaderLabels(list(self._columns))
if self._statsHandler is not None:
for columnId, name in enumerate(self._columns):
item = self.horizontalHeaderItem(columnId)
if name in self._statsHandler.stats:
stat = self._statsHandler.stats[name]
text = stat.name[0].upper() + stat.name[1:]
if stat.description is not None:
tooltip = stat.description
else:
tooltip = ""
else:
text = name[0].upper() + name[1:]
tooltip = ""
item.setToolTip(tooltip)
item.setText(text)
if hasattr(self.horizontalHeader(), 'setSectionResizeMode'): # Qt5
self.horizontalHeader().setSectionResizeMode(qt.QHeaderView.ResizeToContents)
else: # Qt4
self.horizontalHeader().setResizeMode(qt.QHeaderView.ResizeToContents)
self.setColumnHidden(self._columns_index['kind'], True)
def _addItem(self, item):
assert isinstance(item, self.COMPATIBLE_ITEMS)
if (item.getLegend(), self._getKind(item)) in self._lgdAndKindToItems:
self._updateStats(item.getLegend(), self._getKind(item))
return
self.setRowCount(self.rowCount() + 1)
indexTable = self.rowCount() - 1
kind = self._getKind(item)
self._lgdAndKindToItems[(item.getLegend(), kind)] = {}
        # _getItem creates the table item if it does not exist yet
_createItem = self._getItem
for itemName in self._columns:
_createItem(name=itemName, legend=item.getLegend(), kind=kind,
indexTable=indexTable)
self._updateStats(legend=item.getLegend(), kind=kind)
callback = functools.partial(
silx.utils.weakref.WeakMethodProxy(self._updateStats),
item.getLegend(), kind)
item.sigItemChanged.connect(callback)
self.setColumnHidden(self._columns_index['kind'],
item.getLegend() not in self._legendsSet)
self._legendsSet.append(item.getLegend())
def _getItem(self, name, legend, kind, indexTable):
if (legend, kind) not in self._lgdAndKindToItems:
self._lgdAndKindToItems[(legend, kind)] = {}
if not (name in self._lgdAndKindToItems[(legend, kind)] and
self._lgdAndKindToItems[(legend, kind)]):
if name in ('legend', 'kind'):
_item = qt.QTableWidgetItem(type=qt.QTableWidgetItem.Type)
if name == 'legend':
_item.setText(legend)
else:
assert name == 'kind'
_item.setText(kind)
else:
if self._statsHandler.formatters[name]:
_item = self._statsHandler.formatters[name].tabWidgetItemClass()
else:
_item = qt.QTableWidgetItem()
tooltip = self._statsHandler.stats[name].getToolTip(kind=kind)
if tooltip is not None:
_item.setToolTip(tooltip)
_item.setFlags(qt.Qt.ItemIsEnabled | qt.Qt.ItemIsSelectable)
self.setItem(indexTable, self._columns_index[name], _item)
self._lgdAndKindToItems[(legend, kind)][name] = _item
return self._lgdAndKindToItems[(legend, kind)][name]
def _removeItem(self, legend, kind):
if (legend, kind) not in self._lgdAndKindToItems or not self.plot:
return
self.firstItem = self._lgdAndKindToItems[(legend, kind)]['legend']
del self._lgdAndKindToItems[(legend, kind)]
self.removeRow(self.firstItem.row())
self._legendsSet.remove(legend)
self.setColumnHidden(self._columns_index['kind'],
legend not in self._legendsSet)
def _updateCurrentStats(self):
for lgdAndKind in self._lgdAndKindToItems:
self._updateStats(lgdAndKind[0], lgdAndKind[1])
def _updateStats(self, legend, kind, event=None):
if self._statsHandler is None:
return
assert kind in ('curve', 'image', 'scatter', 'histogram')
if kind == 'curve':
item = self.plot.getCurve(legend)
elif kind == 'image':
item = self.plot.getImage(legend)
elif kind == 'scatter':
item = self.plot.getScatter(legend)
elif kind == 'histogram':
item = self.plot.getHistogram(legend)
else:
raise ValueError('kind not managed')
if not item or (item.getLegend(), kind) not in self._lgdAndKindToItems:
return
assert isinstance(item, self.COMPATIBLE_ITEMS)
statsValDict = self._statsHandler.calculate(item, self.plot,
self._statsOnVisibleData)
lgdItem = self._lgdAndKindToItems[(item.getLegend(), kind)]['legend']
assert lgdItem
rowStat = lgdItem.row()
for statName, statVal in list(statsValDict.items()):
assert statName in self._lgdAndKindToItems[(item.getLegend(), kind)]
tableItem = self._getItem(name=statName, legend=item.getLegend(),
kind=kind, indexTable=rowStat)
tableItem.setText(str(statVal))
def currentChanged(self, current, previous):
if current.row() >= 0:
legendItem = self.item(current.row(), self._columns_index['legend'])
assert legendItem
kindItem = self.item(current.row(), self._columns_index['kind'])
kind = kindItem.text()
if kind == 'curve':
self.plot.setActiveCurve(legendItem.text())
elif kind == 'image':
self.plot.setActiveImage(legendItem.text())
elif kind == 'scatter':
self.plot._setActiveItem('scatter', legendItem.text())
elif kind == 'histogram':
                # the active histogram is not currently managed by the plot
pass
else:
raise ValueError('kind not managed')
qt.QTableWidget.currentChanged(self, current, previous)
def setDisplayOnlyActiveItem(self, displayOnlyActItem):
"""
:param bool displayOnlyActItem: True if we want to only show active
item
"""
if self._displayOnlyActItem == displayOnlyActItem:
return
self._displayOnlyActItem = displayOnlyActItem
self._dealWithPlotConnection(create=False)
self._updateItemObserve()
self._dealWithPlotConnection(create=True)
def setStatsOnVisibleData(self, b):
"""
        .. warning:: When visible data is activated, only a simple filtering
                     of the data visible to the user is applied. The filtering is a
simple data sub-sampling. No interpolation is made to fit
data to boundaries.
:param bool b: True if we want to apply statistics only on visible data
"""
if self._statsOnVisibleData != b:
self._statsOnVisibleData = b
self._updateCurrentStats()
def _activeItemChanged(self, kind):
"""Callback used when plotting only the active item"""
assert kind in ('curve', 'image', 'scatter', 'histogram')
self._updateItemObserve()
def _plotContentChanged(self, action, kind, legend):
"""Callback used when plotting all the plot items"""
if kind not in ('curve', 'image', 'scatter', 'histogram'):
return
if kind == 'curve':
item = self.plot.getCurve(legend)
elif kind == 'image':
item = self.plot.getImage(legend)
elif kind == 'scatter':
item = self.plot.getScatter(legend)
elif kind == 'histogram':
item = self.plot.getHistogram(legend)
else:
raise ValueError('kind not managed')
if action == 'add':
if item is None:
                raise ValueError('Item with legend "%s" does not exist' % legend)
self._addItem(item)
elif action == 'remove':
self._removeItem(legend, kind)
def _zoomPlotChanged(self, event):
if self._statsOnVisibleData is True:
if 'event' in event and event['event'] == 'limitsChanged':
self._updateCurrentStats()
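# Minimal usage sketch (assumes a running Qt application and silx's Plot1D;
# not part of this module):
#
#   from silx.gui.plot import Plot1D
#   plot = Plot1D()
#   plot.addCurve(x=(0, 1, 2, 3), y=(1, 3, 2, 4), legend='demo')
#   stats = BasicStatsWidget(plot=plot)
#   stats.show()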
| 39.722127
| 96
| 0.613265
|
4657aa83299a440d2ea835a0bbc743a427e487b6
| 3,045
|
py
|
Python
|
magenta/models/onsets_frames_transcription/configs.py
|
treeson-li/magenta
|
d5b5dcf3acfa0a9175f555df3b0658a0241a8e9c
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/onsets_frames_transcription/configs.py
|
treeson-li/magenta
|
d5b5dcf3acfa0a9175f555df3b0658a0241a8e9c
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/onsets_frames_transcription/configs.py
|
treeson-li/magenta
|
d5b5dcf3acfa0a9175f555df3b0658a0241a8e9c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configurations for transcription models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from magenta.common import tf_utils
from magenta.models.onsets_frames_transcription import audio_transform
import model
import tensorflow as tf
Config = collections.namedtuple('Config', ('model_fn', 'hparams'))
DEFAULT_HPARAMS = tf_utils.merge_hparams(
audio_transform.DEFAULT_AUDIO_TRANSFORM_HPARAMS,
tf.contrib.training.HParams(
eval_batch_size=1,
predict_batch_size=1,
sample_rate=16000,
spec_type='mel',
spec_mel_htk=True,
spec_log_amplitude=True,
spec_hop_length=512,
spec_n_bins=229,
spec_fmin=30.0, # A0
cqt_bins_per_octave=36,
truncated_length_secs=0.0,
max_expected_train_example_len=0,
semisupervised_concat=False,
onset_length=32,
offset_length=32,
onset_mode='length_ms',
onset_delay=0,
min_frame_occupancy_for_label=0.0,
jitter_amount_ms=0,
min_duration_ms=0,
backward_shift_amount_ms=0))
CONFIG_MAP = {}
CONFIG_MAP['onsets_frames'] = Config(
model_fn=model.model_fn,
hparams=tf_utils.merge_hparams(DEFAULT_HPARAMS,
model.get_default_hparams()),
)
DatasetConfig = collections.namedtuple(
'DatasetConfig', ('name', 'path', 'process_for_training'))
DATASET_CONFIG_MAP = {}
DATASET_CONFIG_MAP['maestro'] = [
DatasetConfig(
'train',
'gs://magentadata/datasets/maestro/v1.0.0/'
'maestro-v1.0.0_ns_wav_train.tfrecord@10',
process_for_training=True),
DatasetConfig(
'eval_train',
'gs://magentadata/datasets/maestro/v1.0.0/'
'maestro-v1.0.0_ns_wav_train.tfrecord@10',
process_for_training=False),
DatasetConfig(
'test',
'gs://magentadata/datasets/maestro/v1.0.0/'
'maestro-v1.0.0_ns_wav_test.tfrecord@10',
process_for_training=False),
DatasetConfig(
'validation',
'gs://magentadata/datasets/maestro/v1.0.0/'
'maestro-v1.0.0_ns_wav_validation.tfrecord@10',
process_for_training=False),
]
SemisupervisedExamplesConfig = collections.namedtuple(
'SemisupervisedExamplesConfig', ('examples_path',
'batch_ratio',
'label_ratio'))
| 32.052632
| 74
| 0.682759
|
ce0718eb5943e5669e812f44988d7dffd4898237
| 1,250
|
py
|
Python
|
data_structures/hashing/double_hash.py
|
Morre525/Python
|
aaf8fe67d3a792ca4bf0ab89dae276d95e61b109
|
[
"MIT"
] | null | null | null |
data_structures/hashing/double_hash.py
|
Morre525/Python
|
aaf8fe67d3a792ca4bf0ab89dae276d95e61b109
|
[
"MIT"
] | null | null | null |
data_structures/hashing/double_hash.py
|
Morre525/Python
|
aaf8fe67d3a792ca4bf0ab89dae276d95e61b109
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from hash_table import HashTable
from number_theory.prime_numbers import next_prime, check_prime
class DoubleHash(HashTable):
"""
Hash Table example with open addressing and Double Hash
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __hash_function_2(self, value, data):
next_prime_gt = (
next_prime(value % self.size_table)
if not check_prime(value % self.size_table)
else value % self.size_table
) # gt = bigger than
return next_prime_gt - (data % next_prime_gt)
def __hash_double_function(self, key, data, increment):
return (increment * self.__hash_function_2(key, data)) % self.size_table
def _colision_resolution(self, key, data=None):
i = 1
new_key = self.hash_function(data)
while self.values[new_key] is not None and self.values[new_key] != key:
new_key = (
self.__hash_double_function(key, data, i)
if self.balanced_factor() >= self.lim_charge
else None
)
if new_key is None:
break
else:
i += 1
return new_key
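# Illustrative sketch (not part of the original module; table size and inputs are
# hypothetical). It reproduces the probe arithmetic of __hash_function_2 /
# __hash_double_function without requiring a populated HashTable instance.
if __name__ == "__main__":
    size_table = 8  # assumed table size
    value, data = 10, 99  # hypothetical key/value inputs
    next_prime_gt = (
        next_prime(value % size_table)
        if not check_prime(value % size_table)
        else value % size_table
    )
    hash_2 = next_prime_gt - (data % next_prime_gt)
    for increment in range(1, 4):
        print("probe", increment, "->", (increment * hash_2) % size_table)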
| 29.069767
| 80
| 0.5992
|
7aed42be873f673922f7a287421c064ebac114f7
| 3,150
|
py
|
Python
|
COGCCpy/production.py
|
dianaceroallard/COGCCpy
|
cb6fe01dede7b5e861a8dbe4387597576e02e665
|
[
"MIT"
] | 1
|
2021-02-02T21:49:05.000Z
|
2021-02-02T21:49:05.000Z
|
COGCCpy/production.py
|
dianaceroallard/COGCCpy
|
cb6fe01dede7b5e861a8dbe4387597576e02e665
|
[
"MIT"
] | null | null | null |
COGCCpy/production.py
|
dianaceroallard/COGCCpy
|
cb6fe01dede7b5e861a8dbe4387597576e02e665
|
[
"MIT"
] | null | null | null |
import pandas as pd
import requests
import time
class production:
'''
Access production from COGCC.
'''
def __init__(self, apis):
self.apis = [x.replace('-', '') for x in apis]
        # Check that all APIs are in Colorado
state_codes = [x[:2] for x in self.apis if x[:2] != '05']
if len(state_codes) > 0:
raise ValueError('State code found outside of Colorado:', state_codes)
self.df = pd.DataFrame()
self.pull_iterator()
def pull_iterator(self):
'''
        Iterates through APIs, separates components, and sends them to be pulled.
'''
total = len(self.apis)
i = 0
for api in self.apis:
i += 1
prec = str(int(100 * i / total)) + '% complete '
print(api, prec, end='\r')
# Testing so far shows that the "APIWB" and "Year" do not limit results.
api_wb = 'All'
year = 'All'
# County code
api_co = api[2:5]
# Well code
api_well = api[5:10]
self.pull_prod(api_co, api_well, api_wb, year)
time.sleep(5)
def pull_prod(self, api_co, api_well, api_wb, year):
'''
Pulls production data from COGCC.
'''
url = 'https://cogcc.state.co.us/production/?&apiCounty=' + api_co + '&apiSequence=' + api_well + '&APIWB=' + api_wb + '&Year=' + year
r = requests.get(url)
if r.status_code == 200:
if 'No Records found' not in r.text:
df = pd.read_html(r.text)[1]
                # Format columns
cols = ['Days Produced', 'BOM Inventory', 'Oil Produced', 'Oil Sold', 'Oil Adjustment', 'EOM Inventory',
'Oil Gravity', 'Gas Produced', 'Gas Flared', 'Gas Sold', 'Gas Used', 'Water Volume']
for col in cols:
df[col].fillna(0, inplace=True)
df[col] = df[col].astype(int)
df['First of Month'] = pd.to_datetime(df['First of Month'])
# Format API Codes
df['API County'] = df['API County'].astype(str)
df['API County'] = df['API County'].apply(lambda x: '{0:0>3}'.format(x))
df['API Sequence'] = df['API Sequence'].astype(str)
df['API Sequence'] = df['API Sequence'].apply(lambda x: '{0:0>5}'.format(x))
df['API Sidetrack'] = df['API Sidetrack'].astype(str)
df['API Sidetrack'] = df['API Sidetrack'].apply(lambda x: '{0:0>2}'.format(x))
# Set API_Label Column
df['API_Label'] = '05-' + df['API County'] + '-' + df['API Sequence'] + '-' + df['API Sidetrack']
# Reorder with Flexibility
cols = list(df)
cols.remove('API_Label')
cols = ['API_Label'] + cols
df = df[cols]
self.df = pd.concat([self.df, df], ignore_index=True)
else:
print('Bad Response:', r.status_code)
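# Illustrative usage sketch (the API number below is hypothetical; running this
# performs live requests against the COGCC website):
if __name__ == '__main__':
    wells = production(['05-123-45678'])
    print(wells.df.head())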
| 36.206897
| 143
| 0.492698
|
34e405c27bda1d09dd2149ceebfd206e58e65d96
| 2,703
|
py
|
Python
|
mycroft/util/lang/format_pt.py
|
sowmyavasudeva/SmartBookmark
|
797a90cfea624d2ab977e5aa78614c0db1177a23
|
[
"Apache-2.0"
] | 2
|
2018-12-16T15:55:04.000Z
|
2018-12-29T19:52:38.000Z
|
mycroft/util/lang/format_pt.py
|
sowmyavasudeva/SmartBookmark
|
797a90cfea624d2ab977e5aa78614c0db1177a23
|
[
"Apache-2.0"
] | 3
|
2021-06-08T21:06:32.000Z
|
2022-01-13T02:22:38.000Z
|
mycroft/util/lang/format_pt.py
|
sowmyavasudeva/SmartBookmark
|
797a90cfea624d2ab977e5aa78614c0db1177a23
|
[
"Apache-2.0"
] | 1
|
2020-12-06T18:38:41.000Z
|
2020-12-06T18:38:41.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.util.lang.format_common import convert_to_mixed_fraction
FRACTION_STRING_PT = {
2: 'meio',
3: u'terço',
4: 'quarto',
5: 'quinto',
6: 'sexto',
7: u'sétimo',
8: 'oitavo',
9: 'nono',
10: u'décimo',
11: 'onze avos',
12: 'doze avos',
13: 'treze avos',
14: 'catorze avos',
15: 'quinze avos',
16: 'dezasseis avos',
17: 'dezassete avos',
18: 'dezoito avos',
19: 'dezanove avos',
20: u'vigésimo',
30: u'trigésimo',
100: u'centésimo',
1000: u'milésimo'
}
def nice_number_pt(number, speech, denominators):
""" Portuguese helper for nice_number
    This function formats a float into a human-understandable string. For example,
4.5 becomes "4 e meio" for speech and "4 1/2" for text
Args:
number (int or float): the float to format
speech (bool): format for speech (True) or display (False)
denominators (iter of ints): denominators to use, default [1 .. 20]
Returns:
(str): The formatted string.
"""
result = convert_to_mixed_fraction(number, denominators)
if not result:
# Give up, just represent as a 3 decimal number
return str(round(number, 3))
whole, num, den = result
if not speech:
if num == 0:
# TODO: Number grouping? E.g. "1,000,000"
return str(whole)
else:
return '{} {}/{}'.format(whole, num, den)
if num == 0:
return str(whole)
# denominador
den_str = FRACTION_STRING_PT[den]
# fracções
if whole == 0:
if num == 1:
# um décimo
return_string = 'um {}'.format(den_str)
else:
# três meio
return_string = '{} {}'.format(num, den_str)
# inteiros >10
elif num == 1:
# trinta e um
return_string = '{} e {}'.format(whole, den_str)
# inteiros >10 com fracções
else:
# vinte e 3 décimo
return_string = '{} e {} {}'.format(whole, num, den_str)
# plural
if num > 1:
return_string += 's'
return return_string
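# Illustrative usage (mirrors the example in the docstring above; the default
# denominator range [1..20] is passed explicitly since the helper takes it as an
# argument):
if __name__ == '__main__':
    print(nice_number_pt(4.5, speech=True, denominators=range(1, 21)))   # "4 e meio"
    print(nice_number_pt(4.5, speech=False, denominators=range(1, 21)))  # "4 1/2"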
| 27.581633
| 75
| 0.603774
|
4d4bcea763da5198e10d61555adf015541914f45
| 5,360
|
py
|
Python
|
badtray.py
|
ataradov/badtray
|
63cea025c3553d7f36e8d7ab5a2fdad689d25340
|
[
"BSD-3-Clause"
] | 1
|
2021-11-20T16:55:18.000Z
|
2021-11-20T16:55:18.000Z
|
badtray.py
|
ataradov/badtray
|
63cea025c3553d7f36e8d7ab5a2fdad689d25340
|
[
"BSD-3-Clause"
] | null | null | null |
badtray.py
|
ataradov/badtray
|
63cea025c3553d7f36e8d7ab5a2fdad689d25340
|
[
"BSD-3-Clause"
] | 1
|
2021-11-22T17:00:25.000Z
|
2021-11-22T17:00:25.000Z
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021, Alex Taradov <alex@taradov.com>. All rights reserved.
import html
import os
import time
import http.server
import socketserver
import configparser
# curl -T test.bin -utest:12345 http://127.0.0.1:12345/test/1/test-linux.bin
#------------------------------------------------------------------------------
config = configparser.ConfigParser()
config.read('config.ini')
PORT = eval(config['main']['port'])
AUTH = config['main']['auth']
PATH = config['main']['path']
VPATH = config['main']['vpath']
MAX_SIZE = eval(config['main']['max_size'])
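# Example config.ini (values are illustrative, not taken from the original repository):
#
#   [main]
#   port = 12345
#   auth = Basic dGVzdDoxMjM0NQ==
#   path = ./files
#   vpath = /files
#   max_size = 64 * 1024 * 1024
#
# 'auth' is compared verbatim against the Authorization header; the value above is what
# the curl example (-utest:12345) would send. 'port' and 'max_size' are passed to eval().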
#------------------------------------------------------------------------------
STYLE = [
'<style>',
'* { font-family: Arial, Helvetica, sans-serif; background: #fff; }',
'table { border-spacing: 0px; border-style: none; border-color: #000000; border-collapse: collapse; }',
'th { border-width: 1px; border-style: solid; padding: 3pt 1em 3pt 1em; border-color: #000; background: #f0f0f0; }',
'td { border-width: 1px; border-style: solid; padding: 3pt 1em 3pt 1em; border-color: #000; background: #ffffff; }',
'</style>',
]
#------------------------------------------------------------------------------
def build_file_index(name):
path = os.path.join(PATH, name)
dir_list = os.listdir(path)
if 'index.html' in dir_list:
dir_list.remove('index.html')
dir_list.sort(key=int, reverse=True)
text = [
'<!doctype html>',
'<html lang=en>',
'<head>',
'<meta charset=utf-8>',
'<title>Binaries for %s</title>' % html.escape(name),
'\n'.join(STYLE),
'</head>',
'<body>',
'<table>'
    '<tr><th>Index</th><th>Created</th><th>Files</th></tr>',
]
for d in dir_list:
dir_path = os.path.join(path, d)
mtime = os.path.getmtime(dir_path)
last_mod = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(mtime))
files = os.listdir(dir_path)
files.sort()
files_str = ''.join(['<a href="%s">[%s]</a> ' % (html.escape(os.path.join(VPATH, name, d, f)), html.escape(f)) for f in files])
text += ['<tr><td>%s</td><td>%s</td><td>%s</td></tr>' % (html.escape(d), html.escape(last_mod), files_str)]
text += [
'</table>'
'</body>',
'</html>',
]
try:
open(os.path.join(path, 'index.html'), 'w').write('\n'.join(text))
except:
return False
return True
#------------------------------------------------------------------------------
def build_index():
dir_list = os.listdir(PATH)
if 'index.html' in dir_list:
dir_list.remove('index.html')
dir_list.sort()
text = [
'<!doctype html>',
'<html lang=en>',
'<head>',
'<meta charset=utf-8>',
'<title>Binaries</title>',
'\n'.join(STYLE),
'</head>',
'<body>',
'<p>Available binaries:</p>',
'<ul>'
]
for d in dir_list:
text += ['<li><a href="%s">%s</a></li>' % (html.escape(os.path.join(VPATH, d)), html.escape(d))]
text += [
'</ul>',
'</body>',
'</html>',
]
try:
open(os.path.join(PATH, 'index.html'), 'w').write('\n'.join(text))
except:
return False
for d in dir_list:
if not build_file_index(d):
return False
return True
#------------------------------------------------------------------------------
def name_valid(name):
if name in ['.', '..']:
return False
for c in name:
if not c.isalnum() and c not in ['-', '_', '.']:
return False
return True
#------------------------------------------------------------------------------
class CustomHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
  def send_reply(self, message):
    self.send_response(200)
    self.send_header('Content-type', 'text/plain')
    self.end_headers()
    self.wfile.write(('%s\n' % message).encode())
def do_PUT(self):
print('Request headers:')
print(self.headers)
if self.headers['Expect'] == '100-continue':
self.send_response(100)
self.end_headers()
if self.headers['Authorization'] != AUTH:
self.send_reply('Not authorized')
return
content_length = int(self.headers['Content-Length'], 0)
if content_length == 0 or content_length > MAX_SIZE:
self.send_reply('Invalid content length')
return
path = self.translate_path(self.path)
    if path is None:
self.send_reply('Invalid path')
return
print('Saving file %s, %d bytes' % (path, content_length))
try:
os.makedirs(os.path.dirname(path))
except FileExistsError:
pass
try:
open(path, 'wb').write(self.rfile.read(content_length))
except:
self.send_reply('File write failed')
return
if not build_index():
self.send_reply('Index build failed')
return
self.send_reply('OK')
def translate_path(self, path):
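    # e.g. the curl example above, PUT /test/1/test-linux.bin, maps to
    # os.path.join(PATH, 'test', '1', 'test-linux.bin')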
parts = path.strip('/').split('/')
if len(parts) != 3:
return None
for part in parts:
if not name_valid(part):
return None
if not parts[1].isdigit():
return None
return os.path.join(PATH, parts[0], parts[1], parts[2])
#------------------------------------------------------------------------------
socketserver.TCPServer.allow_reuse_address = True
print('Serving on port %d' % PORT)
httpd = socketserver.TCPServer(('', PORT), CustomHTTPRequestHandler)
httpd.serve_forever()
| 25.402844
| 136
| 0.546828
|
e0a5082a893ce64e0548b8f5492b2373944929df
| 23,888
|
py
|
Python
|
python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 1
|
2019-04-23T12:36:53.000Z
|
2019-04-23T12:36:53.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import os
import sys
from optimizer_factory import *
from google.protobuf import text_format
import paddle.fluid as fluid
from paddle.fluid.framework import Program
from paddle.fluid.incubate.fleet.base.fleet_base import Fleet
from paddle.fluid.incubate.fleet.base.fleet_base import Mode
from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer
from paddle.fluid.incubate.fleet.base.role_maker import MPISymetricRoleMaker
class PSLib(Fleet):
def __init__(self):
super(PSLib, self).__init__(Mode.PSLIB)
self._opt_info = None
self._local_ip = 0
self._fleet_ptr = None
self._main_programs = []
self._scopes = []
def init(self, role_maker=None):
super(PSLib, self).init(MPISymetricRoleMaker())
self._fleet_ptr = fluid.core.Fleet()
def init_worker(self):
"""
        init_worker(): will be called by user. When a user knows the current process is_worker(), he/she
            should call init_worker() to initialize global information about the worker and connect
            the worker with the pserver. You should run the startup program before init_worker.
Args:
executor(Executor): The executor to run for init server.
programs(Program|None): The program that need to run.
"""
if len(self._main_programs) == 0:
raise ValueError(
"You should run DistributedOptimizer.minimize() first")
if self._opt_info:
if "fleet_desc" in self._opt_info:
self._dist_desc_str = text_format.MessageToString(
self._opt_info["fleet_desc"])
self._dist_desc = self._opt_info["fleet_desc"]
else:
raise Exception(
"You should run DistributedOptimizer.minimize() first")
# barrier_all for init_server, wait for server starts
self._role_maker._barrier_all()
self.all_ips_ = self._role_maker._all_gather(self._local_ip)
self._fleet_ptr.init_worker(self._dist_desc_str, self.all_ips_,
self._role_maker._get_size(),
self._role_maker._get_rank())
# barrier_all for init_worker
self._role_maker._barrier_all()
# prepare for client to client communication
info = self._fleet_ptr.get_clients_info()
all_info = self._role_maker._worker_gather(info[0])
self._fleet_ptr.gather_clients(all_info)
self._fleet_ptr.create_client2client_connection()
# barrier for init model
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
tables = self._dist_desc.trainer_param.dense_table
for prog, scope in zip(self._main_programs, self._scopes):
prog_id = str(id(prog))
prog_conf = self._opt_info['program_configs'][prog_id]
prog_tables = {}
for key in prog_conf:
if "dense" not in key:
continue
for table_id in prog_conf[key]:
prog_tables[int(table_id)] = 0
for table in tables:
if int(table.table_id) not in prog_tables:
continue
var_name_list = []
for i in range(0, len(table.dense_variable_name)):
var_name = table.dense_variable_name[i]
if scope.find_var(var_name) is None:
raise ValueError(
"var " + var_name + " not found in scope, "
+ "you should run startup program first")
var_name_list.append(var_name)
self._fleet_ptr.init_model(scope,
int(table.table_id),
var_name_list)
# barrier for init model done
self._role_maker._barrier_worker()
else:
raise NameError(
"You should run DistributedOptimizer.minimize() first")
def init_server(self, model_dir=None, **kwargs):
"""
init_server() will be called by user. It will load model from model_dir.
Args:
model_dir(str): load model path, can be local or hdfs/afs path.
kwargs: user-defined attributes, currently support following:
model(int): load model mode.
0 is for load whole model,
1 is for load delta model (load diff),
default is 0.
Example:
>>> fleet.init_server("/you/path/to/model", mode = 0)
"""
mode = kwargs.get("mode", 0)
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
self._fleet_ptr.load_model(model_dir, mode)
self._role_maker._barrier_worker()
def run_server(self):
"""
        run_server(): will be called by user. When a user knows the current process is_server(), he/she
            should call run_server() to initialize global information about the parameter server
"""
if self._opt_info:
if "fleet_desc" in self._opt_info:
self._dist_desc_str = text_format.MessageToString(
self._opt_info["fleet_desc"])
self._dist_desc = self._opt_info["fleet_desc"]
else:
raise Exception(
"You should run DistributedOptimizer.minimize() first")
self._fleet_ptr.init_server(self._dist_desc_str,
self._role_maker._get_rank())
self._local_ip = self._fleet_ptr.run_server()
# barrier_all for init_server
self._role_maker._barrier_all()
self.all_ips_ = self._role_maker._all_gather(self._local_ip)
self._fleet_ptr.gather_servers(self.all_ips_,
self._role_maker._get_size())
# barrier_all for init_worker, wait all workers start
self._role_maker._barrier_all()
else:
raise Exception(
"You should run DistributedOptimizer.minimize() first")
def stop_worker(self):
"""
        stop_worker(): will be called after a user finishes his/her training task. The Fleet instance will be
            destroyed when stop_worker() is called.
"""
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
self._fleet_ptr.stop_server()
self._role_maker._barrier_worker()
self._role_maker._barrier_all()
self._role_maker._finalize()
def distributed_optimizer(self, optimizer, strategy={}):
"""
distributed_optimizer
Args:
optimizer(Optimizer): optimizer
strategy(dict): strategy
Examples:
.. code-block:: python
fleet.distributed_optimizer(optimizer)
Returns:
optimizer(DownpourOptimizer): downpour optimizer
"""
self._optimizer = DownpourOptimizer(optimizer, strategy)
return self._optimizer
def save_inference_model(self,
executor,
dirname,
feeded_var_names=None,
target_vars=None,
main_program=None,
export_for_deployment=True):
"""
save pserver model called from a worker
Args:
executor(Executor): fluid executor
dirname(str): save model path
feeded_var_names(list): default None
target_vars(list): default None
main_program(Program): default None
export_for_deployment(bool): default None
Examples:
.. code-block:: python
fleet.save_inference_model(dirname="hdfs:/my/path")
"""
self._fleet_ptr.save_model(dirname)
def save_persistables(self, executor, dirname, main_program=None, **kwargs):
"""
        save persistable parameters,
when using fleet, it will save sparse and dense feature
Args:
executor(Executor): fluid executor
dirname(str): save path. It can be hdfs/afs path or local path
main_program(Program): fluid program, default None
kwargs: use define property, current support following
mode(int): 0 means save all pserver model,
1 means save delta pserver model (save diff),
2 means save xbox base,
3 means save batch model.
Example:
>>> fleet.save_persistables(dirname="/you/path/to/model", mode = 0)
"""
mode = kwargs.get("mode", 0)
self._fleet_ptr.client_flush()
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
self._fleet_ptr.save_model(dirname, mode)
self._role_maker._barrier_worker()
def save_cache_model(self, executor, dirname, main_program=None, **kwargs):
"""
save sparse cache table,
when using fleet, it will save sparse cache table
Args:
dirname(str): save path. It can be hdfs/afs path or local path
main_program(Program): fluid program, default None
kwargs: use define property, current support following
mode(int): define for feature extension in the future,
currently no use, will pass a default value 0
Example:
.. code-block:: python
>>> fleet.save_cache_model(None, dirname="/you/path/to/model", mode = 0)
"""
mode = kwargs.get("mode", 0)
self._fleet_ptr.client_flush()
self._role_maker._barrier_worker()
cache_threshold = 0.0
if self._role_maker.is_first_worker():
cache_threshold = self._fleet_ptr.get_cache_threshold()
#check cache threshold right or not
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
self._fleet_ptr.cache_shuffle(0, dirname, mode, cache_threshold)
self._role_maker._barrier_worker()
feasign_num = -1
if self._role_maker.is_first_worker():
feasign_num = self._fleet_ptr.save_cache(0, dirname, mode)
self._role_maker._barrier_worker()
return feasign_num
def shrink_sparse_table(self):
"""
shrink cvm of all sparse embedding in pserver, the decay rate
is defined as "show_click_decay_rate" in fleet_desc.prototxt
Example:
>>> fleet.shrink_sparse_table()
"""
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
for i in self._opt_info["fleet_desc"].trainer_param.sparse_table:
self._fleet_ptr.shrink_sparse_table(i.table_id)
self._role_maker._barrier_worker()
def shrink_dense_table(self, decay, emb_dim=11, scope=None, table_id=None):
"""
shrink batch_sum in pserver by multiplying by decay
Args:
decay(float): the decay rate, usually range in (0, 1)
emb_dim(int): one element's length in datanorm layer
scope(Scope): Scope object, default is fluid.global_scope()
table_id(int): table id of shrinking dense table. None means shrink all,
you should specify it when using multiple scopes,
default is None.
Example:
>>> fleet.shrink_dense_table(0.98, 11, myscope1, 1)
>>> fleet.shrink_dense_table(0.98, 11, myscope1, 2)
>>> fleet.shrink_dense_table(0.98, 11, myscope2, 3)
"""
if scope is None:
scope = fluid.global_scope()
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
for i in self._opt_info["fleet_desc"].trainer_param.dense_table:
if table_id is not None and table_id != i.table_id:
continue
var_list = [var for var in i.dense_variable_name]
skip = False
for var in var_list:
if scope.find_var(var) is None:
skip = True
break
if skip:
continue
self._fleet_ptr.shrink_dense_table(i.table_id, scope, var_list,
decay, emb_dim)
self._role_maker._barrier_worker()
def clear_model(self):
"""
clear_model() will be called by user. It will clear sparse model.
Examples:
.. code-block:: python
fleet.clear_model()
"""
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
self._fleet_ptr.clear_model()
self._role_maker._barrier_worker()
def load_one_table(self, table_id, model_path, **kwargs):
"""
load pslib model for one table or load params from paddle model
Args:
table_id(int): load table id
model_path(str): load model path, can be local or hdfs/afs path
kwargs(dict): user defined params, currently support following:
only for load pslib model for one table:
mode(int): load model mode. 0 is for load whole model, 1 is
for load delta model (load diff), default is 0.
only for load params from paddle model:
scope(Scope): Scope object
model_proto_file(str): path of program desc proto binary
file, can be local or hdfs/afs file
var_names(list): var name list
                    load_combine(bool): load from a single file or split param files,
                        default False.
Examples:
.. code-block:: python
# load pslib model for one table
fleet.load_one_table(0, "hdfs:/my_fleet_model/20190714/0/")
fleet.load_one_table(1, "hdfs:/xx/xxx", mode = 0)
# load params from paddle model
fleet.load_one_table(2, "hdfs:/my_paddle_model/",
scope = my_scope,
model_proto_file = "./my_program.bin",
load_combine = False)
# below is how to save proto binary file
with open("my_program.bin", "wb") as fout:
my_program = fluid.default_main_program()
fout.write(my_program.desc.serialize_to_string())
"""
mode = kwargs.get("mode", 0)
scope = kwargs.get("scope", None)
model_proto_file = kwargs.get("model_proto_file", None)
var_names = kwargs.get("var_names", None)
load_combine = kwargs.get("load_combine", False)
self._role_maker._barrier_worker()
if scope is not None and model_proto_file is not None:
self._load_one_table_from_paddle_model(scope, table_id, model_path,
model_proto_file, var_names,
load_combine)
elif self._role_maker.is_first_worker():
self._fleet_ptr.load_model_one_table(table_id, model_path, mode)
self._role_maker._barrier_worker()
def _load_one_table_from_paddle_model(self,
scope,
table_id,
model_path,
model_proto_file,
var_names=None,
load_combine=False):
"""
load params from paddle model, and push params to pserver
Args:
scope(Scope): Scope object
table_id(int): the id of table to load
model_path(str): path of paddle model, can be local or hdfs/afs file
model_proto_file(str): path of program desc proto binary file,
can be local or hdfs/afs file
var_names(list): load var names
            load_combine(bool): load from a single file or split param files
"""
self._role_maker._barrier_worker()
if self._role_maker.is_first_worker():
# get fs config from fleet_desc
fs_name = self._opt_info["fleet_desc"].fs_client_param.uri
fs_ugi = self._opt_info["fleet_desc"].fs_client_param.user + "," + \
self._opt_info["fleet_desc"].fs_client_param.passwd
hadoop_bin = self._opt_info["fleet_desc"].fs_client_param.hadoop_bin
# download model_path if it's hdfs/afs
if model_path.startswith("hdfs:") or model_path.startswith("afs:"):
dest = "./model_for_load_table_%s" % table_id
cmd = hadoop_bin + " fs -D fs.default.name=" + fs_name + \
" -D hadoop.job.ugi=" + fs_ugi + " -get " + model_path + \
" " + dest
ret = os.system(cmd)
if ret != 0:
raise RuntimeError("download model failed")
model_path = dest
# download model_proto_file if it's hdfs/afs
if model_proto_file.startswith("hdfs:") or \
model_proto_file.startswith("afs:"):
dest = "./model_proto_file_for_load_table_%s" % table_id
cmd = hadoop_bin + " fs -D fs.default.name=" + fs_name + \
" -D hadoop.job.ugi=" + fs_ugi + " -get " + \
model_proto_file + " " + dest
ret = os.system(cmd)
if ret != 0:
raise RuntimeError("download model proto file failed")
model_proto_file = dest
for i in self._opt_info["fleet_desc"].trainer_param.dense_table:
if table_id is not None and table_id != i.table_id:
continue
table_var_names = [var for var in i.dense_variable_name]
skip = False
for var in table_var_names:
if scope.find_var(var) is None:
skip = True
break
if skip:
continue
self._fleet_ptr.load_from_paddle_model(
scope, table_id, var_names, model_path, model_proto_file,
table_var_names, load_combine)
self._role_maker._barrier_worker()
def _set_opt_info(self, opt_info):
"""
this function saves the result from DistributedOptimizer.minimize()
"""
self._opt_info = opt_info
fleet = PSLib()
class DownpourOptimizer(DistributedOptimizer):
"""
DistributedOptimizer is a wrapper for paddle.fluid.optimizer
A user should pass a paddle.fluid.optimizer to DistributedOptimizer
minimize() function is implemented.
DistributedOptimizer is the starting point for a user who wants to
run distributed training. The optimized information will be stored in
Fleet() instance who holds the global information about current distributed
training.
Args:
optimizer(Optimizer): subclass of Optimizer.
strategy(any): config for DownpourOptimizer.
Returns:
None
"""
def __init__(self, optimizer, strategy=None):
super(DownpourOptimizer, self).__init__(optimizer, strategy)
self._optimizer = optimizer
self._optimizer_name = "Distributed%s" % optimizer.type.capitalize()
if optimizer.type != "adam":
print("Currently, distributed optimizer only support Adam"
"Will config built-in adam for you."
"We will support more functions in DistributedOptimizer",
sys.stderr)
self._optimizer_name = "DistributedAdam"
self._distributed_optimizer = globals()[self._optimizer_name](optimizer)
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
"""
Currently, backward function can not be called through DistributedOptimizer
"""
raise NotImplementedError()
def apply_gradients(self, params_grads):
"""
Currently, apply_gradients function can not be called through DistributedOptimizer
"""
raise NotImplementedError()
def minimize(self,
losses,
scopes=None,
startup_programs=None,
parameter_list=None,
no_grad_set=None):
"""
        minimize a program through loss; loss can be a list in DistributedOptimizer.
        Note that in parameter server mode, a worker will not get anything about optimize_ops
        because optimizer algorithms run on the pserver side. We will make this usable in the pserver
        process, but currently the optimization part is written into Fleet(). A user does not
        need to care about how to start up a pserver node.
Args:
losses (Variable|Variable List): loss variable or loss variable list to run optimization.
scopes (Scope| Scope List): scope instance.
startup_programs (Program|Program List): startup_program for initializing parameters
in `parameter_list`.
parameter_list (list): list of Variables to update.
no_grad_set (set|None): set of Variables should be ignored.
Returns:
tuple: (optimize_ops, params_grads) which are, list of operators appended;
and list of (param, grad) Variables pair for optimization.
"""
if not isinstance(losses, list):
losses = [losses]
optimize_ops, param_grads, opt_info = \
self._distributed_optimizer._minimize(
losses,
startup_programs,
parameter_list,
no_grad_set,
self._strategy)
fleet._set_opt_info(opt_info)
programs = [loss.block.program for loss in losses]
if scopes is None:
scopes = [fluid.global_scope()] * len(programs)
if len(scopes) != len(programs):
raise ValueError(
"You should make sure len(scopes) == len(programs) or set scopes None"
)
fleet._main_programs = programs
fleet._scopes = scopes
return [optimize_ops, param_grads]
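# Illustrative usage sketch (schematic, based only on the methods defined above;
# `adam_optimizer` and `loss` are assumed to be built elsewhere with paddle.fluid,
# and the launcher decides whether a process acts as a pserver or a worker):
#
#   fleet.init()
#   optimizer = fleet.distributed_optimizer(adam_optimizer)
#   optimizer.minimize(loss)
#   # on a parameter-server process:
#   fleet.run_server()
#   # on a worker process:
#   fleet.init_worker()
#   ...  # run training, then optionally fleet.save_persistables(exe, dirname)
#   fleet.stop_worker()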
| 41.32872
| 103
| 0.571207
|
689efb23435b9593a08b5289449f315cbb64aba9
| 369
|
py
|
Python
|
extract1000.py
|
lavizhao/keyword
|
f2dc25bfa71b8734ee1140672a5dc1875814453b
|
[
"Apache-2.0"
] | 3
|
2016-04-05T23:14:24.000Z
|
2017-05-30T12:49:06.000Z
|
extract1000.py
|
lavizhao/keyword
|
f2dc25bfa71b8734ee1140672a5dc1875814453b
|
[
"Apache-2.0"
] | null | null | null |
extract1000.py
|
lavizhao/keyword
|
f2dc25bfa71b8734ee1140672a5dc1875814453b
|
[
"Apache-2.0"
] | null | null | null |
#coding: utf-8
'''
Extract 400 rows of training data, used to experiment with the effect of preprocessing, etc.
'''
import csv
if __name__ == '__main__':
f = open("data/new_train.csv")
a = 0
sample_size = 100
train = []
for a in range(sample_size):
row = f.readline()
train.append(row)
f.close()
t = open("data/sub_train.csv","w")
for row in train:
t.write(row)
| 13.666667
| 38
| 0.536585
|
36fbc0c9d6725d9ff8198c1ca1c28bb07c910031
| 33,432
|
py
|
Python
|
l2s-sdn-framework-app.py
|
Networks-it-uc3m/software-driven-l2-communications
|
c7d9a24a0741afe728b7f703ce0f43e400b162a2
|
[
"Apache-2.0"
] | null | null | null |
l2s-sdn-framework-app.py
|
Networks-it-uc3m/software-driven-l2-communications
|
c7d9a24a0741afe728b7f703ce0f43e400b162a2
|
[
"Apache-2.0"
] | null | null | null |
l2s-sdn-framework-app.py
|
Networks-it-uc3m/software-driven-l2-communications
|
c7d9a24a0741afe728b7f703ce0f43e400b162a2
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
from ryu.app.wsgi import ControllerBase
from ryu.app.wsgi import Response
from ryu.app.wsgi import route
from ryu.app.wsgi import WSGIApplication
from ryu.base import app_manager
from ryu.lib import dpid as dpid_lib
from ryu.topology.api import get_switch, get_link, get_host, event
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv6
import networkx as nx
# REST API for switch configuration
#
# get all the switches
# GET /v1.0/topology/switches
#
# get the switch
# GET /v1.0/topology/switches/<dpid>
#
# get all the links
# GET /v1.0/topology/links
#
# get the links of a switch
# GET /v1.0/topology/links/<dpid>
#
# get all the hosts
# GET /v1.0/topology/hosts
#
# get the hosts of a switch
# GET /v1.0/topology/hosts/<dpid>
#
# where
# <dpid>: datapath id in 16 hex
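#
# custom endpoints added by this application (see TopologyController below):
#
# create an inter-site flow path between the preconfigured edge switches
# GET /v1.0/topology/create_flowpath
#
# print the stored paths and the current topology graph
# GET /v1.0/topology/print
#
# delete a stored path and its OpenFlow rules by cookie
# GET /v1.0/topology/delete_path/<cookie>
#
# where
# <cookie>: integer identifier assigned when the path was created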
class TopologyAPI(app_manager.RyuApp):
_CONTEXTS = {
'wsgi': WSGIApplication
}
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(TopologyAPI, self).__init__(*args, **kwargs)
wsgi = kwargs['wsgi']
wsgi.register(TopologyController, {'topology_api_app': self})
self.mac_to_port = {} # Dictionary mapping mac addresses to switch ports
self.logger.setLevel(logging.INFO)
self.graph = nx.Graph()
self.topology_api_app = self
self.switch_port_to_hosts = {}
self.paths = []
self.last_cookie = 0
# Counter for increasing weights in the second flow-path creation
self.counter = 0
# Function for adding a flow entry into the switches
def add_flow(self, datapath, priority, match, actions, cookie=0, buffer_id=None):
of_proto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(of_proto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, cookie=cookie, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, cookie=cookie, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
# Function for deleting paths. It deletes a flow in a specific switch (datapath) with a specific cookie
def delete_flow(self, datapath, cookie):
of_proto = datapath.ofproto
parser = datapath.ofproto_parser
mod = parser.OFPFlowMod(
datapath=datapath,
cookie=cookie,
cookie_mask=0xFFFFFFFFFFFFFFFF,
table_id=of_proto.OFPTT_ALL,
command=of_proto.OFPFC_DELETE,
out_port=of_proto.OFPP_ANY,
out_group=of_proto.OFPG_ANY
)
print("Sending OF command to delete rule...")
print(mod)
datapath.send_msg(mod)
# Function for calculating list of available paths among two nodes
def calculate_paths(self, src_node, dst_node, weight=None):
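        # Note: nx.shortest_simple_paths yields simple paths ordered from shortest to
        # longest (by total 'weight' when given). For a toy graph with edges A-B, B-C
        # and A-C, the result for (A, C) would be [['A', 'C'], ['A', 'B', 'C']].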
paths = list(nx.shortest_simple_paths(self.graph, src_node, dst_node, weight=weight))
print("Calculating the available paths")
return paths
# TODO: Function to check if the stored path is feasible to still work once a link is down
def check_feasible_path(self, path_to_check):
feasible = False
src_node = path_to_check.get('src')
dst_node = path_to_check.get('dst')
checking_path = path_to_check.get('path')
shortest_simple_paths = self.calculate_paths(src_node, dst_node)
print('Checking if a path is feasible even after a link down... ')
print('Path to check: {0}'.format(path_to_check))
print('Shortest simple paths to check: {0}'.format(shortest_simple_paths))
        # Check if any of the calculated paths matches the stored one. If they match, the path is still feasible.
        # All candidate paths must be checked so that a stored path is not wrongly discarded just because the
        # shortest path between the two nodes has changed.
if isinstance(shortest_simple_paths, list):
# It is a list of lists
for item in shortest_simple_paths:
if item == checking_path:
feasible = True
else:
# There is only one path obtained
if shortest_simple_paths == checking_path:
feasible = True
if feasible:
print("The stored path still is feasible. Do not change it!")
else:
print("The stored path is not feasible anymore! Creating a new one...")
return feasible
# Function for establishing the OF rules and connect sites
# TODO: (save the list of paths and the associated cookie, done) and check this info before adding a new one
def create_flowpath(self, list_available_paths, cookie=None):
        # The cookie argument allows modifying/updating a stored path while keeping its cookie
self.logger.info("-- List of available paths: %s", list_available_paths)
# Selects the shortest simple path from the available paths.
# ensure that there is a list a lists with different paths. Otherwise, the list of available paths
# is the own path to be selected
if isinstance(list_available_paths[0], list):
selected_path = list_available_paths[0]
else:
selected_path = list_available_paths
# Selects the longest simple path from the available paths
# selected_path = list_available_paths[len(list_available_paths) - 1]
self.logger.info("- Selected path from the available paths: %s", selected_path)
if cookie is None:
# To avoid selecting cookies already used, use the length+1 of the list storing the defined paths
self.last_cookie = selected_cookie = self.last_cookie + 1
else:
selected_cookie = cookie
path_to_store = {"cookie": selected_cookie,
"src": selected_path[0],
"dst": selected_path[len(selected_path) - 1],
"path": selected_path}
self.paths.append(path_to_store)
# Messages for debugging: Delete
print("-----> Stored path: {0}".format(self.paths))
# Information that could be read from a file since it is given by the MANO entity
if selected_cookie == 1:
port_sw_a_to_host = 1
port_sw_c_to_host = 1
mac_host_a = "fa:16:3e:7a:cd:0f"
mac_host_c = "fa:16:3e:cd:52:83"
else:
port_sw_a_to_host = 4
port_sw_c_to_host = 4
mac_host_a = "fa:16:3e:ef:33:81"
mac_host_c = "fa:16:3e:4f:25:26"
# Go through the elements of the selected path to install the appropriate OF rules
for i in selected_path:
datapath = self.graph.nodes[i]["datapath"]
ofproto = datapath.ofproto
ofproto_parser = datapath.ofproto_parser
if selected_path.index(i) == 0:
print("*** First element of the selected path: {0}".format(i))
print("*** Next element of the selected path: {0}".format(selected_path[selected_path.index(i) + 1]))
# First element, install OF rules considering the MAC addresses
# Dictionary with the info of the link between the first switch and the next switch
data_info = self.graph.get_edge_data(i, selected_path[selected_path.index(i) + 1])
out_port = data_info.get('port_dpid_' + str(i))
# First rule: steer traffic from the connected host to the following switch/hop
print("** First rule: steer traffic in switch {0} with mac addr src {1} through port {2}".
format(i, mac_host_a, out_port))
self.logger.info("* Installing rule in the dpid %s", i)
match = ofproto_parser.OFPMatch(eth_src=mac_host_a)
actions = [ofproto_parser.OFPActionOutput(out_port)]
self.add_flow(datapath, 125, match, actions, selected_cookie)
# Second rule: steer traffic to the connected host
out_port = port_sw_a_to_host
print("** Second rule: steer traffic in switch {0} with mac addr src {1} through port {2}".
format(i, mac_host_c, out_port))
self.logger.info("* Installing rule in the dpid %s", i)
match = ofproto_parser.OFPMatch(eth_src=mac_host_c)
actions = [ofproto_parser.OFPActionOutput(out_port)]
self.add_flow(datapath, 125, match, actions, selected_cookie)
elif selected_path.index(i) == len(selected_path) - 1:
# Last element, install OF rules considering the MAC addresses
print("*** Last element of the selected path: {0}".format(i))
# Dictionary with the info of the link between the last switch and the previous switch
data_info = self.graph.get_edge_data(i, selected_path[selected_path.index(i) - 1])
out_port = data_info.get('port_dpid_' + str(i))
print("** First rule: steer traffic in switch {0} with mac addr src {1} through port {2}".
format(i, mac_host_c, out_port))
self.logger.info("* Installing rule in the dpid %s", i)
match = ofproto_parser.OFPMatch(eth_src=mac_host_c)
actions = [ofproto_parser.OFPActionOutput(out_port)]
self.add_flow(datapath, 125, match, actions, selected_cookie)
out_port = port_sw_c_to_host
print("** Second rule: steer traffic in switch {0} with mac addr src {1} through port {2}".
format(i, mac_host_a, out_port))
self.logger.info("* Installing rule in the dpid %s", i)
match = ofproto_parser.OFPMatch(eth_src=mac_host_a)
actions = [ofproto_parser.OFPActionOutput(out_port)]
self.add_flow(datapath, 125, match, actions, selected_cookie)
else:
# Intermediate elements, install OF rules considering the next and previous connected switches
print("*** Intermediate element of the selected path: {0}".format(i))
# Dictionary with the info of the link between the i switch and the previous switch
data_info_in = self.graph.get_edge_data(i, selected_path[selected_path.index(i) - 1])
# Dictionary with the info of the link between the i switch and the next switch
data_info_out = self.graph.get_edge_data(i, selected_path[selected_path.index(i) + 1])
in_port = data_info_in.get('port_dpid_' + str(i))
out_port = data_info_out.get('port_dpid_' + str(i))
# Rule to allow traffic in the opposite direction
print("** First rule: steer traffic in switch {0} from in_port {1} through out_port {2}".
format(i, in_port, out_port))
self.logger.info("* Installing rule in the dpid %s", i)
match = ofproto_parser.OFPMatch(in_port=in_port)
actions = [ofproto_parser.OFPActionOutput(out_port)]
self.add_flow(datapath, 125, match, actions, selected_cookie)
# Rule to allow traffic in the opposite direction
print("** Second rule: other direction traffic in switch {0} with in_port {1} through out_port {2}".
format(i, out_port, in_port))
self.logger.info("* Installing rule in the dpid %s", i)
match = ofproto_parser.OFPMatch(in_port=out_port)
actions = [ofproto_parser.OFPActionOutput(in_port)]
self.add_flow(datapath, 125, match, actions, selected_cookie)
        # Increase the weights to avoid repeating the same path for the second flow
self.increase_path_weight(selected_path)
self.counter += 1
# Function for increasing all the edges of a path
def increase_path_weight(self, path):
if self.counter == 0:
print("-- Increasing weight of the path {0}".format(path))
for i in path:
if path.index(i) != len(path) - 1:
data_info = self.graph.get_edge_data(i, path[path.index(i) + 1])
weight_info = data_info.get("weight")
print("The weight of the edge is: {0}".format(weight_info))
print("Increasing the weight...")
self.graph[i][path[path.index(i) + 1]]['weight'] += 10
data_info = self.graph.get_edge_data(i, path[path.index(i) + 1])
weight_info = data_info.get("weight")
print("The weight of the edge is: {0}".format(weight_info))
# This function deletes the OF rules installed in the switches with a specific cookie
def delete_path(self, cookie):
print("Deleting path with cookie {0}".format(cookie))
# Look for the element of the list with the cookie argument
if len(self.paths) != 0:
print(self.paths)
for item in self.paths:
print(item)
if item["cookie"] == cookie:
# Go switch by switch and delete the flows associated to a cookie
path_to_delete = item["path"]
for i in path_to_delete:
print("Deleting OF rules of switch with dpid {0}".format(i))
datapath = self.graph.nodes[i]["datapath"]
self.delete_flow(datapath, cookie)
self.paths.remove(item)
print(self.paths)
else:
print("No cookie {0} founded".format(cookie))
else:
print("There is no path stored yet")
    # Function for getting the links from the controller and storing them in the graph if not present
def update_topology_links(self):
self.logger.info("-- Updating topology links...")
links_list = get_link(self.topology_api_app, None)
# Obtaining the links between switches
graph_links = [(format(link.src.dpid, "x").zfill(16), format(link.dst.dpid, "x").zfill(16),
{'port_dpid_' + str(format(link.src.dpid, "x").zfill(16)): link.src.port_no,
'port_dpid_' + str(format(link.dst.dpid, "x").zfill(16)): link.dst.port_no,
'weight': 1})
for link in links_list]
if len(list(self.graph.edges)) == 0:
self.graph.add_edges_from(graph_links)
else:
new_edges_list =[]
stored_graph_edges = list(self.graph.edges)
# If there are edges already stored, only add the new ones
for item in graph_links:
tuple_item = (item[0],item[1])
inverse_tuple_item = (item[1],item[0])
tuple_exist = False
for i in stored_graph_edges:
if tuple_item == i or inverse_tuple_item == i:
tuple_exist = True
break
if not tuple_exist:
new_edges_list.append(item)
# Adding new edges to the graph
if len(new_edges_list) > 0:
self.logger.info("- Adding new edges to the graph: {0}".format(new_edges_list))
#print(new_edges_list)
self.graph.add_edges_from(new_edges_list)
            # Check whether the function was called because of a downed link (remove links that no longer exist from the graph)
self.logger.info("- Verifying downed links..")
len_stored_graph_edges = len(stored_graph_edges)
len_graph_links = len(graph_links)
self.logger.info("- Length of requested links list: {0}".format(len_graph_links))
self.logger.info("- Length of stored links list: {0}".format(len_stored_graph_edges))
for item_stored in stored_graph_edges:
tuple_stored_item = (item_stored[0],item_stored[1])
inverse_tuple_stored_item = (item_stored[1],item_stored[0])
tuple_founded = False
for i in graph_links:
tuple_graph_link = (i[0],i[1])
if tuple_stored_item == tuple_graph_link or inverse_tuple_stored_item == tuple_graph_link:
# self.logger.info("- Item founded, breaking the loop...")
tuple_founded = True
break
if not tuple_founded:
self.logger.info("- Item not found, so it must be deleted: {0}".format(item_stored))
self.graph.remove_edge(item_stored[0],item_stored[1])
self.logger.info("-- Resulting edges stored in the graph after updating the edges of the topology: {}".format(list(self.graph.edges(data=True))))
# Function for updating the topology information stored in the Graph property
def update_topology(self, switch_list, links_list):
self.logger.info("-- Recalculating topology...")
# Obtaining the switches of the topology
graph_nodes_switches = []
switches = [switch.dp.id for switch in switch_list]
print('Printing the dpid in hex format: ')
for i_switch in switches:
graph_nodes_switches.append((format(i_switch, "x").zfill(16), {"type": "switch"}))
# graph_nodes_switches.append((i_switch, {"type": "switch"}))
# print(format(i_switch, "x").zfill(16))
print('Switches obtained by controller:')
print(graph_nodes_switches)
# Obtaining the links between switches
# graph_links = [(link.src.dpid, link.dst.dpid, {'port_dpid_' + str(link.src.dpid): link.src.port_no,
# 'port_dpid_' + str(link.dst.dpid): link.dst.port_no})
# for link in links_list]
graph_links = [(format(link.src.dpid, "x").zfill(16), format(link.dst.dpid, "x").zfill(16),
{'port_dpid_' + str(format(link.src.dpid, "x").zfill(16)): link.src.port_no,
'port_dpid_' + str(format(link.dst.dpid, "x").zfill(16)): link.dst.port_no,
'weight': 1})
for link in links_list]
print('Links obtained by controller:')
print(graph_links)
if len(list(self.graph.nodes)) == 0:
print('Empty graph. Adding new nodes and links...')
self.graph.add_nodes_from(graph_nodes_switches)
self.graph.add_edges_from(graph_links)
else:
print('Non-Empty graph. Updating nodes and links...')
new_graph = nx.Graph()
new_graph.add_nodes_from(graph_nodes_switches)
new_graph.add_edges_from(graph_links)
self.graph = new_graph
print('Nodes and links of supporting graph')
print(list(new_graph.nodes))
print(list(new_graph.edges))
print("List of nodes and links stored in the Graph:")
print(list(self.graph.nodes(data=True)))
print(list(self.graph.edges))
# Save Graph into a gml file to check updates
print('-----------------------------------')
nx.write_gml(self.graph, "topology-graph.gml")
print("List of nodes stored in the Graph:")
print(list(self.graph.nodes(data=True)))
print("List of links stored in the Graph:")
print(list(self.graph.edges))
print('-----------------------------------')
# Function to find the port of the switches connected to the infrastructure
# TODO: define properly the utility of this function to develop it
def find_ports_to_hosts(self):
# Getting the host list
print('####################### Obtaining Hosts Info #########################')
host_list = get_host(self.topology_api_app, None)
hosts = [host.to_dict() for host in host_list]
print('Number of hosts detected: {0}'.format(str(len(hosts))))
counter = 0
# Print all elements of hosts
for i in hosts:
print('------- Host number: {} ------------'.format(counter))
counter = counter + 1
if type(i) is dict:
for key, value in i.items():
print("Key: {0}; Value: {1}".format(key, value))
print('------------------------------------')
print('######################################################################')
# Function for handling switch features negotiation event, storing the switches in nodes of a graph
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def _switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# dpid = format(datapath.id, "d").zfill(16)
dpid = format(datapath.id, "x").zfill(16)
self.logger.info("+ Handling switch %s features event.", dpid)
# Storing the switch and its features in a graph node
print("+++ Storing the node in the graph from the switch_features_handler event")
if not self.graph.has_node(dpid):
self.graph.add_node(dpid, type="switch", datapath=datapath)
# self.graph.add_node(dpid, type="switch", of_proto=ofproto, of_proto_parser=parser, datapath=datapath)
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
# Function for handling Packet-In events
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
of_proto = datapath.ofproto
of_parser = datapath.ofproto_parser
in_port = msg.match['in_port']
# Get info about packets
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
# Ignore LLDP packets
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
return
# Ignore IPv6 packets
pkt_ipv6 = pkt.get_protocol(ipv6.ipv6)
if pkt_ipv6:
return
dst = eth.dst # Destination MAC address (string)
src = eth.src # Source MAC address (string)
dpid = format(datapath.id, "d").zfill(16)
self.mac_to_port.setdefault(dpid, {})
self.logger.info("Packet-in into switch %s in port: %s (src: %s; dst: %s)", dpid, in_port, src, dst)
self.logger.info("Discarding the incomming packets...")
## The next lines are commented to avoid simple switch controller operations
# learn a mac address to avoid FLOOD next time.
##self.mac_to_port[dpid][src] = in_port
##if dst in self.mac_to_port[dpid]:
## out_port = self.mac_to_port[dpid][dst]
##else:
## out_port = of_proto.OFPP_FLOOD
##actions = [of_parser.OFPActionOutput(out_port)]
### install a flow to avoid packet_in next time
##if out_port != of_proto.OFPP_FLOOD:
## match = of_parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
## # verify if we have a valid buffer_id, if yes avoid to send both
## # flow_mod & packet_out
## if msg.buffer_id != of_proto.OFP_NO_BUFFER:
## self.add_flow(datapath, 1, match, actions, msg.buffer_id)
## return
## else:
## self.add_flow(datapath, 1, match, actions)
##data = None
##if msg.buffer_id == of_proto.OFP_NO_BUFFER:
## data = msg.data
## out = of_parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
## in_port=in_port, actions=actions, data=data)
## datapath.send_msg(out)
# Function for handling switch enter event
@set_ev_cls(event.EventSwitchEnter)
def _switch_enter_handler(self, ev):
self.logger.info("+ Handling switch enter event.")
self.update_topology_links()
#self.update_topology(get_switch(self.topology_api_app, None), get_link(self.topology_api_app, None))
# TODO: Manage the situation of a link falling down (calculate new paths, install new rules, etc,)
# Function for handling switch ports status modification events
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
dpid = format(datapath.id, "d").zfill(16)
reason = msg.reason
port_no = msg.desc.port_no
# Logging about switch and its port status modification
self.logger.info("Port status modified in switch id: %s ", dpid)
of_proto = msg.datapath.ofproto
if reason == of_proto.OFPPR_ADD:
self.logger.debug("-- Port added %s", port_no)
elif reason == of_proto.OFPPR_DELETE:
self.logger.debug("-- Port deleted %s", port_no)
elif reason == of_proto.OFPPR_MODIFY:
self.logger.debug("-- Port modified %s", port_no)
#TODO: Update the topology, check affected paths, erase bad OF rules, recalculate them
self.update_topology_links()
print("Paths stored before the link is down: {0}".format(self.paths))
for item in self.paths:
print("+++ Checking the feasibility of path: {0}".format(item))
if self.check_feasible_path(item):
                    print('This path is still feasible, so it does not need to be modified')
else:
# Modify path
                    print('This path is not feasible anymore')
src_node = item.get('src')
dst_node = item.get('dst')
old_path = item.get('path')
cookie = item.get('cookie')
shortest_simple_path = self.calculate_paths(src_node, dst_node)[0]
print("Old stored path: {0}".format(old_path))
print("New path to store: {0}".format(shortest_simple_path))
print("Deleting old path...")
self.delete_path(cookie)
print('State of the stored path list: {0}'.format(self.paths))
self.create_flowpath(shortest_simple_path, cookie)
else:
self.logger.debug("Illegal port state %s %s", port_no, reason)
#self.update_topology(get_switch(self.topology_api_app, None), get_link(self.topology_api_app, None))
# Class with the API rest functionality definition
class TopologyController(ControllerBase):
def __init__(self, req, link, data, **config):
super(TopologyController, self).__init__(req, link, data, **config)
# This attribute allows to synchronize the controller class and the API
self.topology_api_app = data['topology_api_app']
# After this, we can get and set the attributes of the upper class (e.g., self.topology_api_app.paths)
@route('topology', '/v1.0/topology/switches',
methods=['GET'])
def list_switches(self, req, **kwargs):
return self._switches(req, **kwargs)
@route('topology', '/v1.0/topology/switches/{dpid}',
methods=['GET'], requirements={'dpid': dpid_lib.DPID_PATTERN})
def get_switch(self, req, **kwargs):
return self._switches(req, **kwargs)
@route('topology', '/v1.0/topology/links',
methods=['GET'])
def list_links(self, req, **kwargs):
return self._links(req, **kwargs)
@route('topology', '/v1.0/topology/links/{dpid}',
methods=['GET'], requirements={'dpid': dpid_lib.DPID_PATTERN})
def get_links(self, req, **kwargs):
return self._links(req, **kwargs)
@route('topology', '/v1.0/topology/hosts',
methods=['GET'])
def list_hosts(self, req, **kwargs):
return self._hosts(req, **kwargs)
@route('topology', '/v1.0/topology/hosts/{dpid}',
methods=['GET'], requirements={'dpid': dpid_lib.DPID_PATTERN})
def get_hosts(self, req, **kwargs):
return self._hosts(req, **kwargs)
# API call for connecting sites
@route('topology', '/v1.0/topology/create_flowpath',
methods=['GET'])
def create_flowpath(self, req, **kwargs):
return self._create_flowpath(req, **kwargs)
# API call for printing paths sites
@route('topology', '/v1.0/topology/print',
methods=['GET'])
def get_paths(self, req, **kwargs):
return self._paths(req, **kwargs)
# API call for deleting paths and OF rules
@route('topology', '/v1.0/topology/delete_path/{cookie}',
methods=['GET'])
def delete_path(self, req, **kwargs):
return self._delete_path(req, **kwargs)
def _switches(self, req, **kwargs):
dpid = None
if 'dpid' in kwargs:
dpid = dpid_lib.str_to_dpid(kwargs['dpid'])
switches = get_switch(self.topology_api_app, dpid)
body = json.dumps([switch.to_dict() for switch in switches])
return Response(content_type='application/json', body=body)
def _links(self, req, **kwargs):
dpid = None
if 'dpid' in kwargs:
dpid = dpid_lib.str_to_dpid(kwargs['dpid'])
links = get_link(self.topology_api_app, dpid)
body = json.dumps([link.to_dict() for link in links])
return Response(content_type='application/json', body=body)
def _hosts(self, req, **kwargs):
dpid = None
if 'dpid' in kwargs:
            print('dpid is not None when receiving API request')
dpid = dpid_lib.str_to_dpid(kwargs['dpid'])
print(dpid)
hosts = get_host(self.topology_api_app, dpid)
body = json.dumps([host.to_dict() for host in hosts])
return Response(content_type='application/json', body=body)
"""
From here on, all the code is related to the functionality extension for the paper
"""
def _create_flowpath(self, req, **kwargs):
# print('Reading API call parameters...')
# print(kwargs)
        # Info that should be passed by the MANO entity, or discovered by an application of the controller
switch_node_a = format(1, "x").zfill(16)
switch_node_c = format(3, "x").zfill(16)
metric = "weight"
# Get all the available simple paths (this gets a list of lists)
list_available_paths = self.topology_api_app.calculate_paths(switch_node_a, switch_node_c, metric)
self.topology_api_app.create_flowpath(list_available_paths)
#self.topology_api_app.increase_path_weight(list_available_paths[0])
#self.topology_api_app.increase_path_weight(list_available_paths[len(list_available_paths) - 1])
response = 'Received Request! Processing inter-site connection between sites...' + "<br>"
return Response(content_type='text/html', body=response)
def _paths(self, req, **kwargs):
#graph_nodes = list(self.topology_api_app.graph.nodes(data="type"))
graph_nodes = list(self.topology_api_app.graph.nodes(data=True))
# update edges
print("----- Showing the stored paths ---------")
print(self.topology_api_app.paths)
self.topology_api_app.update_topology_links()
graph_edges = self.topology_api_app.graph.edges.data()
response = "<b>+ Graph Nodes:</b> " + str(graph_nodes) + "<br>" + "<b>+ Graph Edges:</b> " + str(graph_edges)
print(response)
return Response(content_type='text/html', body=response)
def _delete_path(self, req, **kwargs):
print('Reading API call parameters...')
print(kwargs)
requested_cookie = kwargs.get("cookie")
self.topology_api_app.delete_path(int(requested_cookie))
response = "Received Request! Deleting path with cookie " + requested_cookie + " ..." + "<br>"
return Response(content_type='text/html', body=response)
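# Usage sketch (illustrative): assuming the Ryu WSGI server runs on localhost with
# its default port 8080, the REST endpoints defined above could be exercised with
# the standard library, e.g.:
#
#   from urllib.request import urlopen
#   print(urlopen('http://localhost:8080/v1.0/topology/switches').read())
#   print(urlopen('http://localhost:8080/v1.0/topology/create_flowpath').read())
#   print(urlopen('http://localhost:8080/v1.0/topology/print').read())
#   print(urlopen('http://localhost:8080/v1.0/topology/delete_path/1').read())
#
# The host, port and the cookie value 1 are assumptions for illustration only.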
| 46.823529
| 157
| 0.607681
|
f807e1a80d4011d6c26c4784470e1710ed7d7b29
| 103,413
|
py
|
Python
|
calfem/core.py
|
Karl-Eriksson/calfem-python
|
e9a88a85d3a73877ec99f7fbd1a296a44c3c9b22
|
[
"MIT"
] | 54
|
2016-04-11T19:12:13.000Z
|
2022-02-22T07:15:39.000Z
|
calfem/core.py
|
Karl-Eriksson/calfem-python
|
e9a88a85d3a73877ec99f7fbd1a296a44c3c9b22
|
[
"MIT"
] | 13
|
2019-07-01T19:48:38.000Z
|
2022-02-11T12:50:02.000Z
|
calfem/core.py
|
Karl-Eriksson/calfem-python
|
e9a88a85d3a73877ec99f7fbd1a296a44c3c9b22
|
[
"MIT"
] | 273
|
2017-08-01T10:29:09.000Z
|
2022-02-16T14:02:36.000Z
|
# -*- coding: iso-8859-15 -*-
"""
CALFEM Core module
Contains all the functions implementing CALFEM standard functionality
"""
from scipy.sparse.linalg import dsolve
import numpy as np
import logging as cflog
def error(msg):
"""Write ``msg`` to error log."""
cflog.error(" calfem.core: "+msg)
def info(msg):
"""Write ``msg`` to info log."""
cflog.info(" calfem.core: "+msg)
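# Note (assumption): the element routines in this module report problems through
# cferror()/cfinfo(); the aliases below make those names resolve to the loggers
# defined above in case they are not bound elsewhere in the module.
cferror = error
cfinfo = info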
def spring1e(ep):
"""
Compute element stiffness matrix for spring element.
:param float ep: spring stiffness or analog quantity (ep = k).
:return mat Ke: stiffness matrix, dim(Ke)= 2 x 2
"""
k = ep
return np.mat([[k,-k],[-k,k]],'d')
def spring1s(ep,ed):
"""
Compute element force in spring element (spring1e).
:param float ep: spring stiffness or analog quantity
:param list ed: element displacements [d0, d1]
:return float es: element force [N]
"""
k = ep
return k*(ed[1]-ed[0]);
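# Usage sketch (illustrative, the numbers are assumptions): a single spring with
# stiffness k = 1500.0 and prescribed end displacements 0.0 and 0.01.
#
#   Ke = spring1e(1500.0)               # 2 x 2 element stiffness matrix
#   es = spring1s(1500.0, [0.0, 0.01])  # element (spring) force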
def bar1e(ep):
"""
Compute element stiffness matrix for one dimensional bar element.
:param float ep: bar stiffness or analog quantity
:return mat Ke: stiffness matrix, dim(Ke)= 2 x 2
"""
k = ep
return np.mat([[k,-k],[-k,k]],'d')
def bar1s(ep,ed):
"""
Compute element force in one dimensional bar element (bar1e).
:param float ep: bar stiffness or analog quantity
:param list ed: element displacements [d0, d1]
:return float es: element force
"""
k = ep
return k*(ed[1]-ed[0]);
def bar2e(ex,ey,ep):
"""
Compute the element stiffness matrix for two dimensional bar element.
:param list ex: element x coordinates [x1, x2]
:param list ey: element y coordinates [y1, y2]
:param list ep: [E, A]: E - Young's modulus, A - Cross section area
:return mat Ke: stiffness matrix, [4 x 4]
"""
E=ep[0]
A=ep[1]
b = np.mat([[ex[1]-ex[0]],[ey[1]-ey[0]]])
L = np.asscalar(np.sqrt(b.T*b))
Kle = np.mat([[1.,-1.],[-1.,1.]])*E*A/L
n = np.asarray(b.T/L).reshape(2,)
G = np.mat([
[n[0],n[1],0.,0.],
[0.,0.,n[0],n[1]]
])
return G.T*Kle*G
def bar2g(ex,ey,ep,N):
"""
Compute element stiffness matrix for two dimensional geometric
nonlinear bar element.
:param list ex: element x coordinates [x1, x2]
:param list ey: element y coordinates [y1, y2]
:param list ep: element properties [E, A], E - Young's modulus, A - Cross section area
:param float N: normal force
:return mat Ke: stiffness matrix [4 x 4]
"""
E = ep[0]
A = ep[1]
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(2,)
G = np.mat([
[ n[0], n[1], 0., 0. ],
[-n[1], n[0], 0., 0. ],
[ 0., 0., n[0], n[1]],
[ 0., 0., -n[1], n[0]]
])
Kle = E*A/L*np.mat([
[ 1, 0,-1, 0],
[ 0, 0, 0, 0],
[-1, 0, 1, 0],
[ 0, 0, 0, 0]
])+N/L*np.mat([
[ 0, 0, 0, 0],
[ 0, 1, 0,-1],
[ 0, 0, 0, 0],
[ 0,-1, 0, 1]
])
return G.T*Kle*G
def bar2s(ex,ey,ep,ed):
"""
Compute normal force in two dimensional bar element.
:param list ex: element x coordinates [x1, x2]
:param list ey: element y coordinates [y1, y2]
:param list ep: element properties [E, A], E - Young's modulus, A - Cross section area
:param list ed: element displacements [u1, u2, u3, u4]
:return float N: element force [N]
"""
E=ep[0]
A=ep[1]
b = np.mat([[ex[1]-ex[0]],[ey[1]-ey[0]]])
L = np.asscalar(np.sqrt(b.T*b))
#Kle = np.mat([[1.,-1.],[-1.,1.]])*E*A/L
n = np.asarray(b.T/L).reshape(2,)
G = np.mat([
[n[0],n[1],0.,0.],
[0.,0.,n[0],n[1]]
])
u=np.asmatrix(ed).T
N=E*A/L*np.mat([[-1.,1.]])*G*u
return np.asscalar(N)
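# Usage sketch (illustrative, the numbers are assumptions): a plane bar between
# (0, 0) and (1.6, 0) with E = 2.1e11 and A = 1.0e-4, and a made-up displacement
# vector for the force recovery.
#
#   ex = [0.0, 1.6]
#   ey = [0.0, 0.0]
#   ep = [2.1e11, 1.0e-4]
#   Ke = bar2e(ex, ey, ep)                        # 4 x 4 stiffness matrix
#   N = bar2s(ex, ey, ep, [0., 0., 1.0e-3, 0.])   # normal force for the given ed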
def bar3e(ex,ey,ez,ep):
"""
Compute element stiffness matrix for three dimensional bar element.
:param list ex: element x coordinates [x1, x2]
:param list ey: element y coordinates [y1, y2]
:param list ez: element z coordinates [z1, z2]
:param list ep: element properties [E, A], E - Young's modulus, A - Cross section area
:return mat Ke: stiffness matrix, [6 x 6]
"""
E = ep[0]
A = ep[1]
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]],
[ez[1]-ez[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(3)
G = np.mat([
[ n[0], n[1], n[2], 0., 0., 0. ],
[ 0., 0., 0., n[0], n[1], n[2]]
])
Kle = E*A/L*np.mat([
[ 1,-1],
[-1, 1]
])
return G.T*Kle*G
def bar3s(ex,ey,ez,ep,ed):
"""
Compute normal force in three dimensional bar element.
:param list ex: element x coordinates [x1, x2]
:param list ey: element y coordinates [y1, y2]
:param list ez: element z coordinates [z1, z2]
:param list ep: element properties [E, A], E - Young's modulus, A - Cross section area
:param list ed: element displacements [u1, ..., u6]
:return float N: normal force
"""
E = ep[0]
A = ep[1]
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]],
[ez[1]-ez[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(3)
G = np.mat([
[ n[0], n[1], n[2], 0. , 0. , 0. ],
[ 0. , 0. , 0. , n[0], n[1], n[2]]
])
#Kle = E*A/L*np.mat([
# [ 1,-1],
# [-1, 1]
#])
u = np.asmatrix(ed).T
N = E*A/L*np.mat([[-1.,1.]])*G*u
return np.asscalar(N)
def beam2e(ex,ey,ep,eq=None):
"""
Compute the stiffness matrix for a two dimensional beam element.
:param list ex: element x coordinates [x1, x2]
:param list ey: element y coordinates [y1, y2]
:param list ep: element properties [E, A, I], E - Young's modulus, A - Cross section area, I - Moment of inertia
:param list eq: distributed loads, local directions [qx, qy]
:return mat Ke: element stiffness matrix [6 x 6]
:return mat fe: element stiffness matrix [6 x 1] (if eq!=None)
"""
b=np.mat([[ex[1]-ex[0]],[ey[1]-ey[0]]])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(2,)
E=ep[0]
A=ep[1]
I=ep[2]
qx=0.
qy=0.
if not eq is None:
qx=eq[0]
qy=eq[1]
Kle = np.mat([
[E*A/L, 0., 0., -E*A/L, 0., 0. ],
[ 0., 12*E*I/L**3., 6*E*I/L**2., 0., -12*E*I/L**3., 6*E*I/L**2. ],
[ 0., 6*E*I/L**2., 4*E*I/L, 0., -6*E*I/L**2., 2*E*I/L ],
[-E*A/L, 0., 0., E*A/L, 0., 0. ],
[ 0., -12*E*I/L**3.,-6*E*I/L**2., 0., 12*E*I/L**3.,-6*E*I/L**2. ],
[ 0., 6*E*I/L**2., 2*E*I/L, 0., -6*E*I/L**2., 4*E*I/L ]
])
fle=L*np.mat([qx/2, qy/2, qy*L/12, qx/2, qy/2, -qy*L/12]).T
G=np.mat([
[ n[0], n[1], 0., 0., 0., 0.],
[-n[1], n[0], 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., n[0], n[1], 0.],
[0., 0., 0., -n[1], n[0], 0.],
[0., 0., 0., 0., 0., 1.]
])
Ke=G.T*Kle*G
fe=G.T*fle
if eq is None:
return Ke
else:
return Ke,fe
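# Usage sketch (illustrative, the section data are assumptions): a horizontal beam
# of length 3 m with E = 2.1e11, A = 45.3e-4 and I = 2510e-8.
#
#   ex = [0.0, 3.0]
#   ey = [0.0, 0.0]
#   ep = [2.1e11, 45.3e-4, 2510e-8]
#   Ke = beam2e(ex, ey, ep)                      # stiffness matrix only
#   Ke, fe = beam2e(ex, ey, ep, eq=[0., -10e3])  # with equivalent nodal load vector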
def beam2s(ex,ey,ep,ed,eq=None,nep=None):
"""
Compute section forces in two dimensional beam element (beam2e).
Parameters:
ex = [x1 x2]
ey = [y1 y2] element node coordinates
ep = [E A I] element properties,
E: Young's modulus
A: cross section area
I: moment of inertia
ed = [u1 ... u6] element displacements
eq = [qx qy] distributed loads, local directions
nep number of evaluation points ( default=2 )
Returns:
es = [ N1 V1 M1 section forces, local directions, in
N2 V2 M2 n points along the beam, dim(es)= n x 3
.........]
edi = [ u1 v1 element displacements, local directions,
u2 v2 in n points along the beam, dim(es)= n x 2
.......]
eci = [ x1 local x-coordinates of the evaluation
x2 points, (x1=0 and xn=L)
...]
"""
EA=ep[0]*ep[1]
EI=ep[0]*ep[2]
b=np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(2,)
qx=0.
qy=0.
if not eq is None:
qx=eq[0]
qy=eq[1]
ne=2
if nep!=None:
ne = nep
C=np.mat([
[0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 1., 0.],
[L, 0., 0., 1., 0., 0.],
[0., L**3, L**2, 0., L, 1.],
[0., 3*L**2, 2*L, 0., 1., 0.]
])
G=np.mat([
[ n[0], n[1], 0., 0., 0., 0.],
[-n[1], n[0], 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., n[0], n[1], 0.],
[0., 0., 0., -n[1], n[0], 0.],
[0., 0., 0., 0., 0., 1.]
])
M=np.ravel(C.I*(G*np.asmatrix(ed).T-np.matrix([0., 0., 0., -qx*L**2/(2*EA), qy*L**4/(24*EI), qy*L**3/(6*EI)]).T))
A=np.matrix([M[0],M[3]]).T
B=np.matrix([M[1],M[2],M[4],M[5]]).T
x=np.asmatrix(np.arange(0.,L+L/(ne-1),L/(ne-1))).T
zero=np.asmatrix(np.zeros([len(x)])).T
one=np.asmatrix(np.ones([len(x)])).T
u=np.concatenate((x,one),1)*A-np.power(x,2)*qx/(2*EA)
du=np.concatenate((one,zero),1)*A-x*qx/EA
v=np.concatenate((np.power(x,3),np.power(x,2),x,one),1)*B+np.power(x,4)*qy/(24*EI)
d2v=np.concatenate((6*x,2*one,zero,zero),1)*B+np.power(x,2)*qy/(2*EI)
d3v=np.concatenate((6*one,zero,zero,zero),1)*B+x*qy/EI
N=EA*du
M=EI*d2v
V=-EI*d3v
edi=np.concatenate((u,v),1)
eci=x
es=np.concatenate((N,V,M),1)
return (es,edi,eci)
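# Usage sketch (illustrative, the displacements are made up): section forces
# evaluated in 11 points along a beam like the one in the beam2e sketch above.
#
#   ex = [0.0, 3.0]
#   ey = [0.0, 0.0]
#   ep = [2.1e11, 45.3e-4, 2510e-8]
#   ed = [0., 0., 0., 0., -0.01, 0.]
#   es, edi, eci = beam2s(ex, ey, ep, ed, eq=[0., -10e3], nep=11)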
def beam2t(ex,ey,ep,eq=None):
"""
Compute the stiffness matrix for a two dimensional elastic
Timoshenko beam element.
Parameters:
ex = [x1 x2]
ey = [y1 y2] element node coordinates
ep = [E G A I ks] element properties
E: Young's modulus
G: Shear modulus
A: Cross section area
I: Moment of inertia
ks: Shear correction factor
eq = [qx qy] distributed loads, local directions
Returns:
Ke element stiffness matrix (6 x 6)
fe element load vector (6 x 1)
"""
b = np.mat([[ex[1]-ex[0]],[ey[1]-ey[0]]])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(2)
E = ep[0]
Gm = ep[1]
A = ep[2]
I = ep[3]
ks = ep[4]
qx = 0.
qy = 0.
if eq != None:
qx = eq[0]
qy = eq[1]
m = (12/L**2)*(E*I/(Gm*A*ks))
Kle = E/(1+m)*np.mat([
[A*(1+m)/L, 0., 0., -A*(1+m)/L, 0., 0. ],
[0., 12*I/L**3., 6*I/L**2., 0., -12*I/L**3., 6*I/L**2. ],
[0., 6*I/L**2., 4*I*(1+m/4.)/L, 0., -6*I/L**2., 2*I*(1-m/2)/L],
[-A*(1+m)/L, 0., 0., A*(1+m)/L, 0., 0. ],
[0., -12*I/L**3.,-6*I/L**2., 0., 12*I/L**3.,-6*I/L**2. ],
[0., 6*I/L**2., 2*I*(1-m/2)/L, 0., -6*I/L**2., 4*I*(1+m/4)/L]
])
fle = L*np.mat([qx/2, qy/2, qy*L/12, qx/2, qy/2, -qy*L/12]).T
G = np.mat([
[ n[0], n[1], 0., 0., 0., 0.],
[-n[1], n[0], 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0., 0.],
[ 0., 0., 0., n[0], n[1], 0.],
[ 0., 0., 0., -n[1], n[0], 0.],
[ 0., 0., 0., 0., 0., 1.]
])
Ke = G.T*Kle*G
fe = G.T*fle
if eq == None:
return Ke
else:
return Ke,fe
def beam2ts(ex,ey,ep,ed,eq=None,nep=None):
"""
Compute section forces in two dimensional beam element (beam2e).
Parameters:
ex = [x1, x2]
ey = [y1, y2] element node coordinates
ep = [E,G,A,I,ks] element properties,
E: Young's modulus
G: shear modulus
A: cross section area
I: moment of inertia
ed = [u1, ... ,u6] element displacements
eq = [qx, qy] distributed loads, local directions
nep number of evaluation points ( default=2 )
Returns:
es = [[N1,V1,M1], section forces, local directions, in
[N2,V2,M2], n points along the beam, dim(es)= n x 3
..........]
edi = [[u1,v1,teta1], element displacements, local directions,
[u2,v2,teta2], and rotation of cross section at
.............] in n points along the beam, dim(es)= n x 2
(Note! Rotation of the cross section is not equal to dv/dx for Timoshenko beam element)
eci = [[x1], local x-coordinates of the evaluation
[x2], points, (x1=0 and xn=L)
....]
"""
EA = ep[0]*ep[2]
EI = ep[0]*ep[3]
GAK = ep[1]*ep[2]*ep[4]
alfa = EI/GAK
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b.T/L).reshape(2)
qx = 0.
qy = 0.
if eq != None:
qx = eq[0]
qy = eq[1]
ne = 2
if nep != None:
ne = nep
C = np.mat([
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 0., 6*alfa, 0., 0., 1., 0.],
[ L, 0., 0., 1., 0., 0.],
[ 0., L**3, L**2, 0., L, 1.],
[ 0., 3*(L**2+2*alfa), 2*L, 0., 1., 0.]
])
G = np.mat([
[ n[0], n[1], 0., 0., 0., 0.],
[-n[1], n[0], 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0., 0.],
[ 0., 0., 0., n[0], n[1], 0.],
[ 0., 0., 0.,-n[1], n[0], 0.],
[ 0., 0., 0., 0., 0., 1.]
])
M = np.ravel(C.I*(G*np.asmatrix(ed).T-np.mat([0., 0., 0., -qx*L**2/(2*EA), qy*L**4/(24*EI)-qy*L**2/(2*GAK), qy*L**3/(6*EI)]).T))
C2 = np.mat([M[0], M[3]]).T
C4 = np.mat([M[1], M[2], M[4], M[5]]).T
x = np.asmatrix(np.arange(0., L+L/(ne-1), L/(ne-1))).T
zero = np.asmatrix(np.zeros([len(x)])).T
one = np.asmatrix(np.ones([len(x)])).T
u = np.concatenate((x,one),1)*C2-qx/(2*EA)*np.power(x,2)
du = np.concatenate((one,zero),1)*C2-qx*x/EA
v = np.concatenate((np.power(x,3),np.power(x,2),x,one),1)*C4+qy/(24*EI)*np.power(x,4)-qy/(2*GAK)*np.power(x,2)
dv = np.concatenate((3*np.power(x,2),2*x,one,zero),1)*C4+qy*np.power(x,3)/(6*EI)-qy*x/GAK
teta = np.concatenate((3*(np.power(x,2)+2*alfa*one),2*x,one,zero),1)*C4+qy*np.power(x,3)/(6*EI)
dteta = np.concatenate((6*x,2*one,zero,zero),1)*C4+qy*np.power(x,2)/(2*EI)
N = EA*du
M = EI*dteta
V = GAK*(dv-teta)
es = np.concatenate((N,V,M),1)
edi = np.concatenate((u,v,teta),1)
eci = x
if nep != None:
return es,edi,eci
else:
return es
def beam2w(ex,ey,ep,eq=None):
"""
Compute the stiffness matrix for a two dimensional beam element
on elastic foundation.
Parameters:
ex = [x1, x2]
ey = [y1, y2] element node coordinates
ep = [E,A,I,ka,kt] element properties,
E: Young's modulus
A: cross section area
I: moment of inertia
ka: axial foundation stiffness
kt: transversal foundation stiffness
eq = [qx, qy] distributed loads, local directions
Returns:
Ke beam stiffness matrix (6 x 6)
fe element load vector (6 x 1)
"""
b = np.mat([[ex[1]-ex[0]],[ey[1]-ey[0]]])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b/L).reshape(2)
E,A,I,ka,kt = ep
qx = 0
qy = 0
if eq != None:
qx,qy = eq
K1 = np.mat([
[ E*A/L, 0, 0, -E*A/L, 0, 0 ],
[ 0, 12*E*I/L**3, 6*E*I/L**2, 0, -12*E*I/L**3, 6*E*I/L**2],
[ 0, 6*E*I/L**2, 4*E*I/L, 0, -6*E*I/L**2, 2*E*I/L ],
[-E*A/L, 0, 0, E*A/L, 0, 0 ],
[ 0, -12*E*I/L**3,-6*E*I/L**2, 0, 12*E*I/L**3,-6*E*I/L**2],
[ 0, 6*E*I/L**2, 2*E*I/L, 0, -6*E*I/L**2, 4*E*I/L ]
])
K2 = L/420*np.mat([
[ 140*ka, 0, 0, 70*ka, 0, 0 ],
[ 0, 156*kt, 22*kt*L, 0, 54*kt, -13*kt*L ],
[ 0, 22*kt*L, 4*kt*L**2, 0, 13*kt*L,-3*kt*L**2],
[ 70*ka, 0, 0, 140*ka, 0, 0 ],
[ 0, 54*kt, 13*kt*L, 0, 156*kt, -22*kt*L ],
[ 0, -13*kt*L,-3*kt*L**2, 0, -22*kt*L, 4*kt*L**2]
])
Kle = K1+K2
fle = L*np.mat([qx/2, qy/2, qy*L/12, qx/2, qy/2, -qy*L/12]).T
G = np.mat([
[ n[0], n[1], 0, 0, 0, 0],
[-n[1], n[0], 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, n[0], n[1], 0],
[ 0, 0, 0,-n[1], n[0], 0],
[ 0, 0, 0, 0, 0, 1]
])
Ke = G.T*Kle*G
fe = G.T*fle
if eq != None:
return Ke,fe
else:
return Ke
def beam2ws(ex,ey,ep,ed,eq=None):
"""
Compute section forces in a two dimensional beam element
on elastic foundation.
Parameters:
ex = [x1, x2]
ey = [y1, y2] element node coordinates
ep = [E,A,I,ka,kt] element properties,
E: Young's modulus
A: cross section area
I: moment of inertia
ka: axial foundation stiffness
kt: transversal foundation stiffness
ed = [u1, ... ,u6] element displacement vector
eq = [qx, qy] distributed loads, local directions
Returns:
es = [[N1, V1, M1],
[N2, V2, M2]] element forces, local direction
"""
if np.asmatrix(ed).shape[0] > 1:
cferror("Only one row is allowed in the ed matrix !!!")
return
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b/L).reshape(2,)
E,A,I,ka,kt = ep
qx = 0
qy = 0
if eq != None:
qx,qy = eq
K1 = np.mat([
[ E*A/L, 0, 0, -E*A/L, 0, 0 ],
[ 0, 12*E*I/L**3, 6*E*I/L**2, 0, -12*E*I/L**3, 6*E*I/L**2],
[ 0, 6*E*I/L**2, 4*E*I/L, 0, -6*E*I/L**2, 2*E*I/L ],
[-E*A/L, 0, 0, E*A/L, 0, 0 ],
[ 0, -12*E*I/L**3,-6*E*I/L**2, 0, 12*E*I/L**3,-6*E*I/L**2],
[ 0, 6*E*I/L**2, 2*E*I/L, 0, -6*E*I/L**2, 4*E*I/L ]
])
K2 = L/420*np.mat([
[ 140*ka, 0, 0, 70*ka, 0, 0 ],
[ 0, 156*kt, 22*kt*L, 0, 54*kt, -13*kt*L ],
[ 0, 22*kt*L, 4*kt*L**2, 0, 13*kt*L,-3*kt*L**2],
[ 70*ka, 0, 0, 140*ka, 0, 0 ],
[ 0, 54*kt, 13*kt*L, 0, 156*kt, -22*kt*L ],
[ 0, -13*kt*L,-3*kt*L**2, 0, -22*kt*L, 4*kt*L**2]
])
Kle = K1+K2
fle = L*np.mat([qx/2, qy/2, qy*L/12, qx/2, qy/2, -qy*L/12]).T
G = np.mat([
[ n[0], n[1], 0, 0, 0, 0],
[-n[1], n[0], 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, n[0], n[1], 0],
[ 0, 0, 0,-n[1], n[0], 0],
[ 0, 0, 0, 0, 0, 1]
])
P = Kle*G*np.asmatrix(ed).T-fle
es = np.mat([
[-P[0,0],-P[1,0],-P[2,0]],
[ P[3,0], P[4,0], P[5,0]]
])
return es
def beam2g(ex,ey,ep,N,eq=None):
"""
Compute the element stiffness matrix for a two dimensional
beam element with respect to geometric nonlinearity.
Parameters:
ex = [x1, x2]
ey = [y1, y2] element node coordinates
ep = [E,A,I] element properties;
E: Young's modulus
A: cross section area
I: moment of inertia
N axial force in the beam
eq distributed transverse load
Returns:
Ke element stiffness matrix (6 x 6)
fe element load vector (6 x 1)
"""
if eq != None:
if np.size(eq) > 1:
cferror("eq should be a scalar !!!")
return
else:
q = eq[0]
else:
q = 0
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b/L).reshape(2,)
E,A,I = ep
rho = -N*L**2/(np.pi**2*E*I)
kL = np.pi*np.sqrt(abs(rho))+np.finfo(float).eps
if rho > 0:
f1 = (kL/2)/np.tan(kL/2)
f2 = (1/12.)*kL**2/(1-f1)
f3 = f1/4+3*f2/4
f4 = -f1/2+3*f2/2
f5 = f1*f2
h = 6*(2/kL**2-(1+np.cos(kL))/(kL*np.sin(kL)))
elif rho < 0:
f1 = (kL/2)/np.tanh(kL/2)
f2 = -(1/12.)*kL**2/(1-f1)
f3 = f1/4+3*f2/4
f4 = -f1/2+3*f2/2
f5 = f1*f2
h = -6*(2/kL**2-(1+np.cosh(kL))/(kL*np.sinh(kL)))
else:
f1 = f2 = f3 = f4 = f5 = h = 1
Kle = np.mat([
[ E*A/L, 0., 0., -E*A/L, 0., 0. ],
[ 0., 12*E*I*f5/L**3., 6*E*I*f2/L**2., 0., -12*E*I*f5/L**3., 6*E*I*f2/L**2.],
[ 0., 6*E*I*f2/L**2., 4*E*I*f3/L, 0., -6*E*I*f2/L**2., 2*E*I*f4/L ],
[-E*A/L, 0., 0., E*A/L, 0., 0. ],
[ 0., -12*E*I*f5/L**3.,-6*E*I*f2/L**2., 0., 12*E*I*f5/L**3.,-6*E*I*f2/L**2.],
[ 0., 6*E*I*f2/L**2., 2*E*I*f4/L, 0., -6*E*I*f2/L**2., 4*E*I*f3/L ]
])
fle = q*L*np.mat([0.,1/2.,L*h/12,0.,1/2.,-L*h/12]).T
G = np.mat([
[ n[0], n[1], 0, 0, 0, 0],
[-n[1], n[0], 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, n[0], n[1], 0],
[ 0, 0, 0,-n[1], n[0], 0],
[ 0, 0, 0, 0, 0, 1]
])
Ke = G.T*Kle*G
fe = G.T*fle
if eq != None:
return Ke,fe
else:
return Ke
def beam2gs(ex,ey,ep,ed,N,eq=None):
"""
Calculate section forces in a two dimensional nonlinear
beam element.
Parameters:
ex = [x1, x2]
ey = [y1, y2] element node coordinates
ep = [E,A,I] element properties;
E: Young's modulus
A: cross section area
I: moment of inertia
ed = [u1, ... ,u6] element displacement vector
N axial force
eq = [qy] distributed transverse load
Returns:
es = [[N1,V1,M1], element forces, local directions
[N2,V2,M2]]
"""
if eq != None:
eq = eq[0]
else:
eq = 0
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b/L).reshape(2,)
E,A,I = ep
rho = -N*L**2/(np.pi**2*E*I)
eps = 2.2204e-16
kL = np.pi*np.sqrt(abs(rho))+eps
if rho > 0:
f1 = (kL/2)/np.tan(kL/2)
f2 = (1/12.)*kL**2/(1-f1)
f3 = f1/4+3*f2/4
f4 = -f1/2+3*f2/2
f5 = f1*f2
h = 6*(2/kL**2-(1+np.cos(kL))/(kL*np.sin(kL)))
elif rho < 0:
f1 = (kL/2)/np.tanh(kL/2)
f2 = -(1/12.)*kL**2/(1-f1)
f3 = f1/4+3*f2/4
f4 = -f1/2+3*f2/2
f5 = f1*f2
h = -6*(2/kL**2-(1+np.cosh(kL))/(kL*np.sinh(kL)))
else:
f1 = f2 = f3 = f4 = f5 = h = 1
Kle = np.mat([
[ E*A/L, 0, 0, -E*A/L, 0, 0 ],
[ 0, 12*E*I*f5/L**3, 6*E*I*f2/L**2, 0, -12*E*I*f5/L**3, 6*E*I*f2/L**2],
[ 0, 6*E*I*f2/L**2, 4*E*I*f3/L, 0, -6*E*I*f2/L**2, 2*E*I*f4/L ],
[-E*A/L, 0, 0, E*A/L, 0, 0 ],
[ 0, -12*E*I*f5/L**3,-6*E*I*f2/L**2, 0, 12*E*I*f5/L**3,-6*E*I*f2/L**2],
[ 0, 6*E*I*f2/L**2, 2*E*I*f4/L, 0, -6*E*I*f2/L**2, 4*E*I*f3/L ]
])
fle = eq*L*np.mat([0,1/2.,L*h/12,0,1/2.,-L*h/12]).T
G = np.mat([
[ n[0], n[1], 0, 0, 0, 0],
[-n[1], n[0], 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, n[0], n[1], 0],
[ 0, 0, 0,-n[1], n[0], 0],
[ 0, 0, 0, 0, 0, 1]
])
u = np.asmatrix(ed).T
P = Kle*G*u-fle
es = np.mat([
[-P[0,0],-P[1,0],-P[2,0]],
[ P[3,0], P[4,0], P[5,0]]
])
return es
def beam2d(ex,ey,ep):
"""
Calculate the stiffness matrix Ke, the mass matrix Me
and the damping matrix Ce for a 2D elastic Bernoulli
beam element.
Parameters:
ex = [x1, x2]
ey = [y1, y2] element node coordinates
ep = [E,A,I,m,(a,b)] element properties;
E: Young's modulus
A: cross section area
I: moment of inertia
m: mass per unit length
a,b: damping coefficients,
Ce=aMe+bKe
Returns:
Ke element stiffness matrix (6 x 6)
Me element mass matrix
Ce element damping matrix, optional
"""
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n = np.asarray(b/L).reshape(2,)
a = 0
b = 0
if np.size(ep) == 4:
E,A,I,m = ep
elif np.size(ep) == 6:
E,A,I,m,a,b = ep
Kle = np.mat([
[ E*A/L, 0, 0, -E*A/L, 0, 0 ],
[ 0, 12*E*I/L**3, 6*E*I/L**2, 0, -12*E*I/L**3, 6*E*I/L**2],
[ 0, 6*E*I/L**2, 4*E*I/L, 0, -6*E*I/L**2, 2*E*I/L ],
[-E*A/L, 0, 0, E*A/L, 0, 0 ],
[ 0, -12*E*I/L**3,-6*E*I/L**2, 0, 12*E*I/L**3,-6*E*I/L**2],
[ 0, 6*E*I/L**2, 2*E*I/L, 0, -6*E*I/L**2, 4*E*I/L ]
])
Mle = m*L/420*np.mat([
[ 140, 0, 0, 70, 0, 0 ],
[ 0, 156, 22*L, 0, 54, -13*L ],
[ 0, 22*L, 4*L**2, 0, 13*L,-3*L**2],
[ 70, 0, 0, 140, 0, 0 ],
[ 0, 54, 13*L, 0, 156, -22*L ],
[ 0, -13*L,-3*L**2, 0, -22*L, 4*L**2]
])
Cle = a*Mle+b*Kle
G = np.mat([
[ n[0], n[1], 0, 0, 0, 0],
[-n[1], n[0], 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, n[0], n[1], 0],
[ 0, 0, 0,-n[1], n[0], 0],
[ 0, 0, 0, 0, 0, 1]
])
Ke = G.T*Kle*G
Me = G.T*Mle*G
Ce = G.T*Cle*G
if np.size(ep) == 4:
return Ke,Me
elif np.size(ep) == 6:
return Ke,Me,Ce
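# Usage sketch (illustrative, the properties are assumptions): stiffness and mass
# matrices for a dynamic 2D beam with mass per unit length m = 30, and optionally a
# damping matrix when the Rayleigh coefficients a, b are appended to ep.
#
#   Ke, Me = beam2d([0., 3.], [0., 0.], [2.1e11, 45.3e-4, 2510e-8, 30.])
#   Ke, Me, Ce = beam2d([0., 3.], [0., 0.], [2.1e11, 45.3e-4, 2510e-8, 30., 0.02, 0.01])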
def beam3e(ex,ey,ez,eo,ep,eq=None):
"""
Calculate the stiffness matrix for a 3D elastic Bernoulli
beam element.
Parameters:
ex = [x1 x2]
ey = [y1 y2]
ez = [z1 z2] element node coordinates
eo = [xz yz zz] orientation of local z axis
ep = [E G A Iy Iz Kv] element properties
E: Young's modulus
G: Shear modulus
A: Cross section area
Iy: Moment of inertia, local y-axis
Iz: Moment of inertia, local z-axis
Kv: Saint-Venant's torsion constant
eq = [qx qy qz qw] distributed loads
Returns:
Ke beam stiffness matrix (12 x 12)
fe equivalent nodal forces (12 x 1)
"""
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]],
[ez[1]-ez[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n1 = np.asarray(b.T/L).reshape(3,)
eo = np.asmatrix(eo)
lc = np.asscalar(np.sqrt(eo*eo.T))
n3 = np.asarray(eo/lc).reshape(3,)
E,Gs,A,Iy,Iz,Kv = ep
qx = 0.
qy = 0.
qz = 0.
qw = 0.
if eq != None:
qx,qy,qz,qw = eq
a = E*A/L
b = 12*E*Iz/L**3
c = 6*E*Iz/L**2
d = 12*E*Iy/L**3
e = 6*E*Iy/L**2
f = Gs*Kv/L
g = 2*E*Iy/L
h = 2*E*Iz/L
Kle = np.mat([
[ a, 0, 0, 0, 0, 0, -a, 0, 0, 0, 0, 0 ],
[ 0, b, 0, 0, 0, c, 0,-b, 0, 0, 0, c ],
[ 0, 0, d, 0,-e, 0, 0, 0,-d, 0,-e, 0 ],
[ 0, 0, 0, f, 0, 0, 0, 0, 0,-f, 0, 0 ],
[ 0, 0,-e, 0, 2*g, 0, 0, 0, e, 0, g, 0 ],
[ 0, c, 0, 0, 0, 2*h, 0,-c, 0, 0, 0, h ],
[-a, 0, 0, 0, 0, 0, a, 0, 0, 0, 0, 0 ],
[ 0,-b, 0, 0, 0, -c, 0, b, 0, 0, 0, -c ],
[ 0, 0,-d, 0, e, 0, 0, 0, d, 0, e, 0 ],
[ 0, 0, 0,-f, 0, 0, 0, 0, 0, f, 0, 0 ],
[ 0, 0,-e, 0, g, 0, 0, 0, e, 0, 2*g, 0 ],
[ 0, c, 0, 0, 0, h, 0,-c, 0, 0, 0, 2*h]
])
fle = L/2*np.mat([qx, qy, qz, qw, -qz*L/6, qy*L/6, qx, qy, qz, qw, qz*L/6, -qy*L/6]).T
n2 = np.array([0.,0.,0.])
n2[0] = n3[1]*n1[2]-n3[2]*n1[1]
n2[1] = -n1[2]*n3[0]+n1[0]*n3[2]
n2[2] = n3[0]*n1[1]-n1[0]*n3[1]
#An = np.append([n1,n2],[n3],0)
G = np.mat([
[ n1[0], n1[1], n1[2], 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ n2[0], n2[1], n2[2], 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ n3[0], n3[1], n3[2], 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, n1[0], n1[1], n1[2], 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, n2[0], n2[1], n2[2], 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, n3[0], n3[1], n3[2], 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, n1[0], n1[1], n1[2], 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, n2[0], n2[1], n2[2], 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, n3[0], n3[1], n3[2], 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, n1[0], n1[1], n1[2]],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, n2[0], n2[1], n2[2]],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, n3[0], n3[1], n3[2]]
])
Ke = G.T*Kle*G
fe = G.T*fle
if eq == None:
return Ke
else:
return Ke,fe
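# Usage sketch (illustrative, the properties are assumptions): a 3D beam along the
# global x-axis with the local z-axis oriented along global z.
#
#   ex = [0., 2.]
#   ey = [0., 0.]
#   ez = [0., 0.]
#   eo = [0., 0., 1.]
#   ep = [2.1e11, 8.1e10, 45.3e-4, 2510e-8, 2510e-8, 1e-6]   # [E, G, A, Iy, Iz, Kv]
#   Ke = beam3e(ex, ey, ez, eo, ep)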
def beam3s(ex,ey,ez,eo,ep,ed,eq=None,n=None):
"""
Calculate the variation of the section forces and displacements
along a three-dimensional beam element.
Parameters:
ex = [x1 x2] element node coordinates
ey = [y1 y2]
ez = [z1 z2]
eo = [xz yz zz] orientation of local z axis
ep = [E G A Iy Iz Kv] element properties
E: Young's modulus
G: Shear modulus
A: Cross section area
Iy: Moment of inertia, local y-axis
Iz: Moment of inertia, local z-axis
Kv: Saint-Venant's torsion constant
ed the element displacement vector from the
global coordinate system
eq = [qx qy qz qw] the distributed axial, transversal and
torsional loads
n the number of point in which displacements
and section forces are to be computed
Returns:
es = [[N1,Vy1,Vz1,T1,My1,Mz1], section forces in n points along
[N2,Vy2,Vz2,T2,My2,Mz2], the local x-axis
[..,...,...,..,...,...],
[Nn,Vyn,Vzn,Tn,Myn,Mzn]]
edi = [[u1,v1,w1,fi1], displacements in n points along
[u2,v2,w2,fi2], the local x-axis
[..,..,..,...],
[un,vn,wn,fin]]
eci = [[x1], local x-coordinates of the evaluation
[x2], points
[..],
[xn]]
"""
b = np.mat([
[ex[1]-ex[0]],
[ey[1]-ey[0]],
[ez[1]-ez[0]]
])
L = np.asscalar(np.sqrt(b.T*b))
n1 = np.asarray(b.T/L).reshape(3,)
eo = np.asmatrix(eo)
lc = np.asscalar(np.sqrt(eo*eo.T))
n3 = np.asarray(eo/lc).reshape(3,)
EA = ep[0]*ep[2]
EIy = ep[0]*ep[3]
EIz = ep[0]*ep[4]
GKv = ep[1]*ep[5]
qx = 0.
qy = 0.
qz = 0.
qw = 0.
if eq != None:
qx,qy,qz,qw = eq
ne = 2
if n != None:
ne = n
n2 = np.array([0.,0.,0.])
n2[0] = n3[1]*n1[2]-n3[2]*n1[1]
n2[1] = -n1[2]*n3[0]+n1[0]*n3[2]
n2[2] = n3[0]*n1[1]-n1[0]*n3[1]
G = np.mat([
[ n1[0], n1[1], n1[2], 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ n2[0], n2[1], n2[2], 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ n3[0], n3[1], n3[2], 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, n1[0], n1[1], n1[2], 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, n2[0], n2[1], n2[2], 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, n3[0], n3[1], n3[2], 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, n1[0], n1[1], n1[2], 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, n2[0], n2[1], n2[2], 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, n3[0], n3[1], n3[2], 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, n1[0], n1[1], n1[2]],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, n2[0], n2[1], n2[2]],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, n3[0], n3[1], n3[2]]
])
u = G*np.asmatrix(ed).T-np.array([ # u is the local element displacement
[ 0 ], # vector minus the particular solution
[ 0 ], # to the beam's diff.eq:s
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[-qx*L**2/(2*EA) ],
[ qy*L**4/(24*EIz)],
[ qz*L**4/(24*EIy)],
[-qw*L**2/(2*GKv) ],
[-qz*L**3/(6*EIy) ],
[ qy*L**3/(6*EIz) ]
])
C = np.mat([
[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[ 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[ L, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[ 0, 0, L**3, L**2, L, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, L**3, L**2, L, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, L, 1],
[ 0, 0, 0, 0, 0, 0,-3*L**2,-2*L, -1, 0, 0, 0],
[ 0, 0, 3*L**2, 2*L, 1, 0, 0, 0, 0, 0, 0, 0],
])
m = np.linalg.inv(C)*u
eci = np.zeros((ne,1))
es = np.zeros((ne,6))
edi = np.zeros((ne,4))
for i in np.arange(ne):
x = i*L/(ne-1)
eci[i,0] = x
es[i,:] = (np.mat([
[ EA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0,-6*EIz, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0,-6*EIy, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GKv, 0],
[ 0, 0, 0, 0, 0, 0,-6*EIy*x,-2*EIy, 0, 0, 0, 0],
[ 0, 0, 6*EIz*x, 2*EIz, 0, 0, 0, 0, 0, 0, 0, 0]
])*m+np.array([-qx*x,-qy*x,-qz*x,-qw*x,-qz*x**2/2,qy*x**2/2]).reshape(6,1)).T
edi[i,:] = (np.mat([
[ x, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, x**3, x**2, x, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, x**3, x**2, x, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x, 1]
])*m+np.array([-qx*x**2/(2*EA),qy*x**4/(24*EIz),qz*x**4/(24*EIy),-qw*x**2/(2*GKv)]).reshape(4,1)).T
if n == None:
return es
else:
return es,edi,eci
def flw2te(ex,ey,ep,D,eq=None):
"""
Compute element stiffness (conductivity) matrix for a triangular field element.
Parameters:
ex = [x1 x2 x3]
ey = [y1 y2 y3] element coordinates
ep = [t] element thickness
D = [kxx kxy;
kyx kyy] constitutive matrix
eq heat supply per unit volume
Returns:
Ke element 'stiffness' matrix (3 x 3)
fe element load vector (3 x 1)
"""
t=ep[0];
if eq==None:
eq=0.
exm = np.asmatrix(ex)
eym = np.asmatrix(ey)
C=np.asmatrix(np.hstack([np.ones((3,1)),exm.T,eym.T]))
B=np.matrix([
[0.,1.,0.],
[0.,0.,1.]
])*C.I
A=0.5*np.linalg.det(C)
Ke=B.T*D*B*t*A
fe=np.matrix([[1.,1.,1.]]).T*eq*A*t/3
if eq==0.:
return Ke
else:
return Ke, fe
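# Usage sketch (illustrative, the conductivity is an assumption): conductivity
# matrix for a unit right triangle with thickness 1 and isotropic k = 1.7, with and
# without a heat supply term.
#
#   Dk = np.mat([[1.7, 0.], [0., 1.7]])
#   Ke = flw2te([0., 1., 0.], [0., 0., 1.], [1.0], Dk)
#   Ke, fe = flw2te([0., 1., 0.], [0., 0., 1.], [1.0], Dk, eq=100.)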
def flw2ts(ex,ey,D,ed):
"""
Compute flows or corresponding quantities in the triangular field element.
Parameters:
ex = [x1 x2 x3]
ey = [y1 y2 y3] element coordinates
D = [kxx kxy
kyx kyy] constitutive matrix
ed =[u1 u2 u3] u1,u2,u3: nodal values
.. .. ..;
Returns:
es=[ qx qy ]
... ..] element flows
et=[ gx gy ]
... ..] element gradients
"""
if len(ex.shape)>1:
qs = np.zeros([ex.shape[0],2])
qt = np.zeros([ex.shape[0],2])
row = 0
for exr, eyr, edr in zip(ex, ey, ed):
exm = np.asmatrix(exr)
eym = np.asmatrix(eyr)
edm = np.asmatrix(edr)
C=np.asmatrix(np.hstack([np.ones((3,1)),exm.T,eym.T]))
B=np.matrix([
[0.,1.,0.],
[0.,0.,1.]
])*C.I
qs[row,:]=(-D*B*edm.T).T
qt[row,:]=(B*edm.T).T
row += 1
return qs, qt
else:
exm = np.asmatrix(ex)
eym = np.asmatrix(ey)
edm = np.asmatrix(ed)
C=np.asmatrix(np.hstack([np.ones((3,1)),exm.T,eym.T]))
B=np.matrix([
[0.,1.,0.],
[0.,0.,1.]
])*C.I
qs=-D*B*edm.T
qt=B*edm.T
return qs.T, qt.T
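# Usage sketch (illustrative, the nodal values are made up): flows and gradients for
# the triangle of the flw2te sketch; note that this routine expects array inputs
# because of the shape test above.
#
#   Dk = np.mat([[1.7, 0.], [0., 1.7]])
#   es, et = flw2ts(np.array([0., 1., 0.]), np.array([0., 0., 1.]), Dk,
#                   np.array([20., 25., 30.]))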
def flw2qe(ex,ey,ep,D,eq=None):
"""
Compute element stiffness (conductivity) matrix for a quadrilateral field element.
Parameters:
ex = [x1, x2, x3, x4]
ey = [y1, y2, y3, y4] element coordinates
ep = [t] element thickness
D = [[kxx, kxy],
[kyx, kyy]] constitutive matrix
eq heat supply per unit volume
Returns:
Ke element 'stiffness' matrix (4 x 4)
fe element load vector (4 x 1)
"""
xc = sum(ex)/4.
yc = sum(ey)/4.
K = np.zeros((5,5))
f = np.zeros((5,1))
if eq == None:
k1 = flw2te([ex[0],ex[1],xc],[ey[0],ey[1],yc],ep,D)
K = assem(np.array([1,2,5]),K,k1)
k1 = flw2te([ex[1],ex[2],xc],[ey[1],ey[2],yc],ep,D)
K = assem(np.array([2,3,5]),K,k1)
k1 = flw2te([ex[2],ex[3],xc],[ey[2],ey[3],yc],ep,D)
K = assem(np.array([3,4,5]),K,k1)
k1 = flw2te([ex[3],ex[0],xc],[ey[3],ey[0],yc],ep,D)
K = assem(np.array([4,1,5]),K,k1)
else:
k1,f1 = flw2te([ex[0],ex[1],xc],[ey[0],ey[1],yc],ep,D,eq)
K,f = assem(np.array([1,2,5]),K,k1,f,f1)
k1,f1 = flw2te([ex[1],ex[2],xc],[ey[1],ey[2],yc],ep,D,eq)
K,f = assem(np.array([2,3,5]),K,k1,f,f1)
k1,f1 = flw2te([ex[2],ex[3],xc],[ey[2],ey[3],yc],ep,D,eq)
K,f = assem(np.array([3,4,5]),K,k1,f,f1)
k1,f1 = flw2te([ex[3],ex[0],xc],[ey[3],ey[0],yc],ep,D,eq)
K,f = assem(np.array([4,1,5]),K,k1,f,f1)
Ke1,fe1 = statcon(K,f,np.array([5]));
Ke = Ke1
fe = fe1
if eq == None:
return Ke
else:
return Ke,fe
def flw2qs(ex,ey,ep,D,ed,eq=None):
"""
Compute flows or corresponding quantities in the
quadrilateral field element.
Parameters:
ex = [x1, x2, x3, x4]
ey = [y1, y2, y3, y4] element coordinates
ep = [t] element thickness
D = [[kxx, kxy],
[kyx, kyy]] constitutive matrix
ed = [[u1, u2, u3, u4],
[.., .., .., ..]] u1,u2,u3,u4: nodal values
eq heat supply per unit volume
Returns:
es = [[qx, qy],
[.., ..]] element flows
et = [[gx, gy],
[.., ..]] element gradients
"""
K = np.zeros((5,5))
f = np.zeros((5,1))
xm = sum(ex)/4
ym = sum(ey)/4
if eq == None:
q = 0
else:
q = eq
En = np.array([
[1,2,5],
[2,3,5],
[3,4,5],
[4,1,5]
])
ex1 = np.array([ex[0],ex[1],xm])
ey1 = np.array([ey[0],ey[1],ym])
ex2 = np.array([ex[1],ex[2],xm])
ey2 = np.array([ey[1],ey[2],ym])
ex3 = np.array([ex[2],ex[3],xm])
ey3 = np.array([ey[2],ey[3],ym])
ex4 = np.array([ex[3],ex[0],xm])
ey4 = np.array([ey[3],ey[0],ym])
if eq == None:
k1 = flw2te(ex1,ey1,ep,D)
K = assem(En[0],K,k1)
k1 = flw2te(ex2,ey2,ep,D)
K = assem(En[1],K,k1)
k1 = flw2te(ex3,ey3,ep,D)
K = assem(En[2],K,k1)
k1 = flw2te(ex4,ey4,ep,D)
K = assem(En[3],K,k1)
else:
k1,f1 = flw2te(ex1,ey1,ep,D,q)
K,f = assem(En[0],K,k1,f,f1)
k1,f1 = flw2te(ex2,ey2,ep,D,q)
K,f = assem(En[1],K,k1,f,f1)
k1,f1 = flw2te(ex3,ey3,ep,D,q)
K,f = assem(En[2],K,k1,f,f1)
k1,f1 = flw2te(ex4,ey4,ep,D,q)
K,f = assem(En[3],K,k1,f,f1)
if ed.ndim==1:
ed = np.array([ed])
ni,nj = np.shape(ed)
a = np.zeros((5,ni))
for i in range(ni):
a[np.ix_(range(5),[i])],r = np.asarray(solveq(K,f,np.arange(1,5),ed[i]))
s1,t1 = flw2ts(ex1,ey1,D,a[np.ix_(En[0,:]-1,np.arange(ni))].T)
s2,t2 = flw2ts(ex2,ey2,D,a[np.ix_(En[1,:]-1,np.arange(ni))].T)
s3,t3 = flw2ts(ex3,ey3,D,a[np.ix_(En[2,:]-1,np.arange(ni))].T)
s4,t4 = flw2ts(ex4,ey4,D,a[np.ix_(En[3,:]-1,np.arange(ni))].T)
es = (s1+s2+s3+s4)/4.
et = (t1+t2+t3+t4)/4.
return es,et
def flw2i4e(ex,ey,ep,D,eq=None):
"""
Compute element stiffness (conductivity)
matrix for 4 node isoparametric field element
Parameters:
ex = [x1 x2 x3 x4] element coordinates
ey = [y1 y2 y3 y4]
ep = [t ir] thickness and integration rule
D = [[kxx kxy],
[kyx kyy]] constitutive matrix
eq heat supply per unit volume
Returns:
Ke element 'stiffness' matrix (4 x 4)
fe element load vector (4 x 1)
"""
t = ep[0]
ir = ep[1]
ngp = ir*ir
if eq == None:
q = 0
else:
q = eq
if ir == 1:
g1 = 0.0
w1 = 2.0
gp = np.mat([g1,g1])
w = np.mat([w1,w1])
elif ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-g1,-g1],
[ g1,-g1],
[-g1, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w1, w1],
[ w1, w1],
[ w1, w1]
])
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat([
[-g1,-g1],
[-g2,-g1],
[ g1,-g1],
[-g1, g2],
[ g2, g2],
[ g1, g2],
[-g1, g1],
[ g2, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w2, w1],
[ w1, w1],
[ w1, w2],
[ w2, w2],
[ w1, w2],
[ w1, w1],
[ w2, w1],
[ w1, w1]
])
else:
cfinfo("Used number of integration points not implemented")
wp = np.multiply(w[:,0],w[:,1])
xsi = gp[:,0]
eta = gp[:,1]
r2 = ngp*2
N = np.multiply((1-xsi),(1-eta))/4.
N = np.append(N,np.multiply((1+xsi),(1-eta))/4.,axis=1)
N = np.append(N,np.multiply((1+xsi),(1+eta))/4.,axis=1)
N = np.append(N,np.multiply((1-xsi),(1+eta))/4.,axis=1)
dNr = np.mat(np.zeros((r2,4)))
dNr[0:r2:2,0] = -(1-eta)/4.
dNr[0:r2:2,1] = (1-eta)/4.
dNr[0:r2:2,2] = (1+eta)/4.
dNr[0:r2:2,3] = -(1+eta)/4.
dNr[1:r2+1:2,0] = -(1-xsi)/4.
dNr[1:r2+1:2,1] = -(1+xsi)/4.
dNr[1:r2+1:2,2] = (1+xsi)/4.
dNr[1:r2+1:2,3] = (1-xsi)/4.
Ke1 = np.mat(np.zeros((4,4)))
fe1 = np.mat(np.zeros((4,1)))
JT = dNr*np.mat([ex,ey]).T
for i in range(ngp):
indx = np.array([2*(i+1)-1,2*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobi determinant == 0")
JTinv = np.linalg.inv(JT[indx-1,:])
B = JTinv*dNr[indx-1,:]
Ke1 = Ke1+B.T*D*B*detJ*np.asscalar(wp[i])
fe1 = fe1+N[i,:].T*detJ*wp[i]
if eq == None:
return Ke1*t
else:
return Ke1*t,fe1*t*eq
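# Usage sketch (illustrative, the numbers are assumptions): 4 node isoparametric
# field element over the unit square, thickness 1 and a 2 x 2 Gauss rule (ir = 2).
#
#   Dk = np.mat([[1.7, 0.], [0., 1.7]])
#   Ke = flw2i4e([0., 1., 1., 0.], [0., 0., 1., 1.], [1.0, 2], Dk)
#   Ke, fe = flw2i4e([0., 1., 1., 0.], [0., 0., 1., 1.], [1.0, 2], Dk, eq=100.)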
def flw2i4s(ex,ey,ep,D,ed):
"""
Compute flows or corresponding quantities in the
4 node isoparametric element.
Parameters:
ex = [x1 x2 x3 x4] element coordinates
ey = [y1 y2 y3 y4]
ep = [t ir] thickness and integration rule
D = [[kxx kxy],
[kyx kyy]] constitutive matrix
ed = [u1, u2, u3, u4] u1,u2,u3,u4: nodal values
Returns:
es = [[qx, qy],
[.., ..]] element flows
et = [[qx, qy],
[... ..]] element gradients
eci=[[ix1, iy1], Gauss point location vector
[... ...], nint: number of integration points
[ix(nint), iy(nint)]]
"""
t = ep[0]
ir = ep[1]
ngp = ir*ir
if ir == 1:
g1 = 0.0
w1 = 2.0
gp = np.mat([g1,g1])
w = np.mat([w1,w1])
elif ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-g1,-g1],
[ g1,-g1],
[-g1, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w1, w1],
[ w1, w1],
[ w1, w1]
])
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat([
[-g1,-g1],
[-g2,-g1],
[ g1,-g1],
[-g1, g2],
[ g2, g2],
[ g1, g2],
[-g1, g1],
[ g2, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w2, w1],
[ w1, w1],
[ w1, w2],
[ w2, w2],
[ w1, w2],
[ w1, w1],
[ w2, w1],
[ w1, w1]
])
else:
cfinfo("Used number of integration points not implemented")
wp = np.multiply(w[:,0],w[:,1])
xsi = gp[:,0]
eta = gp[:,1]
r2 = ngp*2
N = np.multiply((1-xsi),(1-eta))/4.
N = np.append(N,np.multiply((1+xsi),(1-eta))/4.,axis=1)
N = np.append(N,np.multiply((1+xsi),(1+eta))/4.,axis=1)
N = np.append(N,np.multiply((1-xsi),(1+eta))/4.,axis=1)
dNr = np.mat(np.zeros((r2,4)))
dNr[0:r2:2,0] = -(1-eta)/4.
dNr[0:r2:2,1] = (1-eta)/4.
dNr[0:r2:2,2] = (1+eta)/4.
dNr[0:r2:2,3] = -(1+eta)/4.
dNr[1:r2+1:2,0] = -(1-xsi)/4.
dNr[1:r2+1:2,1] = -(1+xsi)/4.
dNr[1:r2+1:2,2] = (1+xsi)/4.
dNr[1:r2+1:2,3] = (1-xsi)/4.
eci = N*np.mat([ex,ey]).T
if ed.ndim == 1:
ed = np.array([ed])
red,ced = np.shape(ed)
JT = dNr*np.mat([ex,ey]).T
es = np.mat(np.zeros((ngp*red,2)))
et = np.mat(np.zeros((ngp*red,2)))
for i in range(ngp):
indx = np.array([2*(i+1)-1,2*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobi determinatn == 0")
JTinv = np.linalg.inv(JT[indx-1,:])
B = JTinv*dNr[indx-1,:]
p1 = -D*B*ed.T
p2 = B*ed.T
es[i:ngp*red:ngp,:] = p1.T
et[i:ngp*red:ngp,:] = p2.T
return es,et,eci
def flw2i8e(ex,ey,ep,D,eq=None):
"""
Compute element stiffness (conductivity)
matrix for 8 node isoparametric field element.
Parameters:
ex = [x1, ..., x8] element coordinates
ey = [y1, ..., y8]
ep = [t, ir] thickness and integration rule
D = [[kxx, kxy],
[kyx, kyy]] constitutive matrix
eq heat supply per unit volume
Returns:
Ke element 'stiffness' matrix (8 x 8)
fe element load vector (8 x 1)
"""
t = ep[0]
ir = ep[1]
ngp = ir*ir
if eq == None:
q = 0
else:
q = eq
if ir == 1:
g1 = 0.0
w1 = 2.0
gp = np.mat([g1,g1])
w = np.mat([w1,w1])
elif ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-g1,-g1],
[ g1,-g1],
[-g1, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w1, w1],
[ w1, w1],
[ w1, w1]
])
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat([
[-g1,-g1],
[-g2,-g1],
[ g1,-g1],
[-g1, g2],
[ g2, g2],
[ g1, g2],
[-g1, g1],
[ g2, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w2, w1],
[ w1, w1],
[ w1, w2],
[ w2, w2],
[ w1, w2],
[ w1, w1],
[ w2, w1],
[ w1, w1]
])
else:
cfinfo("Used number of integration points not implemented")
wp = np.multiply(w[:,0],w[:,1])
xsi = gp[:,0]
eta = gp[:,1]
r2 = ngp*2
N = np.multiply(np.multiply(-(1-xsi),(1-eta)),(1+xsi+eta))/4.
N = np.append(N,np.multiply(np.multiply(-(1+xsi),(1-eta)),(1-xsi+eta))/4.,axis=1)
N = np.append(N,np.multiply(np.multiply(-(1+xsi),(1+eta)),(1-xsi-eta))/4.,axis=1)
N = np.append(N,np.multiply(np.multiply(-(1-xsi),(1+eta)),(1+xsi-eta))/4.,axis=1)
N = np.append(N,np.multiply((1-np.multiply(xsi,xsi)),(1-eta))/2.,axis=1)
N = np.append(N,np.multiply((1+xsi),(1-np.multiply(eta,eta)))/2.,axis=1)
N = np.append(N,np.multiply((1-np.multiply(xsi,xsi)),(1+eta))/2.,axis=1)
N = np.append(N,np.multiply((1-xsi),(1-np.multiply(eta,eta)))/2.,axis=1)
dNr = np.mat(np.zeros((r2,8)))
dNr[0:r2:2,0] = -(-np.multiply((1-eta),(1+xsi+eta))+np.multiply((1-xsi),(1-eta)))/4.
dNr[0:r2:2,1] = -(np.multiply((1-eta),(1-xsi+eta))-np.multiply((1+xsi),(1-eta)))/4.
dNr[0:r2:2,2] = -(np.multiply((1+eta),(1-xsi-eta))-np.multiply((1+xsi),(1+eta)))/4.
dNr[0:r2:2,3] = -(-np.multiply((1+eta),(1+xsi-eta))+np.multiply((1-xsi),(1+eta)))/4.
dNr[0:r2:2,4] = -np.multiply(xsi,(1-eta))
dNr[0:r2:2,5] = (1-np.multiply(eta,eta))/2.
dNr[0:r2:2,6] = -np.multiply(xsi,(1+eta))
dNr[0:r2:2,7] = -(1-np.multiply(eta,eta))/2.
dNr[1:r2+1:2,0] = -(-np.multiply((1-xsi),(1+xsi+eta))+np.multiply((1-xsi),(1-eta)))/4.
dNr[1:r2+1:2,1] = -(-np.multiply((1+xsi),(1-xsi+eta))+np.multiply((1+xsi),(1-eta)))/4.
dNr[1:r2+1:2,2] = -(np.multiply((1+xsi),(1-xsi-eta))-np.multiply((1+xsi),(1+eta)))/4.
dNr[1:r2+1:2,3] = -(np.multiply((1-xsi),(1+xsi-eta))-np.multiply((1-xsi),(1+eta)))/4.
dNr[1:r2+1:2,4] = -(1-np.multiply(xsi,xsi))/2.
dNr[1:r2+1:2,5] = -np.multiply(eta,(1+xsi))
dNr[1:r2+1:2,6] = (1-np.multiply(xsi,xsi))/2.
dNr[1:r2+1:2,7] = -np.multiply(eta,(1-xsi))
Ke1 = np.mat(np.zeros((8,8)))
fe1 = np.mat(np.zeros((8,1)))
JT = dNr*np.mat([ex,ey]).T
for i in range(ngp):
indx = np.array([2*(i+1)-1,2*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobideterminanten lika med noll!")
JTinv = np.linalg.inv(JT[indx-1,:])
B = JTinv*dNr[indx-1,:]
Ke1 = Ke1+B.T*D*B*detJ*np.asscalar(wp[i])
fe1 = fe1+N[i,:].T*detJ*wp[i]
if eq != None:
return Ke1*t,fe1*t*q
else:
return Ke1*t
def flw2i8s(ex,ey,ep,D,ed):
"""
Compute flows or corresponding quantities in the
8 node isoparametric element.
Parameters:
ex = [x1,x2,x3....,x8] element coordinates
ey = [y1,y2,y3....,y8]
ep = [t,ir] thickness and integration rule
D = [[kxx,kxy],
[kyx,kyy]] constitutive matrix
ed = [u1,....,u8] u1,....,u8: nodal values
Returns:
es = [[qx,qy],
[..,..]] element flows
et = [[qx,qy],
[..,..]] element gradients
eci=[[ix1,iy1], Gauss point location vector
[...,...], nint: number of integration points
[ix(nint),iy(nint)]]
"""
t = ep[0]
ir = ep[1]
ngp = ir*ir
if ir == 1:
g1 = 0.0
w1 = 2.0
gp = np.mat([g1,g1])
w = np.mat([w1,w1])
elif ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-g1,-g1],
[ g1,-g1],
[-g1, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w1, w1],
[ w1, w1],
[ w1, w1]
])
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat([
[-g1,-g1],
[-g2,-g1],
[ g1,-g1],
[-g1, g2],
[ g2, g2],
[ g1, g2],
[-g1, g1],
[ g2, g1],
[ g1, g1]
])
w = np.mat([
[ w1, w1],
[ w2, w1],
[ w1, w1],
[ w1, w2],
[ w2, w2],
[ w1, w2],
[ w1, w1],
[ w2, w1],
[ w1, w1]
])
else:
cfinfo("Used number of integration points not implemented")
wp = np.multiply(w[:,0],w[:,1])
xsi = gp[:,0]
eta = gp[:,1]
r2 = ngp*2
N = np.multiply(np.multiply(-(1-xsi),(1-eta)),(1+xsi+eta))/4.
N = np.append(N,np.multiply(np.multiply(-(1+xsi),(1-eta)),(1-xsi+eta))/4.,axis=1)
N = np.append(N,np.multiply(np.multiply(-(1+xsi),(1+eta)),(1-xsi-eta))/4.,axis=1)
N = np.append(N,np.multiply(np.multiply(-(1-xsi),(1+eta)),(1+xsi-eta))/4.,axis=1)
N = np.append(N,np.multiply((1-np.multiply(xsi,xsi)),(1-eta))/2.,axis=1)
N = np.append(N,np.multiply((1+xsi),(1-np.multiply(eta,eta)))/2.,axis=1)
N = np.append(N,np.multiply((1-np.multiply(xsi,xsi)),(1+eta))/2.,axis=1)
N = np.append(N,np.multiply((1-xsi),(1-np.multiply(eta,eta)))/2.,axis=1)
dNr = np.mat(np.zeros((r2,8)))
dNr[0:r2:2,0] = -(-np.multiply((1-eta),(1+xsi+eta))+np.multiply((1-xsi),(1-eta)))/4.
dNr[0:r2:2,1] = -(np.multiply((1-eta),(1-xsi+eta))-np.multiply((1+xsi),(1-eta)))/4.
dNr[0:r2:2,2] = -(np.multiply((1+eta),(1-xsi-eta))-np.multiply((1+xsi),(1+eta)))/4.
dNr[0:r2:2,3] = -(-np.multiply((1+eta),(1+xsi-eta))+np.multiply((1-xsi),(1+eta)))/4.
dNr[0:r2:2,4] = -np.multiply(xsi,(1-eta))
dNr[0:r2:2,5] = (1-np.multiply(eta,eta))/2.
dNr[0:r2:2,6] = -np.multiply(xsi,(1+eta))
dNr[0:r2:2,7] = -(1-np.multiply(eta,eta))/2.
dNr[1:r2+1:2,0] = -(-np.multiply((1-xsi),(1+xsi+eta))+np.multiply((1-xsi),(1-eta)))/4.
dNr[1:r2+1:2,1] = -(-np.multiply((1+xsi),(1-xsi+eta))+np.multiply((1+xsi),(1-eta)))/4.
dNr[1:r2+1:2,2] = -(np.multiply((1+xsi),(1-xsi-eta))-np.multiply((1+xsi),(1+eta)))/4.
dNr[1:r2+1:2,3] = -(np.multiply((1-xsi),(1+xsi-eta))-np.multiply((1-xsi),(1+eta)))/4.
dNr[1:r2+1:2,4] = -(1-np.multiply(xsi,xsi))/2.
dNr[1:r2+1:2,5] = -np.multiply(eta,(1+xsi))
dNr[1:r2+1:2,6] = (1-np.multiply(xsi,xsi))/2.
dNr[1:r2+1:2,7] = -np.multiply(eta,(1-xsi))
eci = N*np.mat([ex,ey]).T
if ed.ndim == 1:
ed = np.array([ed])
red,ced = np.shape(ed)
JT = dNr*np.mat([ex,ey]).T
es = np.mat(np.zeros((ngp*red,2)))
et = np.mat(np.zeros((ngp*red,2)))
for i in range(ngp):
indx = np.array([2*(i+1)-1,2*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobi determinant == 0")
JTinv = np.linalg.inv(JT[indx-1,:])
B = JTinv*dNr[indx-1,:]
p1 = -D*B*ed.T
p2 = B*ed.T
es[i:ngp*red:ngp,:] = p1.T
et[i:ngp*red:ngp,:] = p2.T
return es,et,eci
def flw3i8e(ex,ey,ez,ep,D,eq=None):
"""
Compute element stiffness (conductivity)
matrix for 8 node isoparametric field element.
Parameters:
ex = [x1,x2,x3,...,x8]
ey = [y1,y2,y3,...,y8] element coordinates
ez = [z1,z2,z3,...,z8]
ep = [ir] Ir: Integration rule
D = [[kxx,kxy,kxz],
[kyx,kyy,kyz],
[kzx,kzy,kzz]] constitutive matrix
eq heat supply per unit volume
Output:
Ke element 'stiffness' matrix (8 x 8)
fe element load vector (8 x 1)
"""
ir = ep[0]
ngp = ir*ir*ir
if eq == None:
q = 0
else:
q = eq
if ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-1,-1,-1],
[ 1,-1,-1],
[ 1, 1,-1],
[-1, 1,-1],
[-1,-1, 1],
[ 1,-1, 1],
[ 1, 1, 1],
[-1, 1, 1]
])*g1
w = np.mat(np.ones((8,3)))*w1
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat(np.zeros((27,3)))
w = np.mat(np.zeros((27,3)))
I1 = np.array([-1,0,1,-1,0,1,-1,0,1])
I2 = np.array([0,-1,0,0,1,0,0,1,0])
gp[:,0] = np.mat([I1,I1,I1]).reshape(27,1)*g1
gp[:,0] = np.mat([I2,I2,I2]).reshape(27,1)*g2+gp[:,0]
I1 = abs(I1)
I2 = abs(I2)
w[:,0] = np.mat([I1,I1,I1]).reshape(27,1)*w1
w[:,0] = np.mat([I2,I2,I2]).reshape(27,1)*w2+w[:,0]
I1 = np.array([-1,-1,-1,0,0,0,1,1,1])
I2 = np.array([0,0,0,1,1,1,0,0,0])
gp[:,1] = np.mat([I1,I1,I1]).reshape(27,1)*g1
gp[:,1] = np.mat([I2,I2,I2]).reshape(27,1)*g2+gp[:,1]
I1 = abs(I1)
I2 = abs(I2)
w[:,1] = np.mat([I1,I1,I1]).reshape(27,1)*w1
w[:,1] = np.mat([I2,I2,I2]).reshape(27,1)*w2+w[:,1]
I1 = np.array([-1,-1,-1,-1,-1,-1,-1,-1,-1])
I2 = np.array([0,0,0,0,0,0,0,0,0])
I3 = abs(I1)
gp[:,2] = np.mat([I1,I2,I3]).reshape(27,1)*g1
gp[:,2] = np.mat([I2,I3,I2]).reshape(27,1)*g2+gp[:,2]
w[:,2] = np.mat([I3,I2,I3]).reshape(27,1)*w1
w[:,2] = np.mat([I2,I3,I2]).reshape(27,1)*w2+w[:,2]
else:
cfinfo("Used number of integration points not implemented")
return
wp = np.multiply(np.multiply(w[:,0],w[:,1]),w[:,2])
xsi = gp[:,0]
eta = gp[:,1]
zet = gp[:,2]
r2 = ngp*3
N = np.multiply(np.multiply((1-xsi),(1-eta)),(1-zet))/8.
N = np.append(N,np.multiply(np.multiply((1+xsi),(1-eta)),(1-zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1+xsi),(1+eta)),(1-zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1-xsi),(1+eta)),(1-zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1-xsi),(1-eta)),(1+zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1+xsi),(1-eta)),(1+zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1+xsi),(1+eta)),(1+zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1-xsi),(1+eta)),(1+zet))/8.,axis=1)
dNr = np.mat(np.zeros((r2,8)))
dNr[0:r2:3,0]= np.multiply(-(1-eta),(1-zet))
dNr[0:r2:3,1]= np.multiply((1-eta),(1-zet))
dNr[0:r2:3,2]= np.multiply((1+eta),(1-zet))
dNr[0:r2:3,3]= np.multiply(-(1+eta),(1-zet))
dNr[0:r2:3,4]= np.multiply(-(1-eta),(1+zet))
dNr[0:r2:3,5]= np.multiply((1-eta),(1+zet))
dNr[0:r2:3,6]= np.multiply((1+eta),(1+zet))
dNr[0:r2:3,7]= np.multiply(-(1+eta),(1+zet))
dNr[1:r2+1:3,0] = np.multiply(-(1-xsi),(1-zet))
dNr[1:r2+1:3,1] = np.multiply(-(1+xsi),(1-zet))
dNr[1:r2+1:3,2] = np.multiply((1+xsi),(1-zet))
dNr[1:r2+1:3,3] = np.multiply((1-xsi),(1-zet))
dNr[1:r2+1:3,4] = np.multiply(-(1-xsi),(1+zet))
dNr[1:r2+1:3,5] = np.multiply(-(1+xsi),(1+zet))
dNr[1:r2+1:3,6] = np.multiply((1+xsi),(1+zet))
dNr[1:r2+1:3,7] = np.multiply((1-xsi),(1+zet))
dNr[2:r2+2:3,0] = np.multiply(-(1-xsi),(1-eta))
dNr[2:r2+2:3,1] = np.multiply(-(1+xsi),(1-eta))
dNr[2:r2+2:3,2] = np.multiply(-(1+xsi),(1+eta))
dNr[2:r2+2:3,3] = np.multiply(-(1-xsi),(1+eta))
dNr[2:r2+2:3,4] = np.multiply((1-xsi),(1-eta))
dNr[2:r2+2:3,5] = np.multiply((1+xsi),(1-eta))
dNr[2:r2+2:3,6] = np.multiply((1+xsi),(1+eta))
dNr[2:r2+2:3,7] = np.multiply((1-xsi),(1+eta))
dNr = dNr/8.
Ke1 = np.mat(np.zeros((8,8)))
fe1 = np.mat(np.zeros((8,1)))
JT = dNr*np.mat([ex,ey,ez]).T
for i in range(ngp):
indx = np.array([3*(i+1)-2,3*(i+1)-1,3*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobi determinant == 0")
JTinv = np.linalg.inv(JT[indx-1,:])
B = JTinv*dNr[indx-1,:]
Ke1 = Ke1+B.T*D*B*detJ*np.asscalar(wp[i])
fe1 = fe1+N[i,:].T*detJ*wp[i]
if eq != None:
return Ke1,fe1*q
else:
return Ke1
def flw3i8s(ex,ey,ez,ep,D,ed):
"""
Compute flows or corresponding quantities in the
8 node (3-dim) isoparametric field element.
Parameters:
ex = [x1,x2,x3,...,x8]
ey = [y1,y2,y3,...,y8] element coordinates
ez = [z1,z2,z3,...,z8]
ep = [ir] Ir: Integration rule
D = [[kxx,kxy,kxz],
[kyx,kyy,kyz],
[kzx,kzy,kzz]] constitutive matrix
ed = [[u1,....,u8], element nodal values
[..,....,..]]
Output:
es = [[qx,qy,qz],
[..,..,..]] element flows(s)
et = [[qx,qy,qz], element gradients(s)
[..,..,..]]
eci = [[ix1,iy1,iz1], location vector
[...,...,...], nint: number of integration points
[ix(nint),iy(nint),iz(nint)]]
"""
ir = ep[0]
ngp = ir*ir*ir
if ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-1,-1,-1],
[ 1,-1,-1],
[ 1, 1,-1],
[-1, 1,-1],
[-1,-1, 1],
[ 1,-1, 1],
[ 1, 1, 1],
[-1, 1, 1]
])*g1
w = np.mat(np.ones((8,3)))*w1
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat(np.zeros((27,3)))
w = np.mat(np.zeros((27,3)))
I1 = np.array([-1,0,1,-1,0,1,-1,0,1])
I2 = np.array([0,-1,0,0,1,0,0,1,0])
gp[:,0] = np.mat([I1,I1,I1]).reshape(27,1)*g1
gp[:,0] = np.mat([I2,I2,I2]).reshape(27,1)*g2+gp[:,0]
I1 = abs(I1)
I2 = abs(I2)
w[:,0] = np.mat([I1,I1,I1]).reshape(27,1)*w1
w[:,0] = np.mat([I2,I2,I2]).reshape(27,1)*w2+w[:,0]
I1 = np.array([-1,-1,-1,0,0,0,1,1,1])
I2 = np.array([0,0,0,1,1,1,0,0,0])
gp[:,1] = np.mat([I1,I1,I1]).reshape(27,1)*g1
gp[:,1] = np.mat([I2,I2,I2]).reshape(27,1)*g2+gp[:,1]
I1 = abs(I1)
I2 = abs(I2)
w[:,1] = np.mat([I1,I1,I1]).reshape(27,1)*w1
w[:,1] = np.mat([I2,I2,I2]).reshape(27,1)*w2+w[:,1]
I1 = np.array([-1,-1,-1,-1,-1,-1,-1,-1,-1])
I2 = np.array([0,0,0,0,0,0,0,0,0])
I3 = abs(I1)
gp[:,2] = np.mat([I1,I2,I3]).reshape(27,1)*g1
gp[:,2] = np.mat([I2,I3,I2]).reshape(27,1)*g2+gp[:,2]
w[:,2] = np.mat([I3,I2,I3]).reshape(27,1)*w1
w[:,2] = np.mat([I2,I3,I2]).reshape(27,1)*w2+w[:,2]
else:
cfinfo("Used number of integration points not implemented")
return
wp = np.multiply(np.multiply(w[:,0],w[:,1]),w[:,2])
xsi = gp[:,0]
eta = gp[:,1]
zet = gp[:,2]
r2 = ngp*3
N = np.multiply(np.multiply((1-xsi),(1-eta)),(1-zet))/8.
N = np.append(N,np.multiply(np.multiply((1+xsi),(1-eta)),(1-zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1+xsi),(1+eta)),(1-zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1-xsi),(1+eta)),(1-zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1-xsi),(1-eta)),(1+zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1+xsi),(1-eta)),(1+zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1+xsi),(1+eta)),(1+zet))/8.,axis=1)
N = np.append(N,np.multiply(np.multiply((1-xsi),(1+eta)),(1+zet))/8.,axis=1)
dNr = np.mat(np.zeros((r2,8)))
dNr[0:r2:3,0]= np.multiply(-(1-eta),(1-zet))
dNr[0:r2:3,1]= np.multiply((1-eta),(1-zet))
dNr[0:r2:3,2]= np.multiply((1+eta),(1-zet))
dNr[0:r2:3,3]= np.multiply(-(1+eta),(1-zet))
dNr[0:r2:3,4]= np.multiply(-(1-eta),(1+zet))
dNr[0:r2:3,5]= np.multiply((1-eta),(1+zet))
dNr[0:r2:3,6]= np.multiply((1+eta),(1+zet))
dNr[0:r2:3,7]= np.multiply(-(1+eta),(1+zet))
dNr[1:r2+1:3,0] = np.multiply(-(1-xsi),(1-zet))
dNr[1:r2+1:3,1] = np.multiply(-(1+xsi),(1-zet))
dNr[1:r2+1:3,2] = np.multiply((1+xsi),(1-zet))
dNr[1:r2+1:3,3] = np.multiply((1-xsi),(1-zet))
dNr[1:r2+1:3,4] = np.multiply(-(1-xsi),(1+zet))
dNr[1:r2+1:3,5] = np.multiply(-(1+xsi),(1+zet))
dNr[1:r2+1:3,6] = np.multiply((1+xsi),(1+zet))
dNr[1:r2+1:3,7] = np.multiply((1-xsi),(1+zet))
dNr[2:r2+2:3,0] = np.multiply(-(1-xsi),(1-eta))
dNr[2:r2+2:3,1] = np.multiply(-(1+xsi),(1-eta))
dNr[2:r2+2:3,2] = np.multiply(-(1+xsi),(1+eta))
dNr[2:r2+2:3,3] = np.multiply(-(1-xsi),(1+eta))
dNr[2:r2+2:3,4] = np.multiply((1-xsi),(1-eta))
dNr[2:r2+2:3,5] = np.multiply((1+xsi),(1-eta))
dNr[2:r2+2:3,6] = np.multiply((1+xsi),(1+eta))
dNr[2:r2+2:3,7] = np.multiply((1-xsi),(1+eta))
dNr = dNr/8.
eci = N*np.mat([ex,ey,ez]).T
if ed.ndim == 1:
ed = np.array([ed])
red,ced = np.shape(ed)
JT = dNr*np.mat([ex,ey,ez]).T
es = np.mat(np.zeros((ngp*red,3)))
et = np.mat(np.zeros((ngp*red,3)))
for i in range(ngp):
indx = np.array([3*(i+1)-2,3*(i+1)-1,3*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobideterminanten lika med noll!")
JTinv = np.linalg.inv(JT[indx-1,:])
B = JTinv*dNr[indx-1,:]
p1 = -D*B*ed.T
p2 = B*ed.T
es[i:ngp*red:ngp,:] = p1.T
et[i:ngp*red:ngp,:] = p2.T
return es,et,eci
def plante(ex,ey,ep,D,eq=None):
"""
Calculate the stiffness matrix for a triangular plane stress or plane strain element.
Parameters:
ex = [x1,x2,x3] element coordinates
ey = [y1,y2,y3]
ep = [ptype,t] ptype: analysis type
t: thickness
D constitutive matrix
eq = [[bx], bx: body force x-dir
[by]] by: body force y-dir
Returns:
Ke element stiffness matrix (6 x 6)
fe equivalent nodal forces (6 x 1) (if eq is given)
"""
ptype,t = ep
bx = 0.0
by = 0.0
if not eq is None:
bx = eq[0]
by = eq[1]
C = np.mat([
[1, ex[0], ey[0], 0, 0, 0],
[0, 0, 0, 1, ex[0], ey[0]],
[1, ex[1], ey[1], 0, 0, 0],
[0, 0, 0, 1, ex[1], ey[1]],
[1, ex[2], ey[2], 0, 0, 0],
[0, 0, 0, 1, ex[2], ey[2]]
])
A = 0.5*np.linalg.det(np.mat([
[1, ex[0], ey[0]],
[1, ex[1], ey[1]],
[1, ex[2], ey[2]]
]))
# --------- plane stress --------------------------------------
if ptype == 1:
B = np.mat([
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 0]
])*np.linalg.inv(C)
colD = D.shape[1]
if colD > 3:
Cm = np.linalg.inv(D)
Dm = np.linalg.inv(Cm[np.ix_((0,1,3),(0,1,3))])
else:
Dm = D
Ke = B.T*Dm*B*A*t
fe = A/3*np.mat([bx,by,bx,by,bx,by]).T*t
if eq is None:
return Ke
else:
return Ke,fe.T
#--------- plane strain --------------------------------------
elif ptype == 2:
B = np.mat([
[0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1,],
[0, 0, 1, 0, 1, 0,]
])*np.linalg.inv(C)
colD = D.shape[1]
if colD > 3:
Dm = D[np.ix_((0,1,3),(0,1,3))]
else:
Dm = D
Ke = B.T*Dm*B*A*t
fe = A/3*np.mat([bx,by,bx,by,bx,by]).T*t
if eq == None:
return Ke
else:
return Ke,fe.T
else:
cfinfo("Error ! Check first argument, ptype=1 or 2 allowed")
if eq == None:
return None
else:
return None,None
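# Usage sketch (illustrative, the material data are assumptions): plane stress
# triangle (ptype = 1) with thickness 0.01 and an isotropic elasticity matrix
# assembled by hand.
#
#   E = 2.1e11
#   v = 0.3
#   Dm = E/(1.-v**2)*np.mat([[1., v, 0.], [v, 1., 0.], [0., 0., (1.-v)/2.]])
#   Ke = plante([0., 1., 0.], [0., 0., 1.], [1, 0.01], Dm)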
def plants(ex,ey,ep,D,ed):
"""
Calculate element normal and shear stress for a
triangular plane stress or plane strain element.
INPUT: ex = [x1 x2 x3] element coordinates
ey = [y1 y2 y3]
ep = [ptype t ] ptype: analysis type
t: thickness
D constitutive matrix
ed =[u1 u2 ...u6 element displacement vector
...... ] one row for each element
OUTPUT: es = [ sigx sigy [sigz] tauxy element stress matrix
...... ] one row for each element
et = [ epsx epsy [epsz] gamxy element strain matrix
...... ] one row for each element
"""
ptype=ep[0]
if np.ndim(ex) == 1:
ex = np.array([ex])
if np.ndim(ey) == 1:
ey = np.array([ey])
if np.ndim(ed) == 1:
ed = np.array([ed])
rowed=ed.shape[0]
rowex=ex.shape[0]
# --------- plane stress --------------------------------------
if ptype==1:
colD = D.shape[1]
if colD>3:
Cm = np.linalg.inv(D)
Dm = np.linalg.inv(Cm[np.ix_((0,1,3),(0,1,3))])
else:
Dm = D
incie=0
if rowex==1:
incie=0
else:
incie=1
et=np.zeros([rowed,colD])
es=np.zeros([rowed,colD])
ie=0
for i in range(rowed):
C = np.matrix(
[[1, ex[ie,0], ey[ie,0], 0, 0, 0 ],
[0, 0, 0, 1, ex[ie,0], ey[ie,0] ],
[1, ex[ie,1], ey[ie,1], 0, 0, 0 ],
[0, 0, 0, 1, ex[ie,1], ey[ie,1] ],
[1, ex[ie,2], ey[ie,2], 0, 0, 0 ],
[0, 0, 0, 1, ex[ie,2], ey[ie,2] ]]
)
B = np.matrix([
[0,1,0,0,0,0],
[0,0,0,0,0,1],
[0,0,1,0,1,0]])*np.linalg.inv(C)
ee=B*np.asmatrix(ed[ie,:]).T
if colD>3:
ss=np.zeros([colD,1])
ss[[0,1,3]]=Dm*ee
ee=Cm*ss
else:
ss=Dm*ee
et[ie,:] = ee.T
es[ie,:] = ss.T
ie = ie + incie
return es, et
# --------- plane strain --------------------------------------
elif ptype == 2: #Implementation by LAPM
colD = D.shape[1]
incie=0
if rowex==1:
incie=0
else:
incie=1
et=np.zeros([rowed,colD])
es=np.zeros([rowed,colD])
ie=0
ee=np.zeros([colD,1])
for i in range(rowed):
C = np.matrix(
[[1, ex[ie,0], ey[ie,0], 0, 0, 0 ],
[0, 0, 0, 1, ex[ie,0], ey[ie,0] ],
[1, ex[ie,1], ey[ie,1], 0, 0, 0 ],
[0, 0, 0, 1, ex[ie,1], ey[ie,1] ],
[1, ex[ie,2], ey[ie,2], 0, 0, 0 ],
[0, 0, 0, 1, ex[ie,2], ey[ie,2] ]]
)
B = np.matrix([
[0,1,0,0,0,0],
[0,0,0,0,0,1],
[0,0,1,0,1,0]])*np.linalg.inv(C)
e=B*np.asmatrix(ed[ie,:]).T
if colD>3:
ee[[0,1,3]]=e
else:
ee=e
et[ie,:] = ee.T
es[ie,:] = (D*ee).T
ie = ie + incie
return es, et
else:
print("Error ! Check first argument, ptype=1 or 2 allowed")
return None
def plantf(ex,ey,ep,es):
"""
Compute internal element force vector in a triangular element
in plane stress or plane strain.
Parameters:
ex = [x1,x2,x3] node coordinates
ey = [y1,y2,y3]
ep = [ptype,t] ptype: analysis type
t: thickness
es = [[sigx,sigy,[sigz],tauxy] element stress matrix
[ ...... ]] one row for each element
OUTPUT:
fe = [[f1],[f2],...,[f8]] internal force vector
"""
ptype,t = ep
colD = es.shape[1]
#--------- plane stress --------------------------------------
if ptype == 1:
C = np.mat([
[ 1, ex[0], ey[0], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[0], ey[0]],
[ 1, ex[1], ey[1], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[1], ey[1]],
[ 1, ex[2], ey[2], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[2], ey[2]]
])
A = 0.5*np.linalg.det(np.mat([
[ 1, ex[0], ey[0]],
[ 1, ex[1], ey[1]],
[ 1, ex[2], ey[2]]
]))
B = np.mat([
[ 0, 1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 1],
[ 0, 0, 1, 0, 1, 0]
])*np.linalg.inv(C)
if colD > 3:
stress = np.asmatrix(es[np.ix_((0,1,3))])
else:
stress = np.asmatrix(es)
ef = (A*t*B.T*stress.T).T
return np.reshape(np.asarray(ef),6)
#--------- plane strain --------------------------------------
elif ptype == 2:
C = np.mat([
[ 1, ex[0], ey[0], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[0], ey[0]],
[ 1, ex[1], ey[1], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[1], ey[1]],
[ 1, ex[2], ey[2], 0, 0, 0 ],
[ 0, 0, 0, 1, ex[2], ey[2]]
])
A = 0.5*np.linalg.det(np.mat([
[ 1, ex[0], ey[0]],
[ 1, ex[1], ey[1]],
[ 1, ex[2], ey[2]]
]))
B = np.mat([
[ 0, 1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 1],
[ 0, 0, 1, 0, 1, 0]
])*np.linalg.inv(C)
if colD > 3:
stress = np.asmatrix(es[np.ix_((1,2,4))])
else:
stress = np.asmatrix(es)
ef = (A*t*B.T*stress.T).T
return np.reshape(np.asarray(ef),6)
else:
cfinfo("Error ! Check first argument, ptype=1 or 2 allowed")
return None
def platre(ex,ey,ep,D,eq=None):
"""
Calculate the stiffness matrix for a rectangular plate element.
NOTE! Element sides must be parallel to the coordinate axis.
Parameters:
ex = [x1,x2,x3,x4] element coordinates
ey = [y1,y2,y3,y4]
        ep = [t] thickness
D constitutive matrix for
plane stress
eq = [qz] load/unit area
Returns:
Ke element stiffness matrix (12 x 12)
fe equivalent nodal forces (12 x 1)
"""
Lx = (ex[2]-ex[0]).astype(float)
Ly = (ey[2]-ey[0]).astype(float)
t = ep[0]
D = t**3/12.*D
A1 = Ly/(Lx**3)
A2 = Lx/(Ly**3)
A3 = 1/Lx/Ly
A4 = Ly/(Lx**2)
A5 = Lx/(Ly**2)
A6 = 1/Lx
A7 = 1/Ly
A8 = Ly/Lx
A9 = Lx/Ly
C1 = 4*A1*D[0,0]+4*A2*D[1,1]+2*A3*D[0,1]+5.6*A3*D[2,2]
C2 = -4*A1*D[0,0]+2*A2*D[1,1]-2*A3*D[0,1]-5.6*A3*D[2,2]
C3 = 2*A1*D[0,0]-4*A2*D[1,1]-2*A3*D[0,1]-5.6*A3*D[2,2]
C4 = -2*A1*D[0,0]-2*A2*D[1,1]+2*A3*D[0,1]+5.6*A3*D[2,2]
C5 = 2*A5*D[1,1]+A6*D[0,1]+0.4*A6*D[2,2]
C6 = 2*A4*D[0,0]+A7*D[0,1]+0.4*A7*D[2,2]
C7 = 2*A5*D[1,1]+0.4*A6*D[2,2]
C8 = 2*A4*D[0,0]+0.4*A7*D[2,2]
C9 = A5*D[1,1]-A6*D[0,1]-0.4*A6*D[2,2]
C10 = A4*D[0,0]-A7*D[0,1]-0.4*A7*D[2,2]
C11 = A5*D[1,1]-0.4*A6*D[2,2]
C12 = A4*D[0,0]-0.4*A7*D[2,2]
C13 = 4/3.*A9*D[1,1]+8/15.*A8*D[2,2]
C14 = 4/3.*A8*D[0,0]+8/15.*A9*D[2,2]
C15 = 2/3.*A9*D[1,1]-8/15.*A8*D[2,2]
C16 = 2/3.*A8*D[0,0]-8/15.*A9*D[2,2]
C17 = 2/3.*A9*D[1,1]-2/15.*A8*D[2,2]
C18 = 2/3.*A8*D[0,0]-2/15.*A9*D[2,2]
C19 = 1/3.*A9*D[1,1]+2/15.*A8*D[2,2]
C20 = 1/3.*A8*D[0,0]+2/15.*A9*D[2,2]
C21 = D[0,1]
Keq = np.mat(np.zeros((12,12)))
Keq[0,0:13] = C1,C5,-C6,C2,C9,-C8,C4,C11,-C12,C3,C7,-C10
Keq[1,1:13] = C13,-C21,C9,C15,0,-C11,C19,0,-C7,C17,0
Keq[2,2:13] = C14,C8,0,C18,C12,0,C20,-C10,0,C16
Keq[3,3:13] = C1,C5,C6,C3,C7,C10,C4,C11,C12
Keq[4,4:13] = C13,C21,-C7,C17,0,-C11,C19,0
Keq[5,5:13] = C14,C10,0,C16,-C12,0,C20
Keq[6,6:13] = C1,-C5,C6,C2,-C9,C8
Keq[7,7:13] = C13,-C21,-C9,C15,0
Keq[8,8:13] = C14,-C8,0,C18
Keq[9,9:13] = C1,-C5,-C6
Keq[10,10:13] = C13,C21
Keq[11,11] = C14
Keq = Keq.T+Keq-np.diag(np.diag(Keq))
    if eq is not None:
q = eq
R1 = q*Lx*Ly/4
R2 = q*Lx*Ly**2/24
R3 = q*Ly*Lx**2/24
feq = np.mat([R1,R2,-R3,R1,R2,R3,R1,-R2,R3,R1,-R2,-R3])
    if eq is not None:
return Keq,feq
else:
return Keq
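# Usage sketch for platre (illustrative values; element sides must be axis-parallel):
#   ex = np.array([0., 1., 1., 0.]); ey = np.array([0., 0., 1., 1.])
#   Keq, feq = platre(ex, ey, [0.02], hooke(1, 210e9, 0.3), eq=1e3)   # 12x12 and 1x12
#   Keq = platre(ex, ey, [0.02], hooke(1, 210e9, 0.3))                # stiffness only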
def planqe(ex,ey,ep,D,eq=None):
"""
Calculate the stiffness matrix for a quadrilateral
plane stress or plane strain element.
Parameters:
ex=[x1 x2 x3 x4] element coordinates
ey=[y1 y2 y3 y4]
ep = [ptype, t] ptype: analysis type
t: element thickness
D constitutive matrix
eq = [bx; bx: body force in x direction
by] by: body force in y direction
OUTPUT: Ke : element stiffness matrix (8 x 8)
fe : equivalent nodal forces (row array)
"""
K=np.zeros((10,10))
f=np.zeros((10,1))
xm=sum(ex)/4.
ym=sum(ey)/4.
b1 = eq if eq is not None else np.array([[0],[0]])
ke1, fe1 = plante(np.array([ex[0], ex[1], xm]), np.array([ey[0], ey[1], ym]), ep, D, b1)
K, f = assem(np.array([1, 2, 3, 4, 9, 10]), K, ke1, f, fe1)
ke1, fe1 = plante(np.array([ex[1], ex[2], xm]), np.array([ey[1], ey[2], ym]), ep, D, b1)
K, f = assem(np.array([3, 4, 5, 6, 9, 10]), K, ke1, f, fe1)
ke1, fe1 = plante(np.array([ex[2], ex[3], xm]), np.array([ey[2], ey[3], ym]), ep, D, b1)
K, f = assem(np.array([5, 6, 7, 8, 9, 10]), K, ke1, f, fe1)
ke1, fe1 = plante(np.array([ex[3], ex[0], xm]), np.array([ey[3], ey[0], ym]), ep, D, b1)
K, f = assem(np.array([7, 8, 1, 2, 9, 10]), K, ke1, f, fe1)
Ke, fe = statcon(K, f, np.array([[9],[10]]))
    if eq is None:
return Ke
else:
return Ke,fe
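# Usage sketch for planqe (illustrative; four corner nodes ordered as in the docstring):
#   ex = np.array([0., 1., 1., 0.]); ey = np.array([0., 0., 1., 1.])
#   Ke = planqe(ex, ey, [1, 0.01], hooke(1, 210e9, 0.3))   # 8x8 condensed stiffness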
def planqs(ex,ey,ep,D,ed,eq=None):
"""
Calculate element normal and shear stress for a quadrilateral
plane stress or plane strain element.
Parameters:
ex = [x1 x2 x3 x4] element coordinates
ey = [y1 y2 y3 y4]
ep = [ptype, t] ptype: analysis type
t: thickness
D constitutive matrix
ed = [u1 u2 ..u8] element displacement vector
eq = [[bx] bx: body force in x direction
[by]] by: body force in y direction
OUTPUT: es = [ sigx sigy (sigz) tauxy] element stress array
et = [ epsx epsy (epsz) gamxy] element strain array
"""
if ex.shape != (4,) or ey.shape != (4,) or ed.shape != (8,):
raise ValueError('Error ! PLANQS: only one element at the time (ex, ey, ed must be a row arrays)')
K = np.zeros((10,10))
f = np.zeros((10,1))
xm = sum(ex)/4.
ym = sum(ey)/4.
b1 = eq if eq is not None else np.array([[0],[0]])
ex1 = np.array([ex[0], ex[1], xm])
ey1 = np.array([ey[0], ey[1], ym])
ex2 = np.array([ex[1], ex[2], xm])
ey2 = np.array([ey[1], ey[2], ym])
ex3 = np.array([ex[2], ex[3], xm])
ey3 = np.array([ey[2], ey[3], ym])
ex4 = np.array([ex[3], ex[0], xm])
ey4 = np.array([ey[3], ey[0], ym])
ke1, fe1 = plante(ex1, ey1, ep, D, b1)
K, f = assem(np.array([1, 2, 3, 4, 9, 10]), K, ke1, f, fe1)
ke1,fe1 = plante(ex2, ey2, ep, D, b1)
K, f = assem(np.array([3, 4, 5, 6, 9, 10]), K, ke1, f, fe1)
ke1, fe1 = plante(ex3, ey3, ep, D, b1)
K, f = assem(np.array([5, 6, 7, 8, 9, 10]), K, ke1, f, fe1)
ke1, fe1 = plante(ex4, ey4, ep, D, b1)
K, f = assem(np.array([7, 8, 1, 2, 9, 10]), K, ke1, f, fe1)
A1 = 0.5 * np.linalg.det( np.hstack([np.ones((3,1)), np.mat(ex1).T, np.mat(ey1).T]) )
A2 = 0.5 * np.linalg.det( np.hstack([np.ones((3,1)), np.mat(ex2).T, np.mat(ey2).T]) )
A3 = 0.5 * np.linalg.det( np.hstack([np.ones((3,1)), np.mat(ex3).T, np.mat(ey3).T]) )
A4 = 0.5 * np.linalg.det( np.hstack([np.ones((3,1)), np.mat(ex4).T, np.mat(ey4).T]) )
    Atot = A1+A2+A3+A4
a, _ = solveq(K, f, np.array(range(1,9)), ed)
# ni = ed.shape[0]
# a = np.mat(empty((10,ni)))
# for i in range(ni):
# a[:,i] = solveq(K, f, np.array(range(1,9)), ed[i,:])[0]
# #a = np.hstack([a, solveq(K, f, np.hstack([matrix(range(1,9)).T, ed[i,:].T]) ) ])
    s1, t1 = plants(ex1, ey1, ep, D, np.hstack([a[[0, 1, 2, 3, 8, 9], :].T]))
    s2, t2 = plants(ex2, ey2, ep, D, np.hstack([a[[2, 3, 4, 5, 8, 9], :].T]))
    s3, t3 = plants(ex3, ey3, ep, D, np.hstack([a[[4, 5, 6, 7, 8, 9], :].T]))
    s4, t4 = plants(ex4, ey4, ep, D, np.hstack([a[[6, 7, 0, 1, 8, 9], :].T]))
    es = (s1*A1+s2*A2+s3*A3+s4*A4)/Atot
    et = (t1*A1+t2*A2+t3*A3+t4*A4)/Atot
return es[0], et[0] #[0] because these are 1-by-3 arrays and we want row arrays out.
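# Usage sketch for planqs (illustrative; ed is the 8-entry element displacement row):
#   es, et = planqs(ex, ey, [1, 0.01], hooke(1, 210e9, 0.3), ed)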
def plani4e(ex,ey,ep,D,eq=None):
"""
Calculate the stiffness matrix for a 4 node isoparametric
element in plane strain or plane stress.
Parameters:
ex = [x1 ... x4] element coordinates. Row array
ey = [y1 ... y4]
ep =[ptype, t, ir] ptype: analysis type
t : thickness
ir: integration rule
D constitutive matrix
eq = [bx; by] bx: body force in x direction
by: body force in y direction
Any array with 2 elements acceptable
Returns:
Ke : element stiffness matrix (8 x 8)
fe : equivalent nodal forces (8 x 1)
"""
ptype=ep[0]
t=ep[1]
ir=ep[2]
ngp=ir*ir
    if eq is None:
q = np.zeros((2,1))
else:
q = np.reshape(eq, (2,1))
#--------- gauss points --------------------------------------
if ir == 1:
g1 = 0.0
w1 = 2.0
gp = np.mat([g1,g1])
w = np.mat([w1,w1])
elif ir == 2:
g1 = 0.577350269189626
w1 = 1
gp = np.mat([
[-g1,-g1],
[ g1,-g1],
[-g1, g1],
[ g1, g1]])
w = np.mat([
[ w1, w1],
[ w1, w1],
[ w1, w1],
[ w1, w1]])
elif ir == 3:
g1 = 0.774596669241483
g2 = 0.
w1 = 0.555555555555555
w2 = 0.888888888888888
gp = np.mat([
[-g1,-g1],
[-g2,-g1],
[ g1,-g1],
[-g1, g2],
[ g2, g2],
[ g1, g2],
[-g1, g1],
[ g2, g1],
[ g1, g1]])
w = np.mat([
[ w1, w1],
[ w2, w1],
[ w1, w1],
[ w1, w2],
[ w2, w2],
[ w1, w2],
[ w1, w1],
[ w2, w1],
[ w1, w1]])
else:
cfinfo("Used number of integrat ion points not implemented")
wp = np.multiply(w[:,0],w[:,1])
xsi = gp[:,0]
eta = gp[:,1]
r2 = ngp*2
# Shape Functions
N = np.multiply((1-xsi),(1-eta))/4.
N = np.append(N,np.multiply((1+xsi),(1-eta))/4.,axis=1)
N = np.append(N,np.multiply((1+xsi),(1+eta))/4.,axis=1)
N = np.append(N,np.multiply((1-xsi),(1+eta))/4.,axis=1)
dNr = np.mat(np.zeros((r2,4)))
dNr[0:r2:2,0] = -(1-eta)/4.
dNr[0:r2:2,1] = (1-eta)/4.
dNr[0:r2:2,2] = (1+eta)/4.
dNr[0:r2:2,3] = -(1+eta)/4.
dNr[1:r2+1:2,0] = -(1-xsi)/4.
dNr[1:r2+1:2,1] = -(1+xsi)/4.
dNr[1:r2+1:2,2] = (1+xsi)/4.
dNr[1:r2+1:2,3] = (1-xsi)/4.
#
Ke1 = np.mat(np.zeros((8,8)))
fe1 = np.mat(np.zeros((8,1)))
JT = dNr*np.mat([ex,ey]).T
# --------- plane stress --------------------------------------
if ptype==1:
colD=np.shape(D)[0]
if colD>3:
Cm=np.linalg.inv(D)
Dm=np.linalg.inv(Cm[ np.ix_([0,1,3],[0,1,3]) ])
else:
Dm=D
#
B=np.matrix(np.zeros((3,8)))
N2=np.matrix(np.zeros((2,8)))
for i in range(ngp):
indx = np.array([2*(i+1)-1,2*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobi determinant equal or less than zero!")
JTinv = np.linalg.inv(JT[indx-1,:])
dNx=JTinv*dNr[indx-1,:]
#
index_array_even=np.array([0,2,4,6])
index_array_odd=np.array([1,3,5,7])
#
counter=0
for index in index_array_even:
B[0,index] = dNx[0,counter]
B[2,index] = dNx[1,counter]
N2[0,index]=N[i,counter]
counter=counter+1
#
counter=0
for index in index_array_odd:
B[1,index] = dNx[1,counter]
B[2,index] = dNx[0,counter]
N2[1,index] =N[i,counter]
counter=counter+1
#
Ke1 = Ke1+B.T*Dm*B*detJ*np.asscalar(wp[i])*t
fe1 = fe1 + N2.T * q * detJ * np.asscalar(wp[i]) * t
return Ke1,fe1
#--------- plane strain --------------------------------------
elif ptype==2:
#
colD=np.shape(D)[0]
if colD>3:
Dm = D[np.ix_([0,1,3],[0,1,3])]
else:
Dm = D
#
B=np.matrix(np.zeros((3,8)))
N2=np.matrix(np.zeros((2,8)))
for i in range(ngp):
indx = np.array([2*(i+1)-1,2*(i+1)])
detJ = np.linalg.det(JT[indx-1,:])
if detJ < 10*np.finfo(float).eps:
cfinfo("Jacobideterminant equal or less than zero!")
JTinv = np.linalg.inv(JT[indx-1,:])
dNx=JTinv*dNr[indx-1,:]
#
index_array_even=np.array([0,2,4,6])
index_array_odd=np.array([1,3,5,7])
#
counter=0
for index in index_array_even:
#
B[0,index] = dNx[0,counter]
B[2,index] = dNx[1,counter]
N2[0,index]=N[i,counter]
#
counter=counter+1
#
counter=0
for index in index_array_odd:
B[1,index] = dNx[1,counter]
B[2,index] = dNx[0,counter]
N2[1,index] =N[i,counter]
counter=counter+1
#
Ke1 = Ke1 + B.T * Dm * B * detJ * np.asscalar(wp[i]) * t
fe1 = fe1+N2.T*q*detJ*np.asscalar(wp[i])*t
return Ke1,fe1
else:
cfinfo("Error ! Check first argument, ptype=1 or 2 allowed")
def assem(edof,K,Ke,f=None,fe=None):
"""
Assemble element matrices Ke ( and fe ) into the global
stiffness matrix K ( and the global force vector f )
according to the topology matrix edof.
Parameters:
edof dof topology array
K the global stiffness matrix
Ke element stiffness matrix
f the global force vector
fe element force vector
Output parameters:
K the new global stiffness matrix
        f the new global force vector (returned only when f and fe are given)
"""
if edof.ndim == 1:
idx = edof-1
K[np.ix_(idx,idx)] = K[np.ix_(idx,idx)] + Ke
if (not f is None) and (not fe is None):
f[np.ix_(idx)] = f[np.ix_(idx)] + fe
else:
for row in edof:
idx = row-1
K[np.ix_(idx,idx)] = K[np.ix_(idx,idx)] + Ke
if (not f is None) and (not fe is None):
f[np.ix_(idx)] = f[np.ix_(idx)] + fe
if f is None:
return K
else:
return K,f
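# Usage sketch for assem (illustrative; edof rows hold 1-based global dof numbers):
#   K = np.zeros((nDofs, nDofs)); f = np.zeros((nDofs, 1))   # nDofs assumed defined
#   K, f = assem(edof, K, Ke, f, fe)                         # adds each element contribution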
def solveq(K,f,bcPrescr,bcVal=None):
"""
Solve static FE-equations considering boundary conditions.
Parameters:
K global stiffness matrix, dim(K)= nd x nd
f global load vector, dim(f)= nd x 1
bcPrescr 1-dim integer array containing prescribed dofs.
bcVal 1-dim float array containing prescribed values.
If not given all prescribed dofs are assumed 0.
Returns:
a solution including boundary values
Q reaction force vector
dim(a)=dim(Q)= nd x 1, nd : number of dof's
"""
nDofs = K.shape[0]
nPdofs = bcPrescr.shape[0]
if bcVal is None:
bcVal = np.zeros([nPdofs],'d')
bc = np.ones(nDofs, 'bool')
bcDofs = np.arange(nDofs)
bc[np.ix_(bcPrescr-1)] = False
bcDofs = bcDofs[bc]
fsys = f[bcDofs]-K[np.ix_((bcDofs),(bcPrescr-1))]*np.asmatrix(bcVal).reshape(nPdofs,1)
asys = np.linalg.solve(K[np.ix_((bcDofs),(bcDofs))], fsys);
a = np.zeros([nDofs,1])
a[np.ix_(bcPrescr-1)] = np.asmatrix(bcVal).reshape(nPdofs,1)
a[np.ix_(bcDofs)] = asys
Q=K*np.asmatrix(a)-f
return (np.asmatrix(a),Q)
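# Usage sketch for solveq (illustrative; dof numbers are 1-based as in edof):
#   a, Q = solveq(K, f, np.array([1, 2, 3, 4]))   # dofs 1-4 fixed to zero by default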
def spsolveq(K,f,bcPrescr,bcVal=None):
"""
Solve static FE-equations considering boundary conditions.
Parameters:
K global stiffness matrix, dim(K)= nd x nd
f global load vector, dim(f)= nd x 1
bcPrescr 1-dim integer array containing prescribed dofs.
bcVal 1-dim float array containing prescribed values.
If not given all prescribed dofs are assumed 0.
Returns:
a solution including boundary values
Q reaction force vector
dim(a)=dim(Q)= nd x 1, nd : number of dof's
"""
nDofs = K.shape[0]
nPdofs = bcPrescr.shape[0]
if bcVal is None:
bcVal = np.zeros([nPdofs],'d')
bc = np.ones(nDofs, 'bool')
bcDofs = np.arange(nDofs)
bc[np.ix_(bcPrescr-1)] = False
bcDofs = bcDofs[bc]
bcVal_m = np.asmatrix(bcVal).reshape(nPdofs,1)
info("Preparing system matrix...")
mask = np.ones(K.shape[0], dtype=bool)
mask[bcDofs] = False
info("step 1... converting K->CSR")
Kcsr = K.asformat("csr")
info("step 2... Kt")
#Kt1 = K[bcDofs]
#Kt = Kt1[:,bcPrescr]
Kt = K[np.ix_((bcDofs),(bcPrescr-1))]
info("step 3... fsys")
fsys = f[bcDofs]-Kt*bcVal_m
info("step 4... Ksys")
Ksys1 = Kcsr[bcDofs]
Ksys = Ksys1[:,bcDofs]
#Ksys = Kcsr[np.ix_((bcDofs),(bcDofs))]
info ("done...")
info("Solving system...")
asys = dsolve.spsolve(Ksys, fsys);
info("Reconstructing full a...")
a = np.zeros([nDofs,1])
a[np.ix_(bcPrescr-1)] = bcVal_m
a[np.ix_(bcDofs)] = np.asmatrix(asys).transpose()
a_m = np.asmatrix(a)
Q=K*a_m-f
info("done...")
return (a_m,Q)
def extractEldisp(edof,a):
"""
Extract element displacements from the global displacement
vector according to the topology matrix edof.
Parameters:
a the global displacement vector
edof dof topology array
Returns:
ed: element displacement array
"""
ed = None
if edof.ndim==1:
nDofs = len(edof)
ed = np.zeros([nDofs])
idx = edof-1
ed[:] = a[np.ix_(idx)].T
else:
nElements = edof.shape[0]
nDofs = edof.shape[1]
ed = np.zeros([nElements,nDofs])
i=0
for row in edof:
idx = row-1
ed[i,:]=a[np.ix_(idx)].T
i+=1
return ed
extract_eldisp = extractEldisp
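# Usage sketch: ed = extract_eldisp(edof, a) gives one element displacement row per
# edof row, with a as returned by solveq (illustrative; assumes edof and a exist).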
def statcon(K,f,cd):
"""
Condensation of static FE-equations according to the vector cd.
Parameters:
K global stiffness matrix, dim(K) = nd x nd
f global load vector, dim(f)= nd x 1
cd vector containing dof's to be eliminated
dim(cd)= nc x 1, nc: number of condensed dof's
Returns:
K1 condensed stiffness matrix,
dim(K1)= (nd-nc) x (nd-nc)
f1 condensed load vector, dim(f1)= (nd-nc) x 1
"""
nd,nd = np.shape(K)
cd = (cd-1).flatten()
aindx = np.arange(nd)
aindx = np.delete(aindx,cd,0)
bindx = cd
Kaa = np.mat(K[np.ix_(aindx,aindx)])
Kab = np.mat(K[np.ix_(aindx,bindx)])
Kbb = np.mat(K[np.ix_(bindx,bindx)])
fa = np.mat(f[aindx])
fb = np.mat(f[bindx])
K1 = Kaa-Kab*Kbb.I*Kab.T
f1 = fa-Kab*Kbb.I*fb
return K1,f1
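# Usage sketch for statcon (illustrative; condenses internal dofs 9 and 10 as planqe does):
#   K1, f1 = statcon(K, f, np.array([[9], [10]]))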
def c_mul(a, b):
    # Python 3 replacement for the old Python 2 idiom that stripped the trailing 'L'
    # from hex(long); keeps the product within 32 bits.
    return (int(a) * int(b)) & 0xFFFFFFFF
def dofHash(dof):
if len(dof)==1:
return dof[0]
value = 0x345678
for item in dof:
value = c_mul(1000003, value) ^ hash(item)
value = value ^ len(dof)
if value == -1:
value = -2
return value
def createdofs(nCoords,nDof):
"""
Create dof array [nCoords x nDof]
"""
return np.arange(nCoords*nDof).reshape(nCoords,nDof)+1
def coordxtr(edof,coords,dofs):
"""
Create element coordinate matrices ex, ey, ez from edof
coord and dofs matrices.
Parameters:
edof [nel x (nen * nnd)], nnd = number of node dofs
coords [ncoords x ndims], ndims = node dimensions
dofs [ncoords x nnd]
Returns:
ex if ndims = 1
ex, ey if ndims = 2
ex, ey, ez if ndims = 3
"""
# Create dictionary with dof indices
dofDict = {}
nDofs = np.size(dofs,1)
nElements = np.size(edof,0)
nDimensions = np.size(coords,1)
nElementDofs = np.size(edof,1)
nElementNodes = int(nElementDofs/nDofs)
idx = 0
for dof in dofs:
dofDict[dofHash(dof)] = idx
idx += 1
# Loop over edof and extract element coords
ex = np.zeros((nElements,nElementNodes))
ey = np.zeros((nElements,nElementNodes))
ez = np.zeros((nElements,nElementNodes))
elementIdx = 0
for etopo in edof:
for i in range(nElementNodes):
i0 = i*nDofs
i1 = i*nDofs+nDofs-1
dof = []
if i0==i1:
dof = [etopo[i*nDofs]]
else:
dof = etopo[i*nDofs:(i*nDofs+nDofs)]
nodeCoord = coords[dofDict[dofHash(dof)]]
if nDimensions>=1:
ex[elementIdx,i] = nodeCoord[0]
if nDimensions>=2:
ey[elementIdx,i] = nodeCoord[1]
if nDimensions>=3:
ez[elementIdx,i] = nodeCoord[2]
elementIdx += 1
if nDimensions==1:
return ex
if nDimensions==2:
return ex, ey
if nDimensions==3:
return ex, ey, ez
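# Usage sketch for coordxtr (illustrative; assumes coords and edof defined elsewhere):
#   dofs = createdofs(coords.shape[0], 2)    # two dofs per node
#   ex, ey = coordxtr(edof, coords, dofs)    # 2D model -> x and y element coordinates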
def hooke(ptype,E,v):
"""
Calculate the material matrix for a linear
elastic and isotropic material.
Parameters:
ptype= 1: plane stress
2: plane strain
3: axisymmetry
4: three dimensional
E Young's modulus
        v Poisson's ratio
Returns:
D material matrix
"""
if ptype == 1:
D = E*np.matrix(
[[1, v, 0],
[v, 1, 0],
[0, 0, (1-v)/2]]
)/(1-v**2);
elif ptype == 2:
D = E/(1+v)*np.matrix(
[[1-v, v, v, 0],
[v, 1-v, v, 0],
[v, v, 1-v, 0],
[0, 0, 0, (1-2*v)/2]]
)/(1-2*v)
elif ptype == 3:
D = E/(1+v)*np.matrix(
[[1-v, v, v, 0],
[v, 1-v, v, 0],
[v, v, 1-v, 0],
[0, 0, 0, (1-2*v)/2]]
)/(1-2*v)
elif ptype == 4:
D = E*np.matrix(
[[1-v, v, v, 0, 0, 0],
[v, 1-v, v, 0, 0, 0],
[v, v, 1-v, 0, 0, 0],
[0, 0, 0, (1-2*v)/2, 0, 0],
[0, 0, 0, 0, (1-2*v)/2, 0],
[0, 0, 0, 0, 0, (1-2*v)/2]]
)/(1+v)/(1-2*v)
else:
cfinfo("ptype not supported.")
return D
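# Usage sketch for hooke (illustrative material values):
#   D = hooke(1, 210e9, 0.3)   # 3x3 plane stress matrix
#   D = hooke(4, 210e9, 0.3)   # 6x6 matrix for the three-dimensional case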
def effmises(es,ptype):
"""
Calculate effective von mises stresses.
Parameters:
es
ptype= 1: plane stress
2: plane strain
3: axisymmetry
4: three dimensional
es = [[sigx,sigy,[sigz],tauxy] element stress matrix
[ ...... ]] one row for each element
Returns:
eseff = [eseff_0 .. eseff_nel-1]
"""
nel = np.size(es,0)
escomps = np.size(es, 1)
eseff = np.zeros([nel])
if ptype == 1:
sigxx = es[:,0]
sigyy = es[:,1]
sigxy = es[:,2]
eseff = np.sqrt(sigxx*sigxx+sigyy*sigyy-sigxx*sigyy+3*sigxy*sigxy)
return eseff
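# Usage sketch for effmises (illustrative; es has one [sigx, sigy, tauxy] row per element;
# only plane stress, ptype=1, is handled above):
#   eseff = effmises(es, 1)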
def stress2nodal(eseff, edof):
"""
Convert element effective stresses to nodal effective
stresses.
Parameters:
eseff = [eseff_0 .. eseff_nel-1]
edof = [dof topology array]
Returns:
ev: element value array [[ev_0_0 ev_0_1 ev_0_nen-1 ]
..
ev_nel-1_0 ev_nel-1_1 ev_nel-1_nen-1]
"""
values = np.zeros(edof.max())
elnodes = int(np.size(edof,1) / 2)
for etopo, eleseff in zip(edof, eseff):
values[etopo-1] = values[etopo-1] + eleseff / elnodes
evtemp = extractEldisp(edof,values)
ev = evtemp[:,range(0,elnodes*2,2)]
return ev
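# Usage sketch for stress2nodal (illustrative; edof with two dofs per node as assumed above):
#   ev = stress2nodal(effmises(es, 1), edof)   # nodal effective stress per element row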
| 29.742019
| 132
| 0.412453
|
a3710f78f5947a4c5dffcd1a70622f6f23770c18
| 2,331
|
py
|
Python
|
keyboards/default/admin_keyboard.py
|
itcosplay/cryptobot
|
6890cfde64a631bf0e4db55f6873a2217212d801
|
[
"MIT"
] | null | null | null |
keyboards/default/admin_keyboard.py
|
itcosplay/cryptobot
|
6890cfde64a631bf0e4db55f6873a2217212d801
|
[
"MIT"
] | null | null | null |
keyboards/default/admin_keyboard.py
|
itcosplay/cryptobot
|
6890cfde64a631bf0e4db55f6873a2217212d801
|
[
"MIT"
] | null | null | null |
from aiogram.types import ReplyKeyboardMarkup, KeyboardButton
def create_kb_coustom_main_menu(user_id):
from loader import db
from data.config import super_admins
if not user_id in super_admins:
user_status = db.get_user_status(id=user_id)
else:
user_status = 'admin'
keyboard = ReplyKeyboardMarkup()
if user_status == 'admin':
keyboard.add(KeyboardButton(text='права пользователей'))
keyboard.insert(KeyboardButton(text='информация о смс'))
keyboard.add(KeyboardButton(text='создать заявку'))
keyboard.insert(KeyboardButton(text='в работе'))
keyboard.add(KeyboardButton(text='пропуска'))
keyboard.insert(KeyboardButton(text='создать пропуск'))
keyboard.add(KeyboardButton(text='балансы'))
keyboard.insert(KeyboardButton(text='отчетность'))
elif user_status == 'changer':
keyboard.add(KeyboardButton(text='создать заявку'))
keyboard.insert(KeyboardButton(text='в работе'))
keyboard.add(KeyboardButton(text='пропуска'))
keyboard.insert(KeyboardButton(text='создать пропуск'))
keyboard.add(KeyboardButton(text='балансы'))
keyboard.insert(KeyboardButton(text='отчетность'))
elif user_status == 'executor':
keyboard.add(KeyboardButton(text='в работе'))
keyboard.add(KeyboardButton(text='балансы'))
keyboard.add(KeyboardButton(text='отчетность'))
elif user_status == 'secretary':
keyboard.add(KeyboardButton(text='информация о смс'))
keyboard.add(KeyboardButton(text='пропуска'))
keyboard.add(KeyboardButton(text='создать пропуск'))
elif user_status == 'permit':
keyboard.add(KeyboardButton(text='создать пропуск'))
else:
pass
keyboard.resize_keyboard = True
keyboard.one_time_keyboard = True
return keyboard
# main_menu = ReplyKeyboardMarkup (
# keyboard = [
# [
# KeyboardButton(text='права пользователей'),
# KeyboardButton(text='информация о смс')
# ],
# [
# KeyboardButton(text='создать заявку'),
# KeyboardButton(text='в работе')
# ],
# [
# KeyboardButton(text='пропуска'),
# ]
# ],
# resize_keyboard=True,
# one_time_keyboard=True
# )
| 30.671053
| 64
| 0.651223
|
97c9bb83ce4450206a2d2c317cdb9eb877ecaaec
| 93
|
py
|
Python
|
cbe/cbe/project/apps.py
|
cdaf/cbe
|
7945a3fad11ae4612e22163094571ac9157dca7f
|
[
"Apache-2.0"
] | 3
|
2019-02-26T19:54:51.000Z
|
2021-03-23T02:57:02.000Z
|
cbe/cbe/project/apps.py
|
cdaf/cbe
|
7945a3fad11ae4612e22163094571ac9157dca7f
|
[
"Apache-2.0"
] | 6
|
2016-12-23T02:11:21.000Z
|
2018-09-30T18:50:59.000Z
|
cbe/cbe/project/apps.py
|
cdaf/cbe
|
7945a3fad11ae4612e22163094571ac9157dca7f
|
[
"Apache-2.0"
] | 4
|
2017-02-11T04:40:52.000Z
|
2020-10-12T22:22:54.000Z
|
from django.apps import AppConfig
class ProjectConfig(AppConfig):
name = 'cbe.project'
| 15.5
| 33
| 0.752688
|
be6755846b549048e312b62b83ca9c743769cfb1
| 10,547
|
py
|
Python
|
craamvert/instruments/poemas/poemas.py
|
craam/craamvert
|
ce776346844c83037eb552bdbffb9d5b5c7c9b9d
|
[
"MIT"
] | null | null | null |
craamvert/instruments/poemas/poemas.py
|
craam/craamvert
|
ce776346844c83037eb552bdbffb9d5b5c7c9b9d
|
[
"MIT"
] | null | null | null |
craamvert/instruments/poemas/poemas.py
|
craam/craamvert
|
ce776346844c83037eb552bdbffb9d5b5c7c9b9d
|
[
"MIT"
] | 1
|
2021-03-26T22:53:43.000Z
|
2021-03-26T22:53:43.000Z
|
from craamvert.instruments import HISTORY, CONVERTED_WITH_FITS_LEVEL
from instruments.utils.fits_handlers import set_fits_file_name_and_output_path
from craamvert.instruments.poemas import POEMASDataType, POEMAS_FITS_FILE_NAME
from craamvert.instruments.poemas.utils.create_hdu import create_data_hdu
from craamvert.utils import CANT_CONVERT_FITS_LEVEL, POEMAS_INSTRUMENT, TRK_TYPE, \
COULDNT_MATCH_CONVERTED_DATA_TO_INSTRUMENT
import numpy as np
from astropy.io import fits
from instruments.instrument import Instrument
from instruments.poemas.trk import trk
# Please check python docs to further understand this class
# https://docs.python.org/3/library/abc.html
# https://docs.python.org/3/tutorial/classes.html#inheritance
# https://docs.python.org/3/tutorial/classes.html#tut-private
class POEMAS(Instrument):
def __init__(self):
# Call for Instrument attributes
super().__init__(instrument=POEMAS_INSTRUMENT)
# POEMAS information
self._records = None
# POEMAS Header data is equivalent to:
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
self._poemas_header_column_names = None
self._poemas_header_data = None
# POEMAS Body data is equivalent to:
# sec, ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
self._poemas_body_column_names = None
self._poemas_body_data = None
# Fits information
self._primary_hdu_position = 0
@staticmethod
def open_file(file_name):
poemas_object = POEMAS()
poemas_object._verify_original_file_type(file_name)
poemas_object._verify_original_file_path()
poemas_object._set_path_to_xml()
poemas_object._get_converted_data()
return poemas_object
def write_fits(self, name=None, output_path=None):
# Create fits Binary Header Data Unit (HDU) to keep POEMAS header data
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
poemas_header_hdu = create_data_hdu(self._poemas_header_column_names,
self._poemas_header_data,
POEMASDataType.HEADER)
# Create fits Binary Header Data Unit (HDU) to keep POEMAS data
# sec, ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
poemas_data_hdu = create_data_hdu(self._poemas_body_column_names,
self._poemas_body_data,
POEMASDataType.BODY)
# Create HDU list with all HDUs created until now
hdu_list = fits.HDUList([self._primary_hdu, poemas_header_hdu, poemas_data_hdu])
hdu_list[self._primary_hdu_position].header.append((HISTORY, CONVERTED_WITH_FITS_LEVEL
.format(self._fits_level)))
fits_file_name, fits_output_path = set_fits_file_name_and_output_path(name,
output_path,
self._date,
self._start_time,
self._end_time,
self._original_file_type,
self._fits_level,
POEMAS_FITS_FILE_NAME)
hdu_list.writeto(fits_output_path / fits_file_name)
def _get_converted_data(self):
poemas_available_converters = {
TRK_TYPE: trk.TRK().convert_from_file(self._original_file_path,
self._original_file_name,
self._path_to_xml)
}
converted_data = poemas_available_converters.get(self._original_file_type)
self._match_type_object_attributes_to_instrument_attributes(converted_data)
def _match_type_object_attributes_to_instrument_attributes(self, converted_data):
try:
# Match general information
self._date = converted_data.date
self._time = converted_data.time
self._start_time = converted_data.start_time
self._end_time = converted_data.end_time
self._records = converted_data.records
# Match data information
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
self._poemas_header_column_names = converted_data.header_column_names
self._poemas_header_data = converted_data.header_data
# sec, ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
self._poemas_body_column_names = converted_data.body_column_names
self._poemas_body_data = converted_data.body_data
# Match Fits information
self._primary_hdu = converted_data.primary_hdu
except AttributeError:
print(COULDNT_MATCH_CONVERTED_DATA_TO_INSTRUMENT.format(self._instrument, self._original_file_type))
# -------------------------------------------------------------
# POEMAS specific methods
# -------------------------------------------------------------
def level_1(self):
if self._fits_level != 0:
raise ValueError(CANT_CONVERT_FITS_LEVEL.format(1, self._fits_level, self._fits_level))
# Fits level 1 for POEMAS consists in reducing the data by calculating the median
# from all data inside 1 second mark, meaning that the records will be reduced
# we'll have only seconds registered, instead of milliseconds
# POEMAS body data category size
# sec, ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
body_data_category_size = 7
# 7 arrays to represent each body data category
body_data = [[], [], [], [], [], [], []]
# Here we prepare variables that will be used inside our loop
# We'll keep track of chunks of data, these chunks represents all data inside a second mark
original_data_position = 0
is_inside_data_chunk = False
# Our loop limit will be the size of data
loop_limit = len(self._poemas_body_data[0])
for data_position in range(0, loop_limit):
# Here we check which second mark we're looking
time_data = self._poemas_body_data[0][data_position]
time_data = time_data[3:-3]
# When not inside the data chunk it means that we're looking inside a new second mark
if not is_inside_data_chunk:
initial_second_mark = current_second_mark = time_data
original_data_position = data_position
is_inside_data_chunk = True
else:
current_second_mark = time_data
# When we finish to look inside a second mark, we have to calculate the median
# and store the result inside our new array
if int(initial_second_mark) != int(current_second_mark) or data_position + 1 == loop_limit:
# First we store which second mark we're looking
body_data[0].append(self._poemas_body_data[0][original_data_position])
# Here we calculate the median of all other data
# ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
for field in range(1, body_data_category_size):
slice_section = slice(original_data_position, data_position)
data_chunk = self._poemas_body_data[field][slice_section]
median = np.median(data_chunk)
body_data[field].append(median)
# Then we update our chunk tracker
is_inside_data_chunk = False
# Here we update our object attributes with the new data
self._poemas_body_data = body_data
self._fits_level = 1
# Here we update the header data
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
# Update NRS
self._poemas_header_data[0][1] = len(self._poemas_body_data[0][1])
        # Finally we update our fits level
self._fits_level = 1
def level_2(self, poemas_objects_list):
if self._fits_level != 1:
raise ValueError(CANT_CONVERT_FITS_LEVEL.format(2, self._fits_level, self._fits_level))
        # Fits level 2 for POEMAS consists of grouping all data from a day into a single fits file
# Here we sort all poemas objects from a day, so they will be in ascending order
poemas_objects_list.append(self)
poemas_objects_list.sort(key=lambda poemas_object: poemas_object._start_time)
# 7 arrays to represent each body data category
# sec, ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
body_data = [[], [], [], [], [], [], []]
body_data_category_size = 7
        # Here we group all data from poemas objects into a new array
for poemas_object in poemas_objects_list:
for body_data_category in range(0, body_data_category_size):
body_data[body_data_category].extend(poemas_object._poemas_body_data[body_data_category])
# Here we update our object attributes with the new data
self._poemas_body_data = body_data
# Here we update the header data
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
total_nrs = 0
brt_min_list = []
brt_max_list = []
# Gather all information
for poemas_object in poemas_objects_list:
total_nrs += poemas_object._poemas_header_data[0]["NRS"]
brt_min_list.append(poemas_object._poemas_header_data[0]["BRTMin"])
brt_max_list.append(poemas_object._poemas_header_data[0]["BRTMax"])
# Update header information
self._poemas_header_data[0]["NRS"] = total_nrs
self._poemas_header_data[0]["BRTMin"] = min(brt_min_list)
self._poemas_header_data[0]["BRTMax"] = max(brt_max_list)
# We also update some basic information
self._time = poemas_objects_list[0]._time
self._start_time = poemas_objects_list[0]._start_time
last_object_position = len(poemas_objects_list) - 1
self._end_time = poemas_objects_list[last_object_position]._end_time
# Finally we update our fits level
self._fits_level = 2
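# Usage sketch (hypothetical file name and output path; the TRK reader and the converter
# setting the initial fits level are assumptions based on the methods above):
#   poemas = POEMAS.open_file("sun_scan.TRK")
#   poemas.level_1()                      # reduce records to one median sample per second
#   poemas.write_fits(output_path=".")    # write the level-1 FITS file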
| 44.880851
| 112
| 0.622357
|
d74d2a5ab7628e5f7f1439cda355a77610afa909
| 2,873
|
py
|
Python
|
src/utils/parse.py
|
cedricfarinazzo/ichronos.py
|
ae39dfce7e3e9b1b213e019e726da1145b604ae0
|
[
"MIT"
] | null | null | null |
src/utils/parse.py
|
cedricfarinazzo/ichronos.py
|
ae39dfce7e3e9b1b213e019e726da1145b604ae0
|
[
"MIT"
] | null | null | null |
src/utils/parse.py
|
cedricfarinazzo/ichronos.py
|
ae39dfce7e3e9b1b213e019e726da1145b604ae0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from icalendar import Calendar
from datetime import datetime, timedelta
from models import *
from utils import *
def get_lessons(data):
lessons = []
try:
gcal = Calendar.from_ical(data)
except ValueError:
print("Wrong group name")
return None
for component in gcal.walk():
if component.name == "VEVENT":
matter = component.get('summary')
description = component.get('description')
location = component.get('location')
tsr = component.decoded('dtstart')
te = component.decoded('dtend')
lesson = Lesson(matter, description, location, tsr, te)
lessons.append(lesson)
return lessons
def parse_week(lessons):
lessons.sort()
sday = ""
day = None
weeks = []
week_cur = 0
weeks.append(Week(lessons[0].dtstart.isocalendar()[1]))
for c in lessons:
if c.dtstart.isocalendar()[1] != weeks[week_cur].week:
if day is not None:
weeks[week_cur].add_day(day)
sday = c.get_day()
day = Day(sday)
week_cur += 1
weeks.append(Week(c.dtstart.isocalendar()[1]))
if sday != c.get_day():
if day is not None:
weeks[week_cur].add_day(day)
sday = c.get_day()
day = Day(sday)
day.add_lesson(c)
if day.lessons != []:
weeks[week_cur].add_day(day)
return weeks
def parse_today(lessons):
lessons.sort()
sday = datetime.datetime.today()
weeks = [Week(sday.isocalendar()[1])]
weeks[0].add_day(Day(sday.strftime('%d %b %Y')))
for c in lessons:
if sday.date() == c.dtstart.date():
weeks[0].days[0].add_lesson(c)
return weeks
def escape_groupe(group):
return group.replace('#', '%23')
def get_week(url, config):
data = schedule_request(url, verbose=config["verbose"], cache=config["cache"])
if data is None:
print("An error occured")
sys.exit(1)
lessons = get_lessons(data)
if lessons is None or lessons == []:
sys.exit(1)
return parse_week(lessons)
def get_current_week(group, config):
url = 'https://ichronos.net/feed/' + escape_groupe(group)
return get_week(url, config)
def get_custom_week(group, config, week):
url = 'https://ichronos.net/ics/' + escape_groupe(group) + '/' + week
return get_week(url, config)
def get_today(group, config):
url = 'https://ichronos.net/feed/' + escape_groupe(group)
data = schedule_request(url, verbose=config["verbose"], cache=config["cache"])
if data is None:
print("An error occured")
sys.exit(1)
lessons = get_lessons(data)
if lessons is None or lessons == []:
sys.exit(1)
return parse_today(lessons)
| 29.618557
| 82
| 0.597285
|
242ab19013e7c6e22c5d59ddffe65b7d92dd7f06
| 9,366
|
py
|
Python
|
tests/nn/checkpoint/test_checkpoint_activations.py
|
ncilfone/fairscale
|
b434b7354898febf718f23c7ff21368a6e0bbe1a
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/nn/checkpoint/test_checkpoint_activations.py
|
ncilfone/fairscale
|
b434b7354898febf718f23c7ff21368a6e0bbe1a
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/nn/checkpoint/test_checkpoint_activations.py
|
ncilfone/fairscale
|
b434b7354898febf718f23c7ff21368a6e0bbe1a
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Test fairscale.nn.misc.checkpoint_activations API."""
import pytest
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint as torch_checkpoint_wrapper
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from fairscale.nn.misc import checkpoint_wrapper as deprecated_checkpoint_wrapper
from fairscale.utils.testing import skip_if_no_cuda, torch_version
def get_cuda_mem_allocated():
"""Helper to get cuda memory allocated if possible."""
if torch.cuda.is_available():
return torch.cuda.memory_allocated()
else:
return 0
def get_loss_and_gnorm(model, input):
"""Helper to run a forward/backward pass and return results in a dict."""
ret = {}
ret["mem_0"] = get_cuda_mem_allocated()
ret["mem_peak"] = 0
if ret["mem_0"] > 0:
torch.cuda.reset_peak_memory_stats()
model.zero_grad()
loss = model(input).sum()
ret["mem_after_fwd"] = get_cuda_mem_allocated()
loss.backward()
ret["mem_after_bwd"] = get_cuda_mem_allocated()
gnorm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]))
ret["loss"] = loss.item()
ret["gnorm"] = gnorm.item()
if ret["mem_0"] > 0:
ret["mem_peak"] = torch.cuda.max_memory_allocated()
return ret
class BasicModel(nn.Module):
"""Basic model with a single FFN being checkpointed.
    Used for extensive checks: equivalency with non-checkpoint, torch-checkpoint, etc.
"""
def __init__(self, use_pytorch_checkpoint=False, use_fairscale_checkpoint=False, **kwargs):
super().__init__()
torch.manual_seed(0) # make sure weights are deterministic.
assert not (
use_pytorch_checkpoint and use_fairscale_checkpoint
), "Cannot use both pytorch and fairscale checkpointing mechanisms."
self.use_pytorch_checkpoint = use_pytorch_checkpoint
self.ffn = nn.Sequential(
nn.Linear(32, 128),
# add a Dropout layer to test RNG save/restore
nn.Dropout(p=0.5),
nn.Linear(128, 32),
)
if use_fairscale_checkpoint:
self.ffn = checkpoint_wrapper(self.ffn, **kwargs)
self.out = nn.Linear(32, 1)
def forward(self, x):
if self.use_pytorch_checkpoint:
x = torch_checkpoint_wrapper(self.ffn, x)
else:
x = self.ffn(x)
return self.out(x)
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_basic(device):
if "cuda" in device and not torch.cuda.is_available():
pytest.skip("test requires a GPU")
input = torch.rand(2, 16, 32).requires_grad_(True)
model = BasicModel().to(device)
no_cpt = get_loss_and_gnorm(model, input.to(device))
model = BasicModel(use_pytorch_checkpoint=True).to(device)
pyt_cpt = get_loss_and_gnorm(model, input.to(device))
model = BasicModel(use_fairscale_checkpoint=True).to(device)
fairscale_cpt = get_loss_and_gnorm(model, input.to(device))
model = BasicModel(use_fairscale_checkpoint=True, offload_to_cpu=True).to(device)
fairscale_cpt_offload = get_loss_and_gnorm(model, input.to(device))
# Check for correctness.
for key in "loss", "gnorm":
if not (no_cpt[key] == pyt_cpt[key] == fairscale_cpt[key] == fairscale_cpt_offload[key]):
print(no_cpt, pyt_cpt, fairscale_cpt, fairscale_cpt_offload)
assert 0
del no_cpt[key]
del pyt_cpt[key]
del fairscale_cpt[key]
del fairscale_cpt_offload[key]
# Check for memory usage for cuda only.
if "cpu" in device:
return
mem_peaks = [98816, 103424, 103424, 107520]
if torch_version() < (1, 7, 0):
# Older torch behaves slightly differently
mem_peaks = [102400, 103424, 103424, 107520]
assert no_cpt == {"mem_0": 38912, "mem_peak": mem_peaks[0], "mem_after_fwd": 64000, "mem_after_bwd": 74240}, no_cpt
assert pyt_cpt == {
"mem_0": 38912,
"mem_peak": mem_peaks[1],
"mem_after_fwd": 43520,
"mem_after_bwd": 74240,
}, pyt_cpt
assert fairscale_cpt == {
"mem_0": 38912,
"mem_peak": mem_peaks[2],
"mem_after_fwd": 43520,
"mem_after_bwd": 74240,
}, fairscale_cpt
assert fairscale_cpt_offload == {
"mem_0": 38912,
"mem_peak": mem_peaks[3],
"mem_after_fwd": 43520,
"mem_after_bwd": 74240,
}, fairscale_cpt_offload
class CpuOffloadModel(nn.Module):
"""Model used to check cpu offload memory saving"""
def __init__(self, enable_checkpoint=False, cpu_offload=False):
super().__init__()
torch.manual_seed(0) # make sure weights are deterministic.
# These numbers are picked to show cpu_offload memory saving.
# Inner (recomputed) activation sizes need to be just right
# to show the benefit.
self.layers = nn.Sequential(
nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4), nn.Linear(4, 8)),
nn.Sequential(nn.Linear(8, 4), nn.Linear(4, 4), nn.Linear(4, 4)),
nn.Sequential(nn.Linear(4, 6), nn.Linear(6, 8), nn.Linear(8, 2)),
)
if enable_checkpoint:
for i, layer in enumerate(self.layers):
# Only middle layer needs to have offloading
self.layers[i] = checkpoint_wrapper(layer, cpu_offload if i == 1 else False)
def forward(self, x):
return self.layers(x)
@skip_if_no_cuda
def test_offload_memory():
device = "cuda"
input = torch.rand(60, 24, 4).requires_grad_(True)
model = CpuOffloadModel().to(device)
base = get_loss_and_gnorm(model, input.to(device))
model = CpuOffloadModel(True).to(device)
cpt = get_loss_and_gnorm(model, input.to(device))
model = CpuOffloadModel(True, True).to(device)
offload = get_loss_and_gnorm(model, input.to(device))
for key in "loss", "gnorm":
if not (base[key] == cpt[key] == offload[key]):
# Use print to collect all debugging info.
print(base, cpt, offload)
assert 0
del base[key]
del cpt[key]
del offload[key]
ref_base = {"mem_0": 32256, "mem_peak": 334336, "mem_after_fwd": 274944, "mem_after_bwd": 41984}
ref_cpt = {"mem_0": 32256, "mem_peak": 253952, "mem_after_fwd": 101888, "mem_after_bwd": 41984}
ref_offload = {"mem_0": 32256, "mem_peak": 207872, "mem_after_fwd": 55808, "mem_after_bwd": 41984}
if not (base == ref_base and cpt == ref_cpt and offload == ref_offload):
# Use print to collect all debugging info.
print(base, cpt, offload)
assert 0
class MultiinMultioutModel(nn.Module):
"""Model used to check different inputs and outputs"""
def __init__(self, multiout=False, checkpoint_config=0):
super().__init__()
torch.manual_seed(0) # make sure weights are deterministic.
self.multiout = multiout
self.conv1 = nn.Sequential(nn.Conv2d(1, 5, 3), nn.ReLU(), nn.Conv2d(5, 5, 3))
self.conv2 = nn.Sequential(nn.Conv2d(3, 5, 3), nn.ReLU(), nn.Conv2d(5, 5, 3))
assert 0 <= checkpoint_config <= 3
if checkpoint_config & 1:
self.conv1 = checkpoint_wrapper(self.conv1)
if checkpoint_config & (1 << 1):
self.conv2 = checkpoint_wrapper(self.conv2)
def forward(self, x1, x2=None):
out1 = self.conv1(x1)
out2 = self.conv2(x2)
if self.multiout:
return out1, out2
return out1 + out2
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("multiout", [True, False])
@pytest.mark.parametrize("checkpoint_config", [1, 2, 3])
def test_multiin_multiout(device, multiout, checkpoint_config):
if "cuda" in device and not torch.cuda.is_available():
pytest.skip("test requires a GPU")
def train(model, in1, in2):
out = model(in1, x2=in2)
if isinstance(out, tuple):
out = torch.cat(out)
loss = out.sum()
loss.backward()
gnorm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]))
return {"loss": loss.item(), "gnorm": gnorm.item()}
in1 = torch.rand(4, 1, 32, 32).requires_grad_(True)
in2 = torch.rand(4, 3, 32, 32).requires_grad_(True)
model = MultiinMultioutModel(multiout, 0).to(device)
no_cpt = train(model, in1.to(device), in2.to(device))
model = MultiinMultioutModel(multiout, checkpoint_config).to(device)
cpt = train(model, in1.to(device), in2.to(device))
for key in ["loss", "gnorm"]:
if no_cpt[key] != cpt[key]:
print(no_cpt, cpt)
assert 0
def test_deprecated_path():
# Check if import works as before.
# from fairscale.nn.misc.checkpoint_activations import checkpoint_wrapper
from fairscale.nn import checkpoint_wrapper
ffn = nn.Sequential(nn.Linear(32, 128), nn.Dropout(p=0.5), nn.Linear(128, 32),)
ffn = checkpoint_wrapper(ffn, {})
# Check if direct import works as before.
ffn = nn.Sequential(nn.Linear(32, 128), nn.Dropout(p=0.5), nn.Linear(128, 32),)
ffn = deprecated_checkpoint_wrapper(ffn, {})
| 34.688889
| 119
| 0.647769
|
41f76df82e089723fa87a7dff77777f3cae22d54
| 4,186
|
py
|
Python
|
api/python/tests/test_api.py
|
NathanDeMaria/quilt
|
894c98c23fd4788b90ef75ff1b547c6258e5ccce
|
[
"Apache-2.0"
] | null | null | null |
api/python/tests/test_api.py
|
NathanDeMaria/quilt
|
894c98c23fd4788b90ef75ff1b547c6258e5ccce
|
[
"Apache-2.0"
] | null | null | null |
api/python/tests/test_api.py
|
NathanDeMaria/quilt
|
894c98c23fd4788b90ef75ff1b547c6258e5ccce
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta, timezone
import numpy as np
import pytest
import responses
import yaml
import quilt3 as he
from quilt3 import util
from .utils import QuiltTestCase
DEFAULT_URL = 'https://registry.example.com'
class TestAPI(QuiltTestCase):
def test_config(self):
content = {
'navigator_url': 'https://foo.bar',
'telemetry_disabled': False,
's3Proxy': None,
'apiGatewayEndpoint': None,
'binaryApiGatewayEndpoint': None
}
self.requests_mock.add(responses.GET, 'https://foo.bar/config.json', json=content, status=200)
he.config('https://foo.bar')
with open(util.CONFIG_PATH, 'r') as stream:
config = yaml.safe_load(stream)
# These come from CONFIG_TEMPLATE, not the mocked config file.
content['default_local_registry'] = util.BASE_PATH.as_uri() + '/packages'
content['default_remote_registry'] = None
content['default_install_location'] = None
content['registryUrl'] = None
assert config == content
def test_config_invalid_host(self):
# Our URL handling is very forgiving, since we might receive a host
# defined in local DNS, like 'foo' instead of 'foo.com' -- and on top
# of that, we automatically add 'https://' to the name if no schema is
# present. ..but, a bad port causes an error..
with pytest.raises(util.QuiltException, match='Port must be a number'):
he.config('https://fliff:fluff')
def test_empty_list_role(self):
empty_list_response = { 'results': [] }
self.requests_mock.add(responses.GET, DEFAULT_URL + '/api/roles',
json=empty_list_response, status=200)
assert he.admin.list_roles() == []
def test_list_role(self):
result = {
'name': 'test',
'arn': 'asdf123',
'id': '1234-1234'
}
list_response = { 'results': [result] }
self.requests_mock.add(responses.GET, DEFAULT_URL + '/api/roles',
json=list_response, status=200)
assert he.admin.list_roles() == [result]
def test_get_role(self):
result = {
'name': 'test',
'arn': 'asdf123',
'id': '1234-1234'
}
self.requests_mock.add(responses.GET, DEFAULT_URL + '/api/roles/1234-1234',
json=result, status=200)
assert he.admin.get_role('1234-1234') == result
def test_create_role(self):
result = {
'name': 'test',
'arn': 'asdf123',
'id': '1234-1234'
}
self.requests_mock.add(responses.POST, DEFAULT_URL + '/api/roles',
json=result, status=200)
assert he.admin.create_role('test', 'asdf123') == result
def test_edit_role(self):
get_result = {
'name': 'test',
'arn': 'asdf123',
'id': '1234-1234'
}
result = {
'name': 'test_new_name',
'arn': 'qwer456',
'id': '1234-1234'
}
self.requests_mock.add(responses.GET, DEFAULT_URL + '/api/roles/1234-1234',
json=get_result, status=200)
self.requests_mock.add(responses.PUT, DEFAULT_URL + '/api/roles/1234-1234',
json=result, status=200)
assert he.admin.edit_role('1234-1234', 'test_new_name', 'qwer456') == result
def test_delete_role(self):
self.requests_mock.add(responses.DELETE, DEFAULT_URL + '/api/roles/1234-1234',
status=200)
he.admin.delete_role('1234-1234')
def test_set_role(self):
self.requests_mock.add(responses.POST, DEFAULT_URL + '/api/users/set_role',
json={}, status=200)
not_found_result = {
'message': "No user exists by the provided name."
}
self.requests_mock.add(responses.POST, DEFAULT_URL + '/api/users/set_role',
json=not_found_result, status=400)
he.admin.set_role('test_user', 'test_role')
with pytest.raises(util.QuiltException):
he.admin.set_role('not_found', 'test_role')
| 34.883333
| 102
| 0.587912
|
afb57f27866ce5216b94a5f4c3239f5c21a00d2a
| 5,583
|
py
|
Python
|
deployment/test/test_maintainlib_etcdfix.py
|
wyatuestc/pai
|
65b44e1ab37cab0790af392a016cc9fb1d2318fe
|
[
"MIT"
] | 1,417
|
2019-05-07T00:51:36.000Z
|
2022-03-31T10:15:31.000Z
|
deployment/test/test_maintainlib_etcdfix.py
|
wyatuestc/pai
|
65b44e1ab37cab0790af392a016cc9fb1d2318fe
|
[
"MIT"
] | 2,447
|
2019-05-07T01:36:32.000Z
|
2022-03-30T08:47:43.000Z
|
deployment/test/test_maintainlib_etcdfix.py
|
wyatuestc/pai
|
65b44e1ab37cab0790af392a016cc9fb1d2318fe
|
[
"MIT"
] | 329
|
2019-05-07T02:28:06.000Z
|
2022-03-29T06:12:49.000Z
|
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
import filecmp
import os
import yaml
import tarfile
import shutil
import sys
import logging
import logging.config
from k8sPaiLibrary.maintainlib import etcdfix
from k8sPaiLibrary.maintainlib import common
class TestMaintainlibEtcdFix(unittest.TestCase):
"""
Test the EtcdFix's api
"""
def setUp(self):
try:
os.chdir(os.path.abspath("test"))
except:
pass
configuration_path = "test_logging.yaml"
if os.path.exists(configuration_path):
with open(configuration_path, 'rt') as f:
logging_configuration = yaml.safe_load(f.read())
logging.config.dictConfig(logging_configuration)
logging.getLogger()
def tearDown(self):
try:
os.chdir(os.path.abspath(".."))
except:
pass
def test_etcdfix_conf_validation_node_config_validation(self):
node_list = common.load_yaml_file("data/data_maintainlib_etcdfix/test_node_list_config.yaml")
cluster_config = common.load_yaml_file("data/data_maintainlib_etcdfix/generated-cluster-object-model-ok.yaml")
node_config = node_list['machinelist']['ok-machine-node']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertTrue(validation.node_conf_validation())
node_config = node_list['machinelist']['miss-node-name']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
node_config = node_list['machinelist']['miss-host-ip']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
node_config = node_list['machinelist']['wrong-host-ip']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
node_config = node_list['machinelist']['wrong-ssh-port']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
node_config = node_list['machinelist']['miss-user-name']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
node_config = node_list['machinelist']['miss-password']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
node_config = node_list['machinelist']['miss-etcd-id']
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.node_conf_validation())
def test_etcdfix_conf_validation_cluster_config_validation(self):
node_list = common.load_yaml_file("data/data_maintainlib_etcdfix/test_node_list_config.yaml")
node_config = node_list['machinelist']['ok-machine-node']
cluster_config = common.load_yaml_file("data/data_maintainlib_etcdfix/generated-cluster-object-model-ok.yaml")
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertTrue(validation.cluster_conf_validation())
cluster_config = common.load_yaml_file("data/data_maintainlib_etcdfix/generated-cluster-object-model-miss-master.yaml")
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.cluster_conf_validation())
cluster_config = common.load_yaml_file("data/data_maintainlib_etcdfix/generated-cluster-object-model-miss-node-config.yaml")
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.cluster_conf_validation())
cluster_config = common.load_yaml_file("data/data_maintainlib_etcdfix/generated-cluster-object-model-wrong-node-config.yaml")
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.cluster_conf_validation())
cluster_config = common.load_yaml_file("data/data_maintainlib_etcdfix/generated-cluster-object-model-inconsistent-node-config.yaml")
validation = etcdfix.etcdfix_conf_validation(cluster_config, node_config)
self.assertFalse(validation.cluster_conf_validation())
| 37.979592
| 140
| 0.745477
|
e36c8fad846e33ba6cc8f9db3ca3d13586007369
| 5,140
|
py
|
Python
|
ironic/drivers/modules/drac/utils.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 350
|
2015-01-02T09:35:49.000Z
|
2022-03-28T09:25:59.000Z
|
ironic/drivers/modules/drac/utils.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 7
|
2015-05-04T16:12:41.000Z
|
2021-08-31T12:27:27.000Z
|
ironic/drivers/modules/drac/utils.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 333
|
2015-01-06T09:09:22.000Z
|
2022-02-20T08:11:40.000Z
|
# Copyright (c) 2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers.modules.redfish import utils as redfish_utils
LOG = log.getLogger(__name__)
sushy = importutils.try_import('sushy')
def execute_oem_manager_method(
task, process_name, lambda_oem_func):
"""Loads OEM manager and executes passed method on it.
Known iDRAC Redfish systems has only one manager, but as Redfish
schema allows a list this method iterates through all values in case
this changes in future. If there are several managers, this will
try starting from the first in the list until the first success.
:param task: a TaskManager instance.
:param process_name: user friendly name of method to be executed.
Used in exception and log messages.
:param lambda_oem_func: method to execute as lambda function with
input parameter OEM extension manager.
Example: lambda m: m.reset_idrac()
:returns: Returned value of lambda_oem_func
:raises: RedfishError if can't execute OEM function either because
there are no managers to the system, failed to load OEM
extension or execution of the OEM method failed itself.
"""
system = redfish_utils.get_system(task.node)
if not system.managers:
raise exception.RedfishError(
"System %(system)s has no managers" %
{'system': system.uuid if system.uuid else system.identity})
oem_error_msgs = []
for manager in system.managers:
# This call makes Sushy go fishing in the ocean of Sushy
# OEM extensions installed on the system. If it finds one
# for 'Dell' which implements the 'Manager' resource
# extension, it uses it to create an object which
# instantiates itself from the OEM JSON. The object is
# returned here.
#
# If the extension could not be found for one manager, it
# will not be found for any others until it is installed, so
# abruptly exit the for loop. The vendor and resource name,
# 'Dell' and 'Manager', respectively, used to search for the
# extension are invariant in the loop.
try:
manager_oem = manager.get_oem_extension('Dell')
except sushy.exceptions.OEMExtensionNotFoundError as e:
error_msg = (_("Search for Sushy OEM extension Python package "
"'sushy-oem-idrac' failed for node %(node)s. "
"Ensure it is installed. Error: %(error)s") %
{'node': task.node.uuid, 'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
try:
result = lambda_oem_func(manager_oem)
LOG.info("Completed: %(process_name)s with system %(system)s "
"manager %(manager)s for node %(node)s",
{'process_name': process_name,
'system': system.uuid if system.uuid else
system.identity,
'manager': manager.uuid if manager.uuid else
manager.identity,
'node': task.node.uuid})
return result
except sushy.exceptions.SushyError as e:
error_msg = (_("Manager %(manager)s: %(error)s" %
{'manager': manager.uuid if manager.uuid else
manager.identity, 'error': e}))
LOG.debug("Failed: %(process_name)s with system %(system)s "
"for node %(node)s. Will try next manager, if "
"available. Error: %(error)s",
{'process_name': process_name,
'system': system.uuid if system.uuid else
system.identity,
'node': task.node.uuid,
'error': error_msg})
oem_error_msgs.append(error_msg)
else:
error_msg = (_('In system %(system)s for node %(node)s all managers '
'failed: %(process_name)s. Errors: %(oem_error_msgs)s' %
{'system': system.uuid if system.uuid else
system.identity,
'node': task.node.uuid,
'process_name': process_name,
'oem_error_msgs': oem_error_msgs if oem_error_msgs else
'unknown'}))
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
| 45.486726
| 79
| 0.616537
|
0e32d86af6d92b8a84c3e2a62b58b5ac57b5d6c0
| 3,562
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/hymenobacterpsychrotoleransdsm18569.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/hymenobacterpsychrotoleransdsm18569.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/hymenobacterpsychrotoleransdsm18569.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Hymenobacter psychrotolerans DSM 18569.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HymenobacterPsychrotoleransDsm18569(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Hymenobacter psychrotolerans DSM 18569 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
        By default False.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Hymenobacter psychrotolerans DSM 18569 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="HymenobacterPsychrotoleransDsm18569",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
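# A minimal usage sketch (hedged: not part of the generated module; it assumes the
# ensmallen package is installed and either network access to the STRING repository
# or a pre-populated cache under cache_path).
if __name__ == "__main__":
    graph = HymenobacterPsychrotoleransDsm18569(version="links.v11.5")
    print(graph)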
| 33.92381
| 223
| 0.684166
|
94857936ac6a48ad1855c4a93f241dfe6b775ec5
| 2,181
|
py
|
Python
|
aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityEntityRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityEntityRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityEntityRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateQualityEntityRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateQualityEntity','dide')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProjectName(self):
return self.get_body_params().get('ProjectName')
def set_ProjectName(self,ProjectName):
self.add_body_params('ProjectName', ProjectName)
def get_EntityLevel(self):
return self.get_body_params().get('EntityLevel')
def set_EntityLevel(self,EntityLevel):
self.add_body_params('EntityLevel', EntityLevel)
def get_MatchExpression(self):
return self.get_body_params().get('MatchExpression')
def set_MatchExpression(self,MatchExpression):
self.add_body_params('MatchExpression', MatchExpression)
def get_EnvType(self):
return self.get_body_params().get('EnvType')
def set_EnvType(self,EnvType):
self.add_body_params('EnvType', EnvType)
def get_TableName(self):
return self.get_body_params().get('TableName')
def set_TableName(self,TableName):
self.add_body_params('TableName', TableName)
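# A minimal usage sketch (hedged: the parameter values below are placeholders, and
# the commented-out client call only illustrates how the request would normally be
# sent with an AcsClient from aliyunsdkcore).
if __name__ == '__main__':
    request = CreateQualityEntityRequest()
    request.set_ProjectName('example_project')      # placeholder project name
    request.set_TableName('example_table')          # placeholder table name
    request.set_EnvType('ODPS')                     # placeholder engine type
    request.set_MatchExpression('dt=$[yyyymmdd]')   # placeholder partition expression
    print(request.get_body_params())
    # from aliyunsdkcore.client import AcsClient
    # client = AcsClient('<access-key-id>', '<access-key-secret>', '<region-id>')
    # response = client.do_action_with_exception(request)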
| 35.177419
| 92
| 0.767079
|
9db9153d05e86b33d4d0ff761c91b936fb2bce10
| 13,829
|
py
|
Python
|
yaml_cli/__init__.py
|
tensX/yaml_cli
|
ff5155603e50a41d4ec50bd702896f0bb2481077
|
[
"MIT"
] | 36
|
2017-12-23T23:22:05.000Z
|
2021-05-28T11:22:31.000Z
|
yaml_cli/__init__.py
|
tensX/yaml_cli
|
ff5155603e50a41d4ec50bd702896f0bb2481077
|
[
"MIT"
] | 8
|
2018-01-26T10:19:10.000Z
|
2021-07-12T19:55:07.000Z
|
yaml_cli/__init__.py
|
tensX/yaml_cli
|
ff5155603e50a41d4ec50bd702896f0bb2481077
|
[
"MIT"
] | 11
|
2018-06-22T19:31:58.000Z
|
2021-11-21T04:03:30.000Z
|
#!/usr/bin/env python
import sys
import argparse
import fileinput
import sys
import os
import yaml
from yaml_cli.version import __version__
ACTION_SET = 'set'
ACTION_RM = 'rm'
BOOLEAN_VALUES_TRUE = ('1', 'true', 'True', 'yes')
BOOLEAN_VALUES_FALSE = ('', '0', 'false', 'False', 'no')
HELP_KEY_SYNTAX = "mykey:subkey:subkey"
class YamlCli(object):
DEBUG = False
VERBOSE = False
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, metavar='FILE', help="YAML file to load. ")
parser.add_argument('-o', '--output', type=str, metavar='FILE', help="Output file. If not provided output is written to STDOUT")
parser.add_argument('-f', '--file', type=str, metavar='FILE', help="YAML file for inplace manipulation.")
parser.add_argument('-d', '--delete', action=RmKeyAction, help="Delete key: {}. Skipped silently if key doesn't exist.".format(HELP_KEY_SYNTAX))
parser.add_argument('-s', '--string', action=KeyValueAction, help="Set key with string value: {} 'my value'".format(HELP_KEY_SYNTAX))
parser.add_argument('-n', '--number', action=NumberKeyValueAction, help="Set key with number value: {} 3.7".format(HELP_KEY_SYNTAX))
parser.add_argument('-b', '--boolean', action=BooleanKeyValueAction, help="Set key with number value: {} true (possible values: {} {})".format(HELP_KEY_SYNTAX, BOOLEAN_VALUES_TRUE, BOOLEAN_VALUES_FALSE))
parser.add_argument('-l', '--list', action=ListKeyValueAction, help="Set key with value as list of strings: {} intem1 intem2 intem3".format(HELP_KEY_SYNTAX))
parser.add_argument('--null', action=NullKeyAction, help="Set key with null value: {}".format(HELP_KEY_SYNTAX))
parser.add_argument('-la', '--list-append', action='store_true', help="If a key to set already exists, do not replace it but instead create a list and append.")
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='store_true', help="Verbose output")
parser.add_argument('--debug', action='store_true', help="Debug output")
args = parser.parse_args()
try:
self.DEBUG = args.debug
self.VERBOSE = args.verbose or args.debug
self.log("Input argparse: {}".format(args), debug=True)
append_mode = args.list_append
infile = args.input or args.file
outfile = args.output or args.file
myYaml = self.get_input_yaml(infile)
if args.set_keys:
for elem in args.set_keys:
try:
if elem['action'] == ACTION_SET:
self.log("setting key {}".format(elem))
myYaml = self.set_key(myYaml, elem['key'], elem['val'], append_mode)
if elem['action'] == ACTION_RM:
self.log("deleting key {}".format(elem))
myYaml = self.rm_key(myYaml, elem['key'])
except:
log_exception("Exception while handling key {}".format(elem['key']))
if outfile:
self.save_yaml(outfile, myYaml)
else:
self.stdout_yaml(myYaml)
except Exception:
log_exception()
def get_input_yaml(self, filename):
"""
Get YAML input either from file or from STDIN
In case of any error, sys.exit(1) is called
        :param filename: path to a YAML file, or None/empty to read from STDIN
:return: dict YAML data
"""
if filename:
return self.load_yaml_from_file(filename)
else:
return self.read_yaml_from_sdtin()
def load_yaml_from_file(self, name):
"""
load YAML file
In case of any error, this function calls sys.exit(1)
:param name: path & file name
:return: YAML as dict
"""
try:
with open(name, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
sys.exit(1)
except IOError as e:
print(e)
sys.exit(1)
def read_yaml_from_sdtin(self):
if sys.stdin.isatty():
return dict()
res = sys.stdin.read()
try:
read = yaml.safe_load(res)
            # yaml.safe_load can return a plain string or None for non-mapping
            # input; only a dict counts as valid YAML input here.
if type(read) is dict:
return read
else:
print("No valid YAML input: '%s'" % res)
sys.exit(1)
except yaml.YAMLError as exc:
print(exc)
sys.exit(1)
def save_yaml(self, name, data):
"""
Saves given YAML data to file
:param name: file path
:param data: YAML data
"""
try:
with open(name, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
except IOError as e:
print(e)
sys.exit(1)
def stdout_yaml(self, data):
"""
Prints YAML data to STDOUT
:param data: YAML data
:return:
"""
print(yaml.dump(data, default_flow_style=False))
def set_key(self, myYaml, key, value, append_mode=False):
"""
Set or add a key to given YAML data. Call itself recursively.
:param myYaml: YAML data to be modified
:param key: key as array of key tokens
:param value: value of any data type
        :param append_mode: whether to append to an existing key as a list (default: False)
:return: modified YAML data
"""
# self.log("set_key {} = {} | yaml: {}".format(key, value, myYaml), debug=True)
if len(key) == 1:
if not append_mode or not key[0] in myYaml:
myYaml[key[0]] = value
else:
if type(myYaml[key[0]]) is not list:
myYaml[key[0]] = [myYaml[key[0]]]
myYaml[key[0]].append(value)
else:
if not key[0] in myYaml or type(myYaml[key[0]]) is not dict:
# self.log("set_key {} = {} creating item".format(key, value, myYaml), debug=True)
myYaml[key[0]] = {}
myYaml[key[0]] = self.set_key(myYaml[key[0]], key[1:], value, append_mode)
return myYaml
def rm_key(self, myYaml, key):
"""
        Remove a key and its value from the given YAML data structure.
        No error is raised if the key does not exist.
:param myYaml: YAML data to be modified
:param key: key as array of key tokens
:return: modified YAML data
"""
# self.log("rm_key {} | yaml: {}".format(key, myYaml), debug=True)
if len(key) == 1 and key[0] in myYaml:
del myYaml[key[0]]
elif key[0] in myYaml:
myYaml[key[0]] = self.rm_key(myYaml[key[0]], key[1:])
return myYaml
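    # A minimal sketch of how set_key/rm_key handle nested keys (hedged
    # illustration only, not part of the original module):
    #   cli.set_key({}, ['a', 'b'], 1)                     -> {'a': {'b': 1}}
    #   cli.set_key({'a': {'b': 1}}, ['a', 'b'], 2, True)  -> {'a': {'b': [1, 2]}}
    #   cli.rm_key({'a': {'b': [1, 2]}}, ['a', 'b'])       -> {'a': {}}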
def log(self, msg, debug=False):
"""
Write a message to STDOUT
:param msg: the message to print
:param debug: If True the message is only printed if --debug flag is set
"""
if self.VERBOSE or (debug and self.DEBUG):
ds = 'DEBUG ' if debug else ''
print("{debug}{msg}".format(debug=ds, msg=msg))
def run():
YamlCli()
if __name__ == "__main__":
run()
#############################################
#### ACTIONS
#############################################
class KeyValueAction(argparse.Action):
"""
    Action for a pair of key and string value.
    Defines and handles a key/value pair from the command line where the value is of type string.
All key value pairs are stored in 'set_keys' in the resulting namespace object.
Requires KeyValueType
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(KeyValueAction, self).__init__(option_strings, dest, **kwargs)
self.dest = 'set_keys'
self.nargs = 2
self.type = KeyValueType()
self.metavar = 'KEY', 'VAL'
def __call__(self, parser, namespace, values, option_string=None):
"""
Gets called for each pair of arguments after they have been type checked.
:param parser:
:param namespace:
        :param values: the values read from the command line
:param option_string:
"""
entry = dict(
key = values[0],
val = values[1],
action = ACTION_SET
)
data = getattr(namespace, self.dest)
if data is None: data = []
data.append(entry)
setattr(namespace, self.dest, data)
self.reset_type()
def reset_type(self):
"""
All KeyValueTypes (self.type) need to be reset once all data values are read.
This method silently fails if the type in self.type is not resettable.
"""
try:
reset = self.type.reset
except AttributeError:
pass
else:
reset()
class NumberKeyValueAction(KeyValueAction):
"""
    Action for a pair of key and numeric value (int or float).
Requires NumberKeyValueType
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(NumberKeyValueAction, self).__init__(option_strings, dest, **kwargs)
self.type = NumberKeyValueType()
class BooleanKeyValueAction(KeyValueAction):
"""
Action for pair of key and boolean value.
    Valid inputs interpreted as booleans are defined in BOOLEAN_VALUES_TRUE and BOOLEAN_VALUES_FALSE.
Requires BooleanKeyValueType
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(BooleanKeyValueAction, self).__init__(option_strings, dest, **kwargs)
self.type = BooleanKeyValueType()
class NullKeyAction(KeyValueAction):
"""
    Action for a key whose value will be set to null.
Expects only one argument namely the key.
Requires KeyValueType
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(NullKeyAction, self).__init__(option_strings, dest, **kwargs)
self.nargs = 1
self.type = KeyValueType()
self.metavar = 'KEY'
def __call__(self, parser, namespace, values, option_string=None):
entry = dict(
key = values[0],
val = None,
action = ACTION_SET
)
data = getattr(namespace, self.dest)
if data is None: data = []
data.append(entry)
setattr(namespace, self.dest, data)
self.reset_type()
class RmKeyAction(KeyValueAction):
"""
    Action for a key which, together with its value, will be removed from the YAML data.
Expects only one argument namely the key.
Requires KeyValueType
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(RmKeyAction, self).__init__(option_strings, dest, **kwargs)
self.nargs = 1
self.type = KeyValueType()
self.metavar = 'KEY'
def __call__(self, parser, namespace, values, option_string=None):
entry = dict(
key = values[0],
val = None,
action = ACTION_RM
)
data = getattr(namespace, self.dest)
if data is None: data = []
data.append(entry)
setattr(namespace, self.dest, data)
self.reset_type()
class ListKeyValueAction(KeyValueAction):
"""
    Action for a key whose value is a list of zero or more strings.
    Can be provided with any number of values.
Requires KeyValueType
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(ListKeyValueAction, self).__init__(option_strings, dest, **kwargs)
self.nargs = '+'
self.type = KeyValueType()
def __call__(self, parser, namespace, values, option_string=None):
entry = dict(
key = values[0],
val = values[1:],
action = ACTION_SET
)
data = getattr(namespace, self.dest)
if data is None: data = []
data.append(entry)
setattr(namespace, self.dest, data)
self.reset_type()
#############################################
#### TYPES
#############################################
class KeyValueType(object):
"""
Type to validate key value pairs.
    Unlike typical argparse types, this one validates two different kinds of
    arguments: first it expects a key, then the corresponding value.
It needs to be reset to handle the next pair beginning with a key.
"""
def __init__(self):
self.key_expected = True
self.last_key = None
def __call__(self, string):
"""
Called for each value.
:param string:
:return:
"""
if self.key_expected:
self.key_expected = False
self.last_key = string
return self.verify_key(string)
else:
# self.key_expected = True
return self.verify_val(string)
def reset(self):
"""
resets its instance so that it can accept the next pair beginning with a key
:return:
"""
self.key_expected = True
self.last_key = None
def verify_key(self, string):
"""
Tests if the given string is a valid key and tokenizes it.
:param string: string read from command line
:return: tokenized key as list
"""
arr = self._split_unescape(string)
        # reject keys containing empty tokens (note: filter() is lazy in Python 3)
        if len(arr) != len([token for token in arr if token]):
msg = "'{}' is not a valid key".format(string)
raise argparse.ArgumentTypeError(msg)
else:
return arr
def verify_val(self, string):
"""
Returns the value as it is.
Can be overridden in inheriting classes.
:param string:
:return:
"""
return string
@staticmethod
def _split_unescape(s, delim=':', escape='\\', unescape=True):
"""
>>> split_unescape('foo,bar', ',')
['foo', 'bar']
>>> split_unescape('foo$,bar', ',', '$')
['foo,bar']
>>> split_unescape('foo$$,bar', ',', '$', unescape=True)
['foo$', 'bar']
>>> split_unescape('foo$$,bar', ',', '$', unescape=False)
['foo$$', 'bar']
>>> split_unescape('foo$', ',', '$', unescape=True)
['foo$']
from: https://stackoverflow.com/a/21882672/2631798
"""
ret = []
current = []
itr = iter(s)
for ch in itr:
if ch == escape:
try:
# skip the next character; it has been escaped!
if not unescape:
current.append(escape)
current.append(next(itr))
except StopIteration:
if unescape:
current.append(escape)
elif ch == delim:
# split! (add current to the list and reset it)
ret.append(''.join(current))
current = []
else:
current.append(ch)
ret.append(''.join(current))
return ret
class NumberKeyValueType(KeyValueType):
def verify_val(self, string):
try:
return int(string)
except Exception as e:
pass
try:
return float(string)
except Exception as e:
msg = "'{}' is not a number for key {}".format(string, self.last_key)
raise argparse.ArgumentTypeError(msg)
class BooleanKeyValueType(KeyValueType):
def verify_val(self, string):
if string in BOOLEAN_VALUES_FALSE:
return False
if string in BOOLEAN_VALUES_TRUE:
return True
msg = "'{}' is not a boolean for key {}".format(string, self.last_key)
raise argparse.ArgumentTypeError(msg)
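# A minimal sketch of how the key/value types above behave (hedged illustration only):
#   t = NumberKeyValueType()
#   t('my:key')   # first call is parsed as the key    -> ['my', 'key']
#   t('3.7')      # second call is parsed as the value -> 3.7
#   b = BooleanKeyValueType()
#   b('flag')     # -> ['flag']
#   b('yes')      # -> True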
def log_exception(msg='None', exit=True):
print("{exc_type}: {exc_msg} ({f_name}:{l_num}) - Message: {msg}".format(
exc_type=sys.exc_info()[0].__name__,
exc_msg=sys.exc_info()[1],
f_name=os.path.split(sys.exc_info()[2].tb_frame.f_code.co_filename)[1],
l_num=sys.exc_info()[2].tb_lineno,
msg=msg
))
if exit:
sys.exit(1)
| 29.052521
| 205
| 0.672211
|
1f62411d5470fbfa81affeac56932f42d15c98cc
| 2,984
|
py
|
Python
|
malib/policies/explorations/epsilon_greedy_strategy.py
|
alvaro-serra/malib
|
fe2b0736974c2a3ed9e41121b6cf475a3ee0b5a0
|
[
"MIT"
] | 23
|
2020-07-05T11:13:00.000Z
|
2022-01-28T00:24:41.000Z
|
malib/policies/explorations/epsilon_greedy_strategy.py
|
Taospirit/malib
|
fe2b0736974c2a3ed9e41121b6cf475a3ee0b5a0
|
[
"MIT"
] | 2
|
2020-09-07T19:09:40.000Z
|
2021-06-02T02:21:51.000Z
|
malib/policies/explorations/epsilon_greedy_strategy.py
|
Taospirit/malib
|
fe2b0736974c2a3ed9e41121b6cf475a3ee0b5a0
|
[
"MIT"
] | 8
|
2020-07-06T07:24:37.000Z
|
2021-09-27T20:28:25.000Z
|
# Created by yingwen at 2019-03-12
"""
ϵ-greedy exploration strategy.
Random exploration according to the value of epsilon.
"""
import numpy as np
from malib.policies.explorations.base_exploration import ExplorationBase
# from garage.misc.overrides import overrides
# TODO: maybe follow this to optimize performance: https://github.com/rlworkgroup/garage/blob/master/garage/misc/overrides.py
class EpsilonGreedyExploration(ExplorationBase):
"""
ϵ-greedy exploration strategy.
Select action based on the value of ϵ. ϵ will decrease from
max_epsilon to min_epsilon within decay_ratio * total_timesteps.
At state s, with probability
1 − ϵ: select action = argmax Q(s, a)
    ϵ : select a random action from a uniform distribution.
Args:
env_spec: Environment specification
total_timesteps: Total steps in the training, equivalent to
max_path_length * n_epochs.
max_epsilon: The maximum(starting) value of epsilon.
min_epsilon: The minimum(terminal) value of epsilon.
decay_ratio: Fraction of total steps for epsilon decay.
"""
def __init__(self,
action_space,
total_timesteps,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1):
self._max_epsilon = max_epsilon
self._min_epsilon = min_epsilon
self._decay_period = int(total_timesteps * decay_ratio)
self._action_space = action_space
self._epsilon = self._max_epsilon
def get_action(self, t, observation, policy, **kwargs):
"""
Get action from this policy for the input observation.
Args:
t: Iteration.
observation: Observation from the environment.
policy: Policy network to predict action based on the observation.
Returns:
opt_action: optimal action from this policy.
"""
opt_action = policy.get_action(observation)
self._decay()
if np.random.random() < self._epsilon:
opt_action = self._action_space.sample()
return opt_action
def get_actions(self, t, observations, policy, **kwargs):
"""
Get actions from this policy for the input observations.
Args:
t: Iteration.
            observations: Observations from the environment.
policy: Policy network to predict action based on the observation.
Returns:
            opt_actions: optimal actions from this policy.
"""
opt_actions = policy.get_actions(observations)
for itr in range(len(opt_actions)):
self._decay()
if np.random.random() < self._epsilon:
opt_actions[itr] = self._action_space.sample()
return opt_actions
def _decay(self):
if self._epsilon > self._min_epsilon:
self._epsilon -= (
self._max_epsilon - self._min_epsilon) / self._decay_period
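# A minimal sanity sketch of the linear decay schedule (hedged: the dummy space and
# policy below are placeholders, not part of malib; the module imports above must
# resolve for this to run).
if __name__ == '__main__':
    class _DummySpace:
        def sample(self):
            return 0
    class _DummyPolicy:
        def get_action(self, observation):
            return 1
    explorer = EpsilonGreedyExploration(_DummySpace(), total_timesteps=1000)
    for step in range(200):
        explorer.get_action(step, observation=None, policy=_DummyPolicy())
    print('epsilon after 200 steps:', explorer._epsilon)  # ~min_epsilon (0.02)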
| 37.3
| 125
| 0.647453
|
e702ac7686f87e923850f3a9ec4f19e3da6a4a73
| 9,931
|
py
|
Python
|
nncf/dynamic_graph/patch_pytorch.py
|
LeonidBeynenson/nncf_pytorch
|
f8ded2752aded28d3559308c354235e5011ccbe0
|
[
"Apache-2.0"
] | null | null | null |
nncf/dynamic_graph/patch_pytorch.py
|
LeonidBeynenson/nncf_pytorch
|
f8ded2752aded28d3559308c354235e5011ccbe0
|
[
"Apache-2.0"
] | null | null | null |
nncf/dynamic_graph/patch_pytorch.py
|
LeonidBeynenson/nncf_pytorch
|
f8ded2752aded28d3559308c354235e5011ccbe0
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from typing import Callable, List
import warnings
from torch import Tensor
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nncf.dynamic_graph.trace_tensor import TracedTensor, flatten_args
from nncf.dynamic_graph.wrappers import wrap_operator, wrap_module_call, ignore_scope
from nncf.common.utils.logger import logger
class CustomTraceFunction:
def __call__(self, operator: Callable, *args, **kwargs):
raise NotImplementedError
class ForwardTraceOnly(CustomTraceFunction):
def __call__(self, operator: Callable, *args, **kwargs):
""" This wrapper override will result in the operator not being added to graph,
but the result will still have TracedTensors with parent IDs left the same as in input.
Useful for operators which are not likely to be present in patterns considered for
compression, but still have to be accounted for so that the NNCF internal graph representation
does not become disjoint. """
result = operator(*args, **kwargs)
fargs = flatten_args(args, kwargs)
input_traced_tensor_indices = [i for i in range(len(fargs)) if isinstance(fargs[i], TracedTensor)]
if isinstance(result, (list, tuple)):
output_tensors_to_be_traced_indices = [i for i in range(len(result)) if
isinstance(result[i], Tensor)]
was_tuple = isinstance(result, tuple)
result = list(result)
if len(input_traced_tensor_indices) == 1:
# Broadcast one and the same creator ID of input to all outputs
for out_idx in output_tensors_to_be_traced_indices:
forwarded_meta = deepcopy(fargs[input_traced_tensor_indices[0]].tensor_meta)
forwarded_meta.shape = tuple(result[out_idx].shape)
result[out_idx] = TracedTensor.from_torch_tensor(result[out_idx],
forwarded_meta)
elif len(input_traced_tensor_indices) != len(output_tensors_to_be_traced_indices):
raise RuntimeError("Unable to forward trace through operator {} - "
"input and output tensor count mismatch!".format(operator.__name__))
else:
# Assume that output tensor order corresponds to input tensor order
for in_idx, out_idx in zip(input_traced_tensor_indices, output_tensors_to_be_traced_indices):
forwarded_meta = deepcopy(fargs[in_idx].tensor_meta)
forwarded_meta.shape = tuple(result[out_idx].shape)
result[out_idx] = TracedTensor.from_torch_tensor(result[out_idx],
forwarded_meta)
if was_tuple:
result = tuple(result)
elif len(input_traced_tensor_indices) > 1:
raise RuntimeError("Unable to forward trace through operator {} - "
"input and output tensor count mismatch!".format(operator.__name__))
elif input_traced_tensor_indices:
forwarded_meta = deepcopy(fargs[input_traced_tensor_indices[0]].tensor_meta)
forwarded_meta.shape = tuple(result.shape)
return TracedTensor.from_torch_tensor(result,
forwarded_meta)
# No traced tensors in input, return a usual torch.Tensor as well
return result
class PatchedOperatorInfo:
def __init__(self, name: str, custom_trace_fn: CustomTraceFunction = None):
"""custom_trace_fn will be called instead of the regular node search/insertion step
during the corresponding operator call"""
self.name = name
self.custom_trace_fn = custom_trace_fn
def register_operator(name=None):
def wrap(operator):
op_name = name
if op_name is None:
op_name = operator.__name__
return wrap_operator(operator, PatchedOperatorInfo(op_name))
return wrap
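# Example (hedged sketch): registering a custom function so that calls to it are
# traced as nodes in the NNCF graph. The function name below is illustrative only.
# @register_operator()
# def my_custom_op(x):
#     return x * 2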
# TODO: Use same wrapper for model.forward() calls
def torch_jit_script_wrapper(*args, **kwargs):
# Torch JIT cannot work with NNCF-modified operators,
# so at each import of a @torch.jit.script-decorated
# function we need to un-patch the torch operators
unpatch_torch_operators()
retval = _ORIG_JIT_SCRIPT(*args, **kwargs)
patch_torch_operators()
return retval
def get_arg_positions_to_quantize(op_name: str):
from nncf.dynamic_graph.function_input_quantization import FUNCTIONS_TO_QUANTIZE
return next((x.positions_of_args_to_quantize for x in FUNCTIONS_TO_QUANTIZE
if x.name == op_name), None)
class OriginalOpInfo:
def __init__(self, name: str, namespace, op):
self.name = name
self.namespace = namespace
self.op = op
ORIGINAL_OPERATORS = [] # type: List[OriginalOpInfo]
_JIT_ALREADY_WRAPPED = False
_OPERATORS_ALREADY_WRAPPED = False
_ORIG_JIT_SCRIPT = None
def patch_torch_jit_script():
# This import statement is required, otherwise we get a
# "RuntimeError: undefined value torch" inside the real torch.jit.script
# pylint:disable=unused-import,redefined-outer-name,reimported
import torch
orig = getattr(torch.jit, "script")
global _ORIG_JIT_SCRIPT
_ORIG_JIT_SCRIPT = orig
setattr(torch.jit, "script", torch_jit_script_wrapper)
def patch_namespace_opname(namespace, patched_op_info: PatchedOperatorInfo):
name = patched_op_info.name
if hasattr(namespace, name):
orig = getattr(namespace, name)
ORIGINAL_OPERATORS.append(OriginalOpInfo(name, namespace, orig))
setattr(namespace, name, wrap_operator(orig, patched_op_info))
else:
warnings.warn("Not patching {} since it is missing in this version of PyTorch"
.format(name))
def patch_namespace_by_patchspec(namespace, patchspec: 'PatchSpec'):
for op_name in patchspec.underlying_function_names:
patched_op_info = PatchedOperatorInfo(op_name, patchspec.custom_trace_fn)
patch_namespace_opname(namespace, patched_op_info)
def patch_torch_operators():
# Only patch torch.jit.script during first patch_torch_operators call
global _JIT_ALREADY_WRAPPED
if not _JIT_ALREADY_WRAPPED:
patch_torch_jit_script()
_JIT_ALREADY_WRAPPED = True
# Do not patch operators twice as well
global _OPERATORS_ALREADY_WRAPPED
if _OPERATORS_ALREADY_WRAPPED:
return
_OPERATORS_ALREADY_WRAPPED = True
# patch operators
import torch.nn.functional as F
import torch
from nncf.dynamic_graph.operator_metatypes import OPERATOR_METATYPES
for op_meta_class in OPERATOR_METATYPES.registry_dict.values(): # type: OperatorMetatype
if op_meta_class.torch_nn_functional_patch_spec is not None:
ps = op_meta_class.torch_nn_functional_patch_spec
patch_namespace_by_patchspec(F, ps)
if op_meta_class.torch_module_patch_spec is not None:
ps = op_meta_class.torch_module_patch_spec
patch_namespace_by_patchspec(torch, ps)
if op_meta_class.torch_tensor_patch_spec is not None:
ps = op_meta_class.torch_tensor_patch_spec
patch_namespace_by_patchspec(TracedTensor, ps)
# Patch __repr__ methods so that debugging does not add new nodes to the graph
patch_namespace_opname(TracedTensor, PatchedOperatorInfo("__repr__", ForwardTraceOnly()))
ORIGINAL_OPERATORS.append(OriginalOpInfo("__call__", torch.nn.Module, torch.nn.Module.__call__))
torch.nn.Module.__call__ = wrap_module_call(torch.nn.Module.__call__)
ignore_scope(DataParallel)
ignore_scope(DistributedDataParallel)
def unpatch_torch_operators():
global _OPERATORS_ALREADY_WRAPPED
if not _OPERATORS_ALREADY_WRAPPED:
return
_OPERATORS_ALREADY_WRAPPED = False
for orig_op_info in ORIGINAL_OPERATORS:
setattr(orig_op_info.namespace, orig_op_info.name, orig_op_info.op)
def patch_extension_build_function():
"""
    Patches PyTorch to work around a bug in CUDA extension building by making
    the CUDA arch flags deterministic (sorted); the bug is fixed upstream in
    PyTorch 1.8.0, so the patch is skipped for 1.8.0 and newer.
"""
import torch.utils.cpp_extension
try:
torch_version_numbers = torch.__version__.split('+')[0]
split_torch_version = list(map(int, torch_version_numbers.split('.')))
except ValueError as e:
logger.warning('Skip applying a patch to building extension with a reason: '
'Cannot parse a PyTorch version with the error {}'.format(e))
return
if split_torch_version >= [1, 8, 0]:
return
if torch.__version__ not in ('1.5.1', '1.7.0', '1.7.1'):
logger.warning('Skip applying a patch to building extension with a reason: '
'PyTorch version is not supported for this')
return
def sort_arch_flags(func):
def wrapped(*args, **kwargs):
flags = func(*args, **kwargs)
return sorted(flags)
return wrapped
# pylint:disable=protected-access
torch.utils.cpp_extension._get_cuda_arch_flags = \
sort_arch_flags(torch.utils.cpp_extension._get_cuda_arch_flags)
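# A minimal usage sketch (hedged: NNCF normally drives the patching itself during
# model compression; this only illustrates pairing the two entry points above).
# patch_torch_operators()
# try:
#     ...  # build and trace a model under NNCF here
# finally:
#     unpatch_torch_operators()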
| 41.207469
| 109
| 0.692075
|
aab8c0ae832231425ba641fe6094965aaf5db2a5
| 7,296
|
py
|
Python
|
qa/rpc-tests/walletbackup.py
|
maurizio17/C-Bit
|
73bc5652564d344af6847eefde6d386225d4ad02
|
[
"MIT"
] | 1
|
2021-04-04T20:40:53.000Z
|
2021-04-04T20:40:53.000Z
|
qa/rpc-tests/walletbackup.py
|
maurizio17/C-Bit
|
73bc5652564d344af6847eefde6d386225d4ad02
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/walletbackup.py
|
maurizio17/C-Bit
|
73bc5652564d344af6847eefde6d386225d4ad02
|
[
"MIT"
] | 1
|
2017-09-03T18:54:46.000Z
|
2017-09-03T18:54:46.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The C-Bit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
        # nodes 1, 2 and 3 are spenders; give them a keypool of 100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
| 35.940887
| 95
| 0.653783
|
24a2c217db5e1281dc7d7055790a071cd531c185
| 30,169
|
py
|
Python
|
neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py
|
SnabbCo/neutron
|
a657c06d10f2171149c6b1863df36522bdc11cd7
|
[
"Apache-2.0"
] | 3
|
2015-02-02T02:51:39.000Z
|
2015-02-23T10:20:23.000Z
|
neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py
|
SnabbCo/neutron
|
a657c06d10f2171149c6b1863df36522bdc11cd7
|
[
"Apache-2.0"
] | 4
|
2015-02-23T10:21:11.000Z
|
2015-03-04T09:28:20.000Z
|
neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py
|
SnabbCo/neutron
|
a657c06d10f2171149c6b1863df36522bdc11cd7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Henry Gessau, Cisco Systems
import mock
from webob import exc as wexc
from neutron.openstack.common import uuidutils
from neutron.plugins.ml2.drivers.cisco.apic import apic_manager
from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc
from neutron.tests import base
from neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
class TestCiscoApicManager(base.BaseTestCase,
mocked.ControllerMixin,
mocked.ConfigMixin,
mocked.DbModelMixin):
def setUp(self):
super(TestCiscoApicManager, self).setUp()
mocked.ControllerMixin.set_up_mocks(self)
mocked.ConfigMixin.set_up_mocks(self)
mocked.DbModelMixin.set_up_mocks(self)
self.mock_apic_manager_login_responses()
self.mgr = apic_manager.APICManager()
self.session = self.mgr.apic.session
self.assert_responses_drained()
self.reset_reponses()
def test_mgr_session_login(self):
login = self.mgr.apic.authentication
self.assertEqual(login['userName'], mocked.APIC_USR)
def test_mgr_session_logout(self):
self.mock_response_for_post('aaaLogout')
self.mgr.apic.logout()
self.assert_responses_drained()
self.assertIsNone(self.mgr.apic.authentication)
def test_to_range(self):
port_list = [4, 2, 3, 1, 7, 8, 10, 20, 6, 22, 21]
expected_ranges = [(1, 4), (6, 8), (10, 10), (20, 22)]
port_ranges = [r for r in apic_manager.group_by_ranges(port_list)]
self.assertEqual(port_ranges, expected_ranges)
def test_get_profiles(self):
self.mock_db_query_filterby_first_return('faked')
self.assertEqual(
self.mgr.db.get_port_profile_for_node('node'),
'faked'
)
self.assertEqual(
self.mgr.db.get_profile_for_module('node', 'prof', 'module'),
'faked'
)
self.assertEqual(
self.mgr.db.get_profile_for_module_and_ports(
'node', 'prof', 'module', 'from', 'to'
),
'faked'
)
def test_add_profile(self):
self.mgr.db.add_profile_for_module_and_ports(
'node', 'prof', 'hpselc', 'module', 'from', 'to')
self.assertTrue(self.mocked_session.add.called)
self.assertTrue(self.mocked_session.flush.called)
def test_ensure_port_profile_created(self):
port_name = mocked.APIC_PORT
self.mock_responses_for_create('infraAccPortP')
self.mock_response_for_get('infraAccPortP', name=port_name)
port = self.mgr.ensure_port_profile_created_on_apic(port_name)
self.assert_responses_drained()
self.assertEqual(port['name'], port_name)
def test_ensure_port_profile_created_exc(self):
port_name = mocked.APIC_PORT
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('infraAccPortP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_port_profile_created_on_apic,
port_name)
self.assert_responses_drained()
def test_ensure_node_profile_created_for_switch_old(self):
old_switch = mocked.APIC_NODE_PROF
self.mock_response_for_get('infraNodeP', name=old_switch)
self.mgr.ensure_node_profile_created_for_switch(old_switch)
self.assert_responses_drained()
old_name = self.mgr.node_profiles[old_switch]['object']['name']
self.assertEqual(old_name, old_switch)
def test_ensure_node_profile_created_for_switch_new(self):
new_switch = mocked.APIC_NODE_PROF
self.mock_response_for_get('infraNodeP')
self.mock_responses_for_create('infraNodeP')
self.mock_responses_for_create('infraLeafS')
self.mock_responses_for_create('infraNodeBlk')
self.mock_response_for_get('infraNodeP', name=new_switch)
self.mgr.ensure_node_profile_created_for_switch(new_switch)
self.assert_responses_drained()
new_name = self.mgr.node_profiles[new_switch]['object']['name']
self.assertEqual(new_name, new_switch)
def test_ensure_node_profile_created_for_switch_new_exc(self):
new_switch = mocked.APIC_NODE_PROF
self.mock_response_for_get('infraNodeP')
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('infraNodeP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_node_profile_created_for_switch,
new_switch)
self.assert_responses_drained()
def test_ensure_vmm_domain_created_old(self):
dom = mocked.APIC_DOMAIN
self.mock_response_for_get('vmmDomP', name=dom)
self.mgr.ensure_vmm_domain_created_on_apic(dom)
self.assert_responses_drained()
old_dom = self.mgr.vmm_domain['name']
self.assertEqual(old_dom, dom)
def _mock_new_vmm_dom_responses(self, dom, seg_type=None):
vmm = mocked.APIC_VMMP
dn = self.mgr.apic.vmmDomP.mo.dn(vmm, dom)
self.mock_response_for_get('vmmDomP')
self.mock_responses_for_create('vmmDomP')
if seg_type:
self.mock_responses_for_create(seg_type)
self.mock_response_for_get('vmmDomP', name=dom, dn=dn)
def test_ensure_vmm_domain_created_new_no_vlan_ns(self):
dom = mocked.APIC_DOMAIN
self._mock_new_vmm_dom_responses(dom)
self.mgr.ensure_vmm_domain_created_on_apic(dom)
self.assert_responses_drained()
new_dom = self.mgr.vmm_domain['name']
self.assertEqual(new_dom, dom)
def test_ensure_vmm_domain_created_new_no_vlan_ns_exc(self):
dom = mocked.APIC_DOMAIN
self.mock_response_for_get('vmmDomP')
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('vmmDomP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_vmm_domain_created_on_apic, dom)
self.assert_responses_drained()
def test_ensure_vmm_domain_created_new_with_vlan_ns(self):
dom = mocked.APIC_DOMAIN
self._mock_new_vmm_dom_responses(dom, seg_type='infraRsVlanNs__vmm')
ns = {'dn': 'test_vlan_ns'}
self.mgr.ensure_vmm_domain_created_on_apic(dom, vlan_ns=ns)
self.assert_responses_drained()
new_dom = self.mgr.vmm_domain['name']
self.assertEqual(new_dom, dom)
def test_ensure_vmm_domain_created_new_with_vxlan_ns(self):
dom = mocked.APIC_DOMAIN
# TODO(Henry): mock seg_type vxlan when vxlan is ready
self._mock_new_vmm_dom_responses(dom, seg_type=None)
ns = {'dn': 'test_vxlan_ns'}
self.mgr.ensure_vmm_domain_created_on_apic(dom, vxlan_ns=ns)
self.assert_responses_drained()
new_dom = self.mgr.vmm_domain['name']
self.assertEqual(new_dom, dom)
def test_ensure_infra_created_no_infra(self):
self.mgr.switch_dict = {}
self.mgr.ensure_infra_created_on_apic()
def _ensure_infra_created_seq1_setup(self):
am = 'neutron.plugins.ml2.drivers.cisco.apic.apic_manager.APICManager'
np_create_for_switch = mock.patch(
am + '.ensure_node_profile_created_for_switch').start()
self.mock_db_query_filterby_first_return(None)
pp_create_for_switch = mock.patch(
am + '.ensure_port_profile_created_on_apic').start()
pp_create_for_switch.return_value = {'dn': 'port_profile_dn'}
return np_create_for_switch, pp_create_for_switch
def test_ensure_infra_created_seq1(self):
np_create_for_switch, pp_create_for_switch = (
self._ensure_infra_created_seq1_setup())
def _profile_for_module(aswitch, ppn, module):
profile = mock.Mock()
profile.ppn = ppn
profile.hpselc_id = '-'.join([aswitch, module, 'hpselc_id'])
return profile
self.mgr.db.get_profile_for_module = mock.Mock(
side_effect=_profile_for_module)
self.mgr.db.get_profile_for_module_and_ports = mock.Mock(
return_value=None)
self.mgr.db.add_profile_for_module_and_ports = mock.Mock()
num_switches = len(self.mgr.switch_dict)
for loop in range(num_switches):
self.mock_responses_for_create('infraRsAccPortP')
self.mock_responses_for_create('infraPortBlk')
self.mgr.ensure_infra_created_on_apic()
self.assert_responses_drained()
self.assertEqual(np_create_for_switch.call_count, num_switches)
self.assertEqual(pp_create_for_switch.call_count, num_switches)
for switch in self.mgr.switch_dict:
np_create_for_switch.assert_any_call(switch)
def test_ensure_infra_created_seq1_exc(self):
np_create_for_switch, __ = self._ensure_infra_created_seq1_setup()
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('infraAccPortP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_infra_created_on_apic)
self.assert_responses_drained()
self.assertTrue(np_create_for_switch.called)
self.assertEqual(np_create_for_switch.call_count, 1)
def _ensure_infra_created_seq2_setup(self):
am = 'neutron.plugins.ml2.drivers.cisco.apic.apic_manager.APICManager'
np_create_for_switch = mock.patch(
am + '.ensure_node_profile_created_for_switch').start()
def _profile_for_node(aswitch):
profile = mock.Mock()
profile.profile_id = '-'.join([aswitch, 'profile_id'])
return profile
self.mgr.db.get_port_profile_for_node = mock.Mock(
side_effect=_profile_for_node)
self.mgr.db.get_profile_for_module = mock.Mock(
return_value=None)
self.mgr.function_profile = {'dn': 'dn'}
self.mgr.db.get_profile_for_module_and_ports = mock.Mock(
return_value=True)
return np_create_for_switch
def test_ensure_infra_created_seq2(self):
np_create_for_switch = self._ensure_infra_created_seq2_setup()
num_switches = len(self.mgr.switch_dict)
for loop in range(num_switches):
self.mock_responses_for_create('infraHPortS')
self.mock_responses_for_create('infraRsAccBaseGrp')
self.mgr.ensure_infra_created_on_apic()
self.assert_responses_drained()
self.assertEqual(np_create_for_switch.call_count, num_switches)
for switch in self.mgr.switch_dict:
np_create_for_switch.assert_any_call(switch)
def test_ensure_infra_created_seq2_exc(self):
np_create_for_switch = self._ensure_infra_created_seq2_setup()
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('infraHPortS')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_infra_created_on_apic)
self.assert_responses_drained()
self.assertTrue(np_create_for_switch.called)
self.assertEqual(np_create_for_switch.call_count, 1)
def test_ensure_context_unenforced_new_ctx(self):
self.mock_response_for_get('fvCtx')
self.mock_responses_for_create('fvCtx')
self.mgr.ensure_context_unenforced()
self.assert_responses_drained()
def test_ensure_context_unenforced_pref1(self):
self.mock_response_for_get('fvCtx', pcEnfPref='1')
self.mock_response_for_post('fvCtx')
self.mgr.ensure_context_unenforced()
self.assert_responses_drained()
def test_ensure_context_unenforced_pref2(self):
self.mock_response_for_get('fvCtx', pcEnfPref='2')
self.mgr.ensure_context_unenforced()
self.assert_responses_drained()
def _mock_vmm_dom_prereq(self, dom):
self._mock_new_vmm_dom_responses(dom)
self.mgr.ensure_vmm_domain_created_on_apic(dom)
def _mock_new_phys_dom_responses(self, dom, seg_type=None):
dn = self.mgr.apic.physDomP.mo.dn(dom)
self.mock_response_for_get('physDomP')
self.mock_responses_for_create('physDomP')
if seg_type:
self.mock_responses_for_create(seg_type)
self.mock_response_for_get('physDomP', name=dom, dn=dn)
def _mock_phys_dom_prereq(self, dom):
self._mock_new_phys_dom_responses(dom)
self.mgr.ensure_phys_domain_created_on_apic(dom)
def test_ensure_entity_profile_created_old(self):
ep = mocked.APIC_ATT_ENT_PROF
self.mock_response_for_get('infraAttEntityP', name=ep)
self.mgr.ensure_entity_profile_created_on_apic(ep)
self.assert_responses_drained()
def _mock_new_entity_profile(self, exc=None):
self.mock_response_for_get('infraAttEntityP')
self.mock_responses_for_create('infraAttEntityP')
self.mock_responses_for_create('infraRsDomP')
if exc:
self.mock_error_get_response(exc, code='103', text=u'Fail')
else:
self.mock_response_for_get('infraAttEntityP')
def test_ensure_entity_profile_created_new(self):
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
ep = mocked.APIC_ATT_ENT_PROF
self._mock_new_entity_profile()
self.mgr.ensure_entity_profile_created_on_apic(ep)
self.assert_responses_drained()
def test_ensure_entity_profile_created_new_exc(self):
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
ep = mocked.APIC_ATT_ENT_PROF
self._mock_new_entity_profile(exc=wexc.HTTPBadRequest)
self.mock_response_for_post('infraAttEntityP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_entity_profile_created_on_apic, ep)
self.assert_responses_drained()
def _mock_entity_profile_preqreq(self):
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
ep = mocked.APIC_ATT_ENT_PROF
self._mock_new_entity_profile()
self.mgr.ensure_entity_profile_created_on_apic(ep)
def test_ensure_function_profile_created_old(self):
self._mock_entity_profile_preqreq()
fp = mocked.APIC_FUNC_PROF
self.mock_response_for_get('infraAccPortGrp', name=fp)
self.mgr.ensure_function_profile_created_on_apic(fp)
self.assert_responses_drained()
old_fp = self.mgr.function_profile['name']
self.assertEqual(old_fp, fp)
def _mock_new_function_profile(self, fp):
dn = self.mgr.apic.infraAttEntityP.mo.dn(fp)
self.mock_responses_for_create('infraAccPortGrp')
self.mock_responses_for_create('infraRsAttEntP')
self.mock_response_for_get('infraAccPortGrp', name=fp, dn=dn)
def test_ensure_function_profile_created_new(self):
fp = mocked.APIC_FUNC_PROF
dn = self.mgr.apic.infraAttEntityP.mo.dn(fp)
self.mgr.entity_profile = {'dn': dn}
self.mock_response_for_get('infraAccPortGrp')
self.mock_responses_for_create('infraAccPortGrp')
self.mock_responses_for_create('infraRsAttEntP')
self.mock_response_for_get('infraAccPortGrp', name=fp, dn=dn)
self.mgr.ensure_function_profile_created_on_apic(fp)
self.assert_responses_drained()
new_fp = self.mgr.function_profile['name']
self.assertEqual(new_fp, fp)
def test_ensure_function_profile_created_new_exc(self):
fp = mocked.APIC_FUNC_PROF
dn = self.mgr.apic.infraAttEntityP.mo.dn(fp)
self.mgr.entity_profile = {'dn': dn}
self.mock_response_for_get('infraAccPortGrp')
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('infraAccPortGrp')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_function_profile_created_on_apic, fp)
self.assert_responses_drained()
def test_ensure_vlan_ns_created_old(self):
ns = mocked.APIC_VLAN_NAME
mode = mocked.APIC_VLAN_MODE
self.mock_response_for_get('fvnsVlanInstP', name=ns, mode=mode)
new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '100', '199')
self.assert_responses_drained()
self.assertIsNone(new_ns)
def _mock_new_vlan_instance(self, ns, vlan_encap=None):
self.mock_responses_for_create('fvnsVlanInstP')
if vlan_encap:
self.mock_response_for_get('fvnsEncapBlk', **vlan_encap)
else:
self.mock_response_for_get('fvnsEncapBlk')
self.mock_responses_for_create('fvnsEncapBlk__vlan')
self.mock_response_for_get('fvnsVlanInstP', name=ns)
def test_ensure_vlan_ns_created_new_no_encap(self):
ns = mocked.APIC_VLAN_NAME
self.mock_response_for_get('fvnsVlanInstP')
self._mock_new_vlan_instance(ns)
new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '200', '299')
self.assert_responses_drained()
self.assertEqual(new_ns['name'], ns)
def test_ensure_vlan_ns_created_new_exc(self):
ns = mocked.APIC_VLAN_NAME
self.mock_response_for_get('fvnsVlanInstP')
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('fvnsVlanInstP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_vlan_ns_created_on_apic,
ns, '200', '299')
self.assert_responses_drained()
def test_ensure_vlan_ns_created_new_with_encap(self):
ns = mocked.APIC_VLAN_NAME
self.mock_response_for_get('fvnsVlanInstP')
ns_args = {'name': 'encap', 'from': '300', 'to': '399'}
self._mock_new_vlan_instance(ns, vlan_encap=ns_args)
new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '300', '399')
self.assert_responses_drained()
self.assertEqual(new_ns['name'], ns)
def test_ensure_tenant_created_on_apic(self):
self.mock_response_for_get('fvTenant', name='any')
self.mgr.ensure_tenant_created_on_apic('two')
self.mock_response_for_get('fvTenant')
self.mock_responses_for_create('fvTenant')
self.mgr.ensure_tenant_created_on_apic('four')
self.assert_responses_drained()
def test_ensure_bd_created_existing_bd(self):
self.mock_response_for_get('fvBD', name='BD')
self.mgr.ensure_bd_created_on_apic('t1', 'two')
self.assert_responses_drained()
def test_ensure_bd_created_not_ctx(self):
self.mock_response_for_get('fvBD')
self.mock_responses_for_create('fvBD')
self.mock_response_for_get('fvCtx')
self.mock_responses_for_create('fvCtx')
self.mock_responses_for_create('fvRsCtx')
self.mgr.ensure_bd_created_on_apic('t2', 'three')
self.assert_responses_drained()
def test_ensure_bd_created_exc(self):
self.mock_response_for_get('fvBD')
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('fvBD')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_bd_created_on_apic, 't2', 'three')
self.assert_responses_drained()
def test_ensure_bd_created_ctx_pref1(self):
self.mock_response_for_get('fvBD')
self.mock_responses_for_create('fvBD')
self.mock_response_for_get('fvCtx', pcEnfPref='1')
self.mock_responses_for_create('fvRsCtx')
self.mgr.ensure_bd_created_on_apic('t3', 'four')
self.assert_responses_drained()
def test_ensure_bd_created_ctx_pref2(self):
self.mock_response_for_get('fvBD')
self.mock_responses_for_create('fvBD')
self.mock_response_for_get('fvCtx', pcEnfPref='2')
self.mock_response_for_post('fvCtx')
self.mock_responses_for_create('fvRsCtx')
self.mgr.ensure_bd_created_on_apic('t3', 'four')
self.assert_responses_drained()
def test_delete_bd(self):
self.mock_response_for_post('fvBD')
self.mgr.delete_bd_on_apic('t1', 'bd')
self.assert_responses_drained()
def test_ensure_subnet_created(self):
self.mock_response_for_get('fvSubnet', name='sn1')
self.mgr.ensure_subnet_created_on_apic('t0', 'bd1', '2.2.2.2/8')
self.mock_response_for_get('fvSubnet')
self.mock_responses_for_create('fvSubnet')
self.mgr.ensure_subnet_created_on_apic('t2', 'bd3', '4.4.4.4/16')
self.assert_responses_drained()
def test_ensure_filter_created(self):
self.mock_response_for_get('vzFilter', name='f1')
self.mgr.ensure_filter_created_on_apic('t1', 'two')
self.mock_response_for_get('vzFilter')
self.mock_responses_for_create('vzFilter')
self.mgr.ensure_filter_created_on_apic('t2', 'four')
self.assert_responses_drained()
def test_ensure_epg_created_for_network_old(self):
self.mock_db_query_filterby_first_return('faked')
epg = self.mgr.ensure_epg_created_for_network('X', 'Y', 'Z')
self.assertEqual(epg, 'faked')
def test_ensure_epg_created_for_network_new(self):
tenant = mocked.APIC_TENANT
network = mocked.APIC_NETWORK
netname = mocked.APIC_NETNAME
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
self.mock_db_query_filterby_first_return(None)
self.mock_responses_for_create('fvAEPg')
self.mock_response_for_get('fvBD', name=network)
self.mock_responses_for_create('fvRsBd')
self.mock_responses_for_create('fvRsDomAtt')
new_epg = self.mgr.ensure_epg_created_for_network(tenant,
network, netname)
self.assert_responses_drained()
self.assertEqual(new_epg.network_id, network)
self.assertTrue(self.mocked_session.add.called)
self.assertTrue(self.mocked_session.flush.called)
def test_ensure_epg_created_for_network_exc(self):
tenant = mocked.APIC_TENANT
network = mocked.APIC_NETWORK
netname = mocked.APIC_NETNAME
self.mock_db_query_filterby_first_return(None)
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('fvAEPg')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.ensure_epg_created_for_network,
tenant, network, netname)
self.assert_responses_drained()
def test_delete_epg_for_network_no_epg(self):
self.mock_db_query_filterby_first_return(None)
self.mgr.delete_epg_for_network('tenant', 'network')
def test_delete_epg_for_network(self):
epg = mock.Mock()
epg.epg_id = mocked.APIC_EPG
self.mock_db_query_filterby_first_return(epg)
self.mock_response_for_post('fvAEPg')
self.mgr.delete_epg_for_network('tenant', 'network')
self.assertTrue(self.mocked_session.delete.called)
self.assertTrue(self.mocked_session.flush.called)
def test_ensure_path_created_for_port(self):
epg = mock.Mock()
epg.epg_id = 'epg01'
eepg = mock.Mock(return_value=epg)
apic_manager.APICManager.ensure_epg_created_for_network = eepg
self.mock_response_for_get('fvRsPathAtt', tDn='foo')
self.mgr.ensure_path_created_for_port('tenant', 'network', 'rhel01',
'static', 'netname')
self.assert_responses_drained()
def test_ensure_path_created_for_port_no_path_att(self):
epg = mock.Mock()
epg.epg_id = 'epg2'
eepg = mock.Mock(return_value=epg)
self.mgr.ensure_epg_created_for_network = eepg
self.mock_response_for_get('fvRsPathAtt')
self.mock_responses_for_create('fvRsPathAtt')
self.mgr.ensure_path_created_for_port('tenant', 'network', 'ubuntu2',
'static', 'netname')
self.assert_responses_drained()
def test_ensure_path_created_for_port_unknown_host(self):
epg = mock.Mock()
epg.epg_id = 'epg3'
eepg = mock.Mock(return_value=epg)
apic_manager.APICManager.ensure_epg_created_for_network = eepg
self.mock_response_for_get('fvRsPathAtt', tDn='foo')
self.assertRaises(cexc.ApicHostNotConfigured,
self.mgr.ensure_path_created_for_port,
'tenant', 'network', 'cirros3', 'static', 'netname')
def test_create_tenant_filter(self):
tenant = mocked.APIC_TENANT
self.mock_responses_for_create('vzFilter')
self.mock_responses_for_create('vzEntry')
filter_id = self.mgr.create_tenant_filter(tenant)
self.assert_responses_drained()
self.assertTrue(uuidutils.is_uuid_like(str(filter_id)))
def test_create_tenant_filter_exc(self):
tenant = mocked.APIC_TENANT
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('vzFilter')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.create_tenant_filter, tenant)
self.assert_responses_drained()
def test_set_contract_for_epg_consumer(self):
tenant = mocked.APIC_TENANT
epg = mocked.APIC_EPG
contract = mocked.APIC_CONTRACT
self.mock_responses_for_create('fvRsCons')
self.mgr.set_contract_for_epg(tenant, epg, contract)
self.assert_responses_drained()
def test_set_contract_for_epg_provider(self):
tenant = mocked.APIC_TENANT
epg = mocked.APIC_EPG
contract = mocked.APIC_CONTRACT
epg_obj = mock.Mock()
epg_obj.epg_id = epg
epg_obj.provider = False
self.mock_db_query_filterby_first_return(epg_obj)
self.mock_responses_for_create('fvRsProv')
self.mock_response_for_post('vzBrCP')
self.mgr.set_contract_for_epg(tenant, epg, contract, provider=True)
self.assert_responses_drained()
self.assertTrue(self.mocked_session.merge.called)
self.assertTrue(self.mocked_session.flush.called)
self.assertTrue(epg_obj.provider)
def test_set_contract_for_epg_provider_exc(self):
tenant = mocked.APIC_TENANT
epg = mocked.APIC_EPG
contract = mocked.APIC_CONTRACT
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('vzBrCP')
self.mock_response_for_post('fvRsProv')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.set_contract_for_epg,
tenant, epg, contract, provider=True)
self.assert_responses_drained()
def test_delete_contract_for_epg_consumer(self):
tenant = mocked.APIC_TENANT
epg = mocked.APIC_EPG
contract = mocked.APIC_CONTRACT
self.mock_response_for_post('fvRsCons')
self.mgr.delete_contract_for_epg(tenant, epg, contract)
self.assert_responses_drained()
def test_delete_contract_for_epg_provider(self):
tenant = mocked.APIC_TENANT
epg = mocked.APIC_EPG
contract = mocked.APIC_CONTRACT
epg_obj = mock.Mock()
epg_obj.epg_id = epg + '-other'
epg_obj.provider = False
self.mock_db_query_filterby_first_return(epg_obj)
self.mock_response_for_post('fvRsProv')
self.mock_response_for_post('fvRsCons')
self.mock_responses_for_create('fvRsProv')
self.mock_response_for_post('vzBrCP')
self.mgr.delete_contract_for_epg(tenant, epg, contract, provider=True)
self.assert_responses_drained()
self.assertTrue(self.mocked_session.merge.called)
self.assertTrue(self.mocked_session.flush.called)
self.assertTrue(epg_obj.provider)
def test_create_tenant_contract_existing(self):
tenant = mocked.APIC_TENANT
contract = mocked.APIC_CONTRACT
self.mock_db_query_filterby_first_return(contract)
new_contract = self.mgr.create_tenant_contract(tenant)
self.assertEqual(new_contract, contract)
def test_create_tenant_contract_new(self):
tenant = mocked.APIC_TENANT
contract = mocked.APIC_CONTRACT
dn = self.mgr.apic.vzBrCP.mo.dn(tenant, contract)
self.mock_db_query_filterby_first_return(None)
self.mock_responses_for_create('vzBrCP')
self.mock_response_for_get('vzBrCP', dn=dn)
self.mock_responses_for_create('vzSubj')
self.mock_responses_for_create('vzFilter')
self.mock_responses_for_create('vzEntry')
self.mock_responses_for_create('vzInTerm')
self.mock_responses_for_create('vzRsFiltAtt__In')
self.mock_responses_for_create('vzOutTerm')
self.mock_responses_for_create('vzRsFiltAtt__Out')
self.mock_responses_for_create('vzCPIf')
self.mock_responses_for_create('vzRsIf')
new_contract = self.mgr.create_tenant_contract(tenant)
self.assert_responses_drained()
self.assertTrue(self.mocked_session.add.called)
self.assertTrue(self.mocked_session.flush.called)
self.assertEqual(new_contract['tenant_id'], tenant)
def test_create_tenant_contract_exc(self):
tenant = mocked.APIC_TENANT
self.mock_db_query_filterby_first_return(None)
self.mock_error_post_response(wexc.HTTPBadRequest)
self.mock_response_for_post('vzBrCP')
self.assertRaises(cexc.ApicResponseNotOk,
self.mgr.create_tenant_contract, tenant)
self.assert_responses_drained()
avg_line_length: 43.160229 | max_line_length: 79 | alphanum_fraction: 0.694156

hexsha: 058a645dd3fdea3da28ac62eb3367f6f9f14555e | size: 408 | ext: py | lang: Python
max_stars_repo_path: jetavator_databricks_local/jetavator_databricks_local/config/secret_lookup/DatabricksSecretLookup.py | max_stars_repo_name: jetavator/jetavator_databricks | max_stars_repo_head_hexsha: 719c934b6391f6f41ca34b4d4df8c697c1a25283 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: jetavator_databricks_local/jetavator_databricks_local/config/secret_lookup/DatabricksSecretLookup.py | max_issues_repo_name: jetavator/jetavator_databricks | max_issues_repo_head_hexsha: 719c934b6391f6f41ca34b4d4df8c697c1a25283 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: jetavator_databricks_local/jetavator_databricks_local/config/secret_lookup/DatabricksSecretLookup.py | max_forks_repo_name: jetavator/jetavator_databricks | max_forks_repo_head_hexsha: 719c934b6391f6f41ca34b4d4df8c697c1a25283 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from jetavator.config.secret_lookup import SecretLookup
from lazy_property import LazyProperty
class DatabricksSecretLookup(SecretLookup, register_as='databricks'):
@LazyProperty
def dbutils(self):
import IPython
return IPython.get_ipython().user_ns["dbutils"]
def lookup_secret(self, secret_name):
return self.dbutils.secrets.get(scope="jetavator", key=secret_name)
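# A minimal usage sketch, not part of the original module: it assumes an already-constructed
# lookup instance (the SecretLookup base-class constructor is not shown here) and an
# illustrative key name, and only demonstrates the call path into Databricks-managed secrets.
def _example_resolve_secret(lookup: 'DatabricksSecretLookup') -> str:
    # Equivalent to dbutils.secrets.get(scope="jetavator", key="db_password") in a notebook.
    return lookup.lookup_secret("db_password")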
avg_line_length: 27.2 | max_line_length: 75 | alphanum_fraction: 0.754902

hexsha: 2b754ee37c2c119486f850073956693d21bd99a4 | size: 10,002 | ext: py | lang: Python
max_stars_repo_path: qa/common/gen_tag_sigdef.py | max_stars_repo_name: jiweibo/triton_server | max_stars_repo_head_hexsha: a0f7868eb0cad9d6a119edf845387ceae808d350 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-12-16T03:54:01.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-16T03:54:01.000Z
max_issues_repo_path: qa/common/gen_tag_sigdef.py | max_issues_repo_name: jiweibo/triton_server | max_issues_repo_head_hexsha: a0f7868eb0cad9d6a119edf845387ceae808d350 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: qa/common/gen_tag_sigdef.py | max_forks_repo_name: jiweibo/triton_server | max_forks_repo_head_hexsha: a0f7868eb0cad9d6a119edf845387ceae808d350 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-07-26T13:00:08.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-26T13:00:08.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import os
from builtins import range
from future.utils import iteritems
import unittest
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
import tensorflow.compat.v1 as tf
import gen_ensemble_model_utils as gu
"""Create SaveModels that contains multiple tags and multiple signature defs"""
def create_savedmodel(models_dir,
model_version=1,
dims=16,
model_name="sig_tag",
tag_name="testTag",
signature_def_name="testSigDef"):
"""
    Creates SavedModels that exercise different combinations of tag and signature_def.
    Each model multiplies the input tensor by a constant, and the constant differs for each
    tag / signature_def combination.
    Naming convention and config used:
<model_name>0: tag: "serve", signature_def: "serving_default", multiplier 1
<model_name>1: tag: "serve", signature_def: <signature_def_name>, multiplier 2
<model_name>2: tag: <tag_name>, signature_def: "serving_default", multiplier 3
<model_name>3: tag: <tag_name>, signature_def: <signature_def_name>, multiplier 4
"""
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with tf.Session() as sess:
input_tensor = tf.placeholder(tf.float32, [dims], "TENSOR_INPUT")
# tag:"serve", signature_def:"serving_default"
multiplier_0 = tf.constant(1.0, name="multiplier_0")
# tag:"serve", signature_def:signature_def_name
multiplier_1 = tf.constant(2.0, name="multiplier_1")
# tag:tag_name, signature_def:"serving_default"
multiplier_2 = tf.constant(3.0, name="multiplier_2")
# tag:tag_name, signature_def:signature_def_name
multiplier_3 = tf.constant(4.0, name="multiplier_3")
output_tensor_0 = tf.multiply(multiplier_0,
input_tensor,
name="TENSOR_OUTPUT")
output_tensor_1 = tf.multiply(multiplier_1,
input_tensor,
name="TENSOR_OUTPUT")
output_tensor_2 = tf.multiply(multiplier_2,
input_tensor,
name="TENSOR_OUTPUT")
output_tensor_3 = tf.multiply(multiplier_3,
input_tensor,
name="TENSOR_OUTPUT")
# build_tensor_info_op could be used if build_tensor_info is deprecated
input_tensor_info = tf.saved_model.utils.build_tensor_info(input_tensor)
output_tensor_info_0 = tf.saved_model.utils.build_tensor_info(
output_tensor_0)
output_tensor_info_1 = tf.saved_model.utils.build_tensor_info(
output_tensor_1)
output_tensor_info_2 = tf.saved_model.utils.build_tensor_info(
output_tensor_2)
output_tensor_info_3 = tf.saved_model.utils.build_tensor_info(
output_tensor_3)
# Using predict method name because simple save uses it
# tag:"serve", signature_def:"serving_default"
signature_0 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_0},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# tag:"serve", signature_def:signature_def_name
signature_1 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_1},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# tag:tag_name, signature_def:"serving_default"
signature_2 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_2},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# tag:tag_name, signature_def:signature_def_name
signature_3 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_3},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
signature_def_map_0 = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_0,
signature_def_name: signature_1
}
signature_def_map_1 = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_2,
signature_def_name: signature_3
}
b = builder.SavedModelBuilder(model_version_dir + "/model.savedmodel")
b.add_meta_graph_and_variables(sess,
tags=[tag_constants.SERVING],
signature_def_map=signature_def_map_0,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
clear_devices=True)
b.add_meta_graph(tags=[tag_name],
signature_def_map=signature_def_map_1,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
clear_devices=True)
b.save()
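# A hedged verification sketch, not part of the original script: it reloads one of the
# SavedModels written above with the TF1-style loader and runs the custom signature under the
# custom tag. By the convention documented in create_savedmodel, that graph multiplies the
# input by 4. The saved_model_dir argument is the <model_version_dir>/model.savedmodel path.
def _example_run_custom_signature(saved_model_dir, tag_name="testTag", sig_name="testSigDef"):
    with tf.Session(graph=tf.Graph()) as sess:
        meta_graph = tf.saved_model.loader.load(sess, [tag_name], saved_model_dir)
        sig = meta_graph.signature_def[sig_name]
        feed = {sig.inputs["INPUT"].name: np.ones(16, dtype=np.float32)}
        return sess.run(sig.outputs["OUTPUT"].name, feed_dict=feed)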
def create_savedmodel_modelconfig(models_dir,
model_version=1,
dims=16,
model_name="sig_tag",
tag_name="testTag",
signature_def_name="testSigDef"):
config_dir = models_dir + "/" + model_name
config = '''
name: "{}"
platform: "tensorflow_savedmodel"
input [
{{
name: "INPUT"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT"
data_type: {}
dims: [ {} ]
}}
]
parameters: {{
key: "TF_GRAPH_TAG"
value: {{
string_value: "{}"
}}
}}
parameters: {{
key: "TF_SIGNATURE_DEF"
value: {{
string_value: "{}"
}}
}}
'''.format(model_name, gu.np_to_model_dtype(tf.float32), str(dims),
gu.np_to_model_dtype(tf.float32), str(dims), tag_name,
signature_def_name)
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='getting model output dir')
parser.add_argument('--dir', help='directory to run model in')
args = parser.parse_args()
base_dir = args.dir
base_model_name = "sig_tag"
base_tag = "serve"
test_tag = "testTag"
base_sig_def = "serving_default"
test_sig_def = "testSigDef"
for i in range(4):
model_name = base_model_name + str(i)
create_savedmodel(args.dir,
model_name=model_name,
tag_name=test_tag,
signature_def_name=test_sig_def)
create_savedmodel_modelconfig(args.dir,
model_name="sig_tag0",
tag_name=base_tag,
signature_def_name=base_sig_def)
create_savedmodel_modelconfig(args.dir,
model_name="sig_tag1",
tag_name=base_tag,
signature_def_name=test_sig_def)
create_savedmodel_modelconfig(args.dir,
model_name="sig_tag2",
tag_name=test_tag,
signature_def_name=base_sig_def)
create_savedmodel_modelconfig(args.dir,
model_name="sig_tag3",
tag_name=test_tag,
signature_def_name=test_sig_def)
avg_line_length: 42.74359 | max_line_length: 112 | alphanum_fraction: 0.630574

hexsha: 0c10e7de42187b5ff38c7a04fee9d96da4eb6ea0 | size: 2,026 | ext: py | lang: Python
max_stars_repo_path: paddlers/transforms/__init__.py | max_stars_repo_name: huilin16/PaddleRS | max_stars_repo_head_hexsha: ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 40 | max_stars_repo_stars_event_min_datetime: 2022-02-28T02:07:28.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T09:54:29.000Z
max_issues_repo_path: paddlers/transforms/__init__.py | max_issues_repo_name: wondering516/PaddleRS | max_issues_repo_head_hexsha: b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 5 | max_issues_repo_issues_event_min_datetime: 2022-03-15T12:13:33.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T15:54:08.000Z
max_forks_repo_path: paddlers/transforms/__init__.py | max_forks_repo_name: wondering516/PaddleRS | max_forks_repo_head_hexsha: b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 20 | max_forks_repo_forks_event_min_datetime: 2022-02-28T02:07:31.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T11:40:40.000Z
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .operators import *
from .batch_operators import BatchRandomResize, BatchRandomResizeByShort, _BatchPadding
from paddlers import transforms as T
def arrange_transforms(model_type, transforms, mode='train'):
    # Append the appropriate Arrange* operator to the transforms pipeline
if model_type == 'segmenter':
if mode == 'eval':
transforms.apply_im_only = True
else:
transforms.apply_im_only = False
arrange_transform = ArrangeSegmenter(mode)
elif model_type == 'changedetector':
if mode == 'eval':
transforms.apply_im_only = True
else:
transforms.apply_im_only = False
arrange_transform = ArrangeChangeDetector(mode)
elif model_type == 'classifier':
arrange_transform = ArrangeClassifier(mode)
elif model_type == 'detector':
arrange_transform = ArrangeDetector(mode)
else:
raise Exception("Unrecognized model type: {}".format(model_type))
transforms.arrange_outputs = arrange_transform
def build_transforms(transforms_info):
transforms = list()
for op_info in transforms_info:
op_name = list(op_info.keys())[0]
op_attr = op_info[op_name]
if not hasattr(T, op_name):
raise Exception("There's no transform named '{}'".format(op_name))
transforms.append(getattr(T, op_name)(**op_attr))
eval_transforms = T.Compose(transforms)
return eval_transforms
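# A minimal usage sketch, not part of the original module: the operator names and attributes
# below are illustrative assumptions; any transform class exported from paddlers.transforms
# can be referenced by name in the same {op_name: {attr: value}} form.
def _example_build_transforms():
    transforms_info = [
        {'Resize': {'target_size': 512}},
        {'Normalize': {'mean': [0.5, 0.5, 0.5], 'std': [0.5, 0.5, 0.5]}},
    ]
    return build_transforms(transforms_info)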
avg_line_length: 38.226415 | max_line_length: 87 | alphanum_fraction: 0.705824

hexsha: d3532b268dcfcc065a9c981e6bc1f18c971d1c70 | size: 867 | ext: py | lang: Python
max_stars_repo_path: pyfitterbap/version.py | max_stars_repo_name: jetperch/fitterbap | max_stars_repo_head_hexsha: dc29db72c2d7b01d90556a251be0a361574033bc | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 21 | max_stars_repo_stars_event_min_datetime: 2021-05-14T20:16:56.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T18:54:31.000Z
max_issues_repo_path: pyfitterbap/version.py | max_issues_repo_name: jetperch/fitterbap | max_issues_repo_head_hexsha: dc29db72c2d7b01d90556a251be0a361574033bc | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pyfitterbap/version.py | max_forks_repo_name: jetperch/fitterbap | max_forks_repo_head_hexsha: dc29db72c2d7b01d90556a251be0a361574033bc | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Copyright 2021 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.4.1"
__title__ = 'pyfitterbap'
__description__ = 'Fitterbap python bindings'
__url__ = 'https://github.com/jetperch/fitterbap'
__author__ = 'Jetperch LLC'
__author_email__ = 'dev@jetperch.com'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017-2021 Jetperch LLC'
avg_line_length: 34.68 | max_line_length: 74 | alphanum_fraction: 0.762399

hexsha: ac9a483e6fd3fc024fb2ad6e2d92bd929abe01af | size: 12,198 | ext: py | lang: Python
max_stars_repo_path: scripts/fortran_tools/fortran_write.py | max_stars_repo_name: pjpegion/ccpp-framework | max_stars_repo_head_hexsha: 6874fc9b49237b70df7af9b513ea10df697c27d6 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 17 | max_stars_repo_stars_event_min_datetime: 2018-04-17T16:02:31.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-06T11:26:43.000Z
max_issues_repo_path: scripts/fortran_tools/fortran_write.py | max_issues_repo_name: pjpegion/ccpp-framework | max_issues_repo_head_hexsha: 6874fc9b49237b70df7af9b513ea10df697c27d6 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 305 | max_issues_repo_issues_event_min_datetime: 2018-03-27T15:44:36.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T02:37:05.000Z
max_forks_repo_path: scripts/fortran_tools/fortran_write.py | max_forks_repo_name: pjpegion/ccpp-framework | max_forks_repo_head_hexsha: 6874fc9b49237b70df7af9b513ea10df697c27d6 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 48 | max_forks_repo_forks_event_min_datetime: 2018-03-26T21:37:46.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-24T12:29:57.000Z
#!/usr/bin/env python
#
"""Code to write Fortran code
"""
# Python library imports
from __future__ import print_function
# CCPP framework imports
class FortranWriter(object):
"""Class to turn output into properly continued and indented Fortran code
>>> FortranWriter("foo.F90", 'r', 'test', 'mod_name') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: Read mode not allowed in FortranWriter object
>>> FortranWriter("foo.F90", 'wb', 'test', 'mod_name') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: Binary mode not allowed in FortranWriter object
"""
###########################################################################
# Class variables
###########################################################################
__INDENT = 3 # Spaces per indent level
__CONTINUE_INDENT = 5 # Extra spaces on continuation line
__LINE_FILL = 97 # Target line length
__LINE_MAX = 130 # Max line length
# CCPP copyright statement to be included in all generated Fortran files
__COPYRIGHT = '''!
! This work (Common Community Physics Package Framework), identified by
! NOAA, NCAR, CU/CIRES, is free of known copyright restrictions and is
! placed in the public domain.
!
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
! THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
! IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
! CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
__MOD_HEADER = '''
!>
!! @brief Auto-generated {file_desc}
!!
!
module {module}
'''
__MOD_PREAMBLE = ["implicit none", "private"]
__CONTAINS = '''
CONTAINS'''
__MOD_FOOTER = '''
end module {module}'''
###########################################################################
def indent(self, level=0, continue_line=False):
'Return an indent string for any level'
indent = self._indent * level
if continue_line:
indent = indent + self._continue_indent
# End if
return indent*' '
###########################################################################
def find_best_break(self, choices, last=None):
"""Find the best line break point given <choices>.
If <last> is present, use it as a target line length."""
if last is None:
last = self._line_fill
# End if
# Find largest good break
possible = [x for x in choices if x < last]
if not possible:
best = self._line_max + 1
else:
best = max(possible)
# End if
if (best > self._line_max) and (last < self._line_max):
best = self.find_best_break(choices, last=self._line_max)
# End if
return best
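    # Hedged illustration, not in the original source: with the default fill and max lengths
    # of 97 and 130, find_best_break([10, 50, 90, 120]) returns 90, the largest break point
    # under the fill target, while find_best_break([120]) finds no candidate under 97 and
    # retries against the 130-character hard limit, returning 120.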
###########################################################################
def write(self, statement, indent_level, continue_line=False):
"""Write <statement> to the open file, indenting to <indent_level>
(see self.indent).
If <continue_line> is True, treat this line as a continuation of
a previous statement."""
if isinstance(statement, list):
for stmt in statement:
self.write(stmt, indent_level, continue_line)
# End for
elif '\n' in statement:
for stmt in statement.split('\n'):
self.write(stmt, indent_level, continue_line)
# End for
else:
istr = self.indent(indent_level, continue_line)
outstr = istr + statement.strip()
line_len = len(outstr)
if line_len > self._line_fill:
# Collect pretty break points
spaces = list()
commas = list()
sptr = len(istr)
in_single_char = False
in_double_char = False
while sptr < line_len:
if in_single_char:
if outstr[sptr] == "'":
in_single_char = False
# End if (no else, just copy stuff in string)
elif in_double_char:
if outstr[sptr] == '"':
in_double_char = False
# End if (no else, just copy stuff in string)
elif outstr[sptr] == "'":
in_single_char = True
elif outstr[sptr] == '"':
in_double_char = True
elif outstr[sptr] == '!':
# Comment in non-character context, suck in rest of line
spaces.append(sptr-1)
sptr = line_len - 1
elif outstr[sptr] == ' ':
# Non-quote spaces are where we can break
spaces.append(sptr)
elif outstr[sptr] == ',':
# Non-quote commas are where we can break
commas.append(sptr)
elif outstr[sptr:sptr+2] == '//':
                        # Non-quote '//' concatenation operators are also break points
commas.append(sptr + 1)
# End if (no else, other characters will be ignored)
sptr = sptr + 1
# End while
best = self.find_best_break(spaces)
if best >= self._line_fill:
best = self.find_best_break(commas)
# End if
if best > self._line_max:
# This is probably a bad situation that might not
# compile, just write the line and hope for the best.
line_continue = False
elif len(outstr) > best:
# If next line is just comment, do not use continue
# NB: Is this a Fortran issue or just a gfortran issue?
line_continue = outstr[best+1:].lstrip()[0] != '!'
else:
line_continue = True
# End if
if line_continue:
fill = "{}&".format((self._line_fill - best)*' ')
else:
fill = ''
# End if
self._file.write("{}{}\n".format(outstr[0:best+1], fill))
statement = outstr[best+1:]
self.write(statement, indent_level, continue_line=line_continue)
else:
self._file.write("{}\n".format(outstr))
# End if
# End if
###########################################################################
def __init__(self, filename, mode, file_description, module_name,
indent=None, continue_indent=None,
line_fill=None, line_max=None):
"""Initialize thie FortranWriter object.
Some boilerplate is written automatically."""
self.__file_desc = file_description
self.__module = module_name
# We only handle writing situations (for now) and only text
if 'r' in mode:
raise ValueError('Read mode not allowed in FortranWriter object')
# end if
if 'b' in mode:
raise ValueError('Binary mode not allowed in FortranWriter object')
# End if
self._file = open(filename, mode)
if indent is None:
self._indent = FortranWriter.__INDENT
else:
self._indent = indent
# End if
if continue_indent is None:
self._continue_indent = FortranWriter.__CONTINUE_INDENT
else:
self._continue_indent = continue_indent
# End if
if line_fill is None:
self._line_fill = FortranWriter.__LINE_FILL
else:
self._line_fill = line_fill
# End if
if line_max is None:
self._line_max = FortranWriter.__LINE_MAX
else:
self._line_max = line_max
# End if
###########################################################################
def write_preamble(self):
"""Write the module boilerplate that goes between use statements
and module declarations."""
self.write("", 0)
for stmt in FortranWriter.__MOD_PREAMBLE:
self.write(stmt, 1)
# end for
self.write("", 0)
###########################################################################
def end_module_header(self):
"""Write the module contains statement."""
self.write(FortranWriter.__CONTAINS, 0)
###########################################################################
def __enter__(self, *args):
self.write(FortranWriter.__COPYRIGHT, 0)
self.write(self.module_header(), 0)
return self
###########################################################################
def __exit__(self, *args):
self.write(FortranWriter.__MOD_FOOTER.format(module=self.__module), 0)
self._file.close()
return False
###########################################################################
def module_header(self):
"""Return the standard Fortran module header for <filename> and
<module>"""
return FortranWriter.__MOD_HEADER.format(file_desc=self.__file_desc,
module=self.__module)
###########################################################################
@classmethod
def copyright(cls):
"""Return the standard Fortran file copyright string"""
return cls.__COPYRIGHT
###############################################################################
if __name__ == "__main__":
# First, run doctest
import doctest
doctest.testmod()
# Make sure we can write a file
import sys
import os
import os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
NAME = 'foo'
while os.path.exists(NAME+'.F90'):
NAME = NAME + 'xo'
# End while
NAME = NAME + '.F90'
if os.access(os.getcwd(), os.W_OK):
_CHECK = FortranWriter.copyright().split('\n')
with FortranWriter(NAME, 'w', 'doctest', 'foo') as foo:
foo.write_preamble()
foo.end_module_header()
foo.write(("subroutine foo(long_argument1, long_argument2, "
"long_argument3, long_argument4, long_argument5)"), 2)
foo.write("end subroutine foo", 2)
_CHECK.extend(foo.module_header().rstrip().split('\n'))
# End with
_CHECK.extend(["", "", " implicit none", " private",
"", "", "CONTAINS"])
_CHECK.extend([(' subroutine foo(long_argument1, long_argument2, '
'long_argument3, long_argument4, &'),
' long_argument5)',
' end subroutine foo', '',
'end module foo'])
# Check file
with open(NAME, 'r') as foo:
_STATEMENTS = foo.readlines()
if len(_STATEMENTS) != len(_CHECK):
EMSG = "ERROR: File has {} statements, should have {}"
print(EMSG.format(len(_STATEMENTS), len(_CHECK)))
else:
for _line_num, _statement in enumerate(_STATEMENTS):
if _statement.rstrip() != _CHECK[_line_num]:
EMSG = "ERROR: Line {} does not match"
print(EMSG.format(_line_num+1))
print("{}".format(_statement.rstrip()))
print("{}".format(_CHECK[_line_num]))
# End if
# End for
# End with
os.remove(NAME)
else:
print("WARNING: Unable to write test file, '{}'".format(NAME))
# End if
# No else
avg_line_length: 39.096154 | max_line_length: 93 | alphanum_fraction: 0.501476

hexsha: 62730bfa286433260c03cb4df1fb179fc5dde275 | size: 12,847 | ext: py | lang: Python
max_stars_repo_path: Blender/appdata_common/Blender Foundation/Blender/BLENDER_VERSION/scripts/addons/DTB/DtbCommands.py | max_stars_repo_name: guto88/DazToBlender | max_stars_repo_head_hexsha: 00656eadd67e5c5fca9a651ccc7209c7af67d79f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Blender/appdata_common/Blender Foundation/Blender/BLENDER_VERSION/scripts/addons/DTB/DtbCommands.py | max_issues_repo_name: guto88/DazToBlender | max_issues_repo_head_hexsha: 00656eadd67e5c5fca9a651ccc7209c7af67d79f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Blender/appdata_common/Blender Foundation/Blender/BLENDER_VERSION/scripts/addons/DTB/DtbCommands.py | max_forks_repo_name: guto88/DazToBlender | max_forks_repo_head_hexsha: 00656eadd67e5c5fca9a651ccc7209c7af67d79f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import bpy
import os
import sys
sys.path.append(os.path.dirname(__file__))
from . import DataBase
from . import Versions
from . import MatDct
from . import DtbMaterial
from . import Util
from . import Global
from . import Poses
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty
from bpy.types import Operator
class SEARCH_OT_Commands(bpy.types.Operator):
bl_idname = "command.search"
bl_label = 'Command'
def execute(self, context):
search_morph(context)
return {'FINISHED'}
def search_morph_(self, context):
search_morph(context)
def search_morph(context):
w_mgr = context.window_manager
key = w_mgr.search_prop
nozero = False
if key.startswith("!"):
nozero = True
key = key[1:]
if len(key) < 2:
return
if key.startswith("#"):
WCmd.Command(key[1:], context)
return
cobj = bpy.context.object
mesh = cobj.data
for z in range(2):
find = False
max = len(mesh.shape_keys.key_blocks)
for kidx, kb in enumerate(mesh.shape_keys.key_blocks):
if kidx <= Versions.get_active_object().active_shape_key_index:
continue
if nozero and kb.value == 0.0:
continue
if (key.lower() in kb.name.lower()):
Versions.get_active_object().active_shape_key_index = kidx
find = True
break
if z == 0 and find == False:
if max > 1:
Versions.get_active_object().active_shape_key_index = 1
else:
break
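# Hedged illustration of the search syntax, not part of the original add-on: typing "smile"
# moves the active shape-key index to the next key whose name contains "smile"; "!smile"
# additionally skips keys whose current value is 0.0; a leading '#', as in "#realsize",
# hands the remaining text to the command handler (apparently the Command class below,
# reached here through the WCmd reference).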
get_obj_name = ""
class ImportFilesCollection(bpy.types.PropertyGroup):
name : StringProperty(
name="File Path",
description="Filepath used for importing the file",
maxlen=1024,
subtype='FILE_PATH',
)
bpy.utils.register_class(ImportFilesCollection)
class IMP_OT_dir(bpy.types.Operator, ImportHelper):
bl_idname = "imp.material"
bl_label = "Import material"
    bl_description = 'Process the selected directory'
bl_label = "Select Directory"
filepath : StringProperty(
name="input file",
subtype= 'DIR_PATH'
)
filename_ext : ""#*.png;*.jpg;*.bmp;*.exr;*.jpeg;*.tif;*.gif"
filter_glob : StringProperty(
default="",#*.png;*.jpg;*.bmp;*.exr;*.jpeg;*.tif;*.gif",
options={'HIDDEN'},
)
def execute(self, context):
if os.path.isfile(self.filepath):
self.filepath = os.path.dirname(self.filepath)
md = MatDct.MatDct()
if self.filepath.endswith("\\"):
self.filepath = self.filepath[0:len(self.filepath)-1]
md.make_dct_from_directory(self.filepath)
dct = md.get_dct()
DtbMaterial.readImages(dct)
return{'FINISHED'}
bpy.utils.register_class(IMP_OT_dir)
class IMP_OT_object(Operator, ImportHelper):
bl_idname = "imp.object"
bl_label = "Import Daz G8 Object"
filename_ext : ".obj"
filter_glob : StringProperty(
default="*.obj",
options={'HIDDEN'},
)
files : bpy.props.CollectionProperty(type=ImportFilesCollection)
def execute(self, context):
dirname = os.path.dirname(self.filepath)
for i, f in enumerate(self.files, 1):
print("f===",f)
durPath = (os.path.join(dirname, f.name))
from . import ToHighReso
tgm = ToHighReso.get_Morph(durPath, get_obj_name)
if ('face' in get_obj_name):
tgm.get_face_or_body('face')
elif ('body' in get_obj_name):
tgm.get_face_or_body('body')
else:
rls = ['r','l']
for rl in rls:
if ('hand' in get_obj_name):
tgm.get_face_or_body(rl + 'hand')
elif ('leg' in get_obj_name):
tgm.get_face_or_body(rl + 'knee')
break
return {'FINISHED'}
bpy.utils.register_class(IMP_OT_object)
class IMP_OT_dazG8_pose(Operator, ImportHelper):
bl_idname = "import_daz_g8.pose"
bl_label = "Import Daz G8 Pose"
filename_ext : ".duf"
filter_glob : StringProperty(
default="*.duf",
options={'HIDDEN'},
)
files : bpy.props.CollectionProperty(type=ImportFilesCollection)
def execute(self, context):
dirname = os.path.dirname(self.filepath)
for i, f in enumerate(self.files, 1):
durPath = (os.path.join(dirname, f.name))
up = Poses.Posing("POSE")
up.pose_copy(durPath)
return {'FINISHED'}
bpy.utils.register_class(IMP_OT_dazG8_pose)
class Command:
def __init__(self,key,context):
key = Global.orthopedic_sharp(key)
Util.active_object_to_current_collection()
not_erace = ['getpose', 'accessory']
kwd = ['getface', 'getbody', 'gethand', 'rentface', 'rentbody', 'renthand', "getleg", "rentleg"]
flg_morph = False
for kw in kwd:
if key.startswith(kw):
if Global.getAmtr() is None and Global.getRgfy() is not None:
Versions.msg("This feature does not work in Rigify mode", "I'm sorry", "INFO")
w_mgr = context.window_manager
w_mgr.search_prop = ""
flg_morph = False
else:
flg_morph = True
break
if key=='getpose':
if Global.getAmtr() is None:
Versions.msg("Invalid Command", "Message")
return
Global.deselect()
Versions.select(Global.getAmtr(),True)
Versions.active_object(Global.getAmtr())
Global.setOpsMode("POSE")
bpy.ops.import_daz_g8.pose('INVOKE_DEFAULT')
elif key=='getgenital':
Get_Genital()
elif key=='finger' and Global.getAmtr() is not None:
Global.finger(0)
elif key=='realsize':
if Global.getAmtr() is None:
Versions.msg("This function is effective only in the basic mode", "Message", 'INFO')
return
if Global.getSize() == 1:
Versions.msg("Already Real Size", "Message", 'INFO')
return
Global.changeSize(1,[])
Global.scale_environment()
elif key=='gettexture':
bpy.ops.imp.material('INVOKE_DEFAULT')
elif key=='clearextrabones':
Global.deselect()
Versions.active_object_none()
for obj in Util.myacobjs():
if obj.type != 'ARMATURE':
continue
Versions.select(obj, True)
Versions.active_object(obj)
Global.setOpsMode("EDIT")
db = DataBase.DB()
dels = []
for eb in obj.data.edit_bones:
for bb in db.tbl_basic_bones:
if eb.name.startswith(bb[0] + ".00"):
dels.append(eb)
break
for d in dels:
obj.data.edit_bones.remove(d)
Global.deselect()
Global.setOpsMode("POSE")
elif key=='geograft':
print("IsMan",Global.getIsMan(),"--GetIdx",Global.get_geo_idx())
elif key=='spliteyelash' and Global.getIsG3()==False:
Global.deselect()
Global.setOpsMode("OBJECT")
Versions.select(Global.getBody(),True)
Versions.active_object(Global.getBody())
removeEyelash()
Global.setOpsMode("OBJECT")
elif key=='realskin':
DtbMaterial.skin_levl(True)
elif key=='easyskin':
DtbMaterial.skin_levl(False)
elif key=='myheros':
print(Global.getAmtr(),Global.getRgfy(),Global.getBody(),Global.getEnvRoot(),Global.getSize(),Util.cur_col_name(),
"*",Global.get_Amtr_name(),Global.get_Rgfy_name(),Global.get_Body_name())
elif key=='onedrive':
db = DataBase.DB()
from . import DtbShapeKeys
sk = DtbShapeKeys.DtbShapeKeys(False)
sk.makeOneDriver(db)
elif key=='clearmorph':
from . import DtbShapeKeys
sk = DtbShapeKeys.DtbShapeKeys(False)
sk.delete_oneobj_sk_from_command()
elif flg_morph:
global get_obj_name
Versions.active_object(Global.getBody())
Global.setOpsMode("OBJECT")
get_obj_name = key
bpy.ops.imp.object('INVOKE_DEFAULT')
elif key in not_erace:
pass
else:
Versions.msg("Invalid Command","Message")
if key not in not_erace:
w_mgr = context.window_manager
w_mgr.search_prop = ""
def removeEyelash():
import bmesh
Global.deselect()
Versions.select(Global.getBody(), True)
Versions.active_object(Global.getBody())
Global.setOpsMode("EDIT")
find = False
bpy.ops.mesh.select_all(action='DESELECT')
for sidx, slot in enumerate(Global.getBody().material_slots):
if slot.name.startswith("drb_EylsMoisture") or slot.name.startswith("drb_Eyelashes"):
Global.getBody().active_material_index = sidx
bpy.ops.object.material_slot_select()
find = True
bm = bmesh.from_edit_mesh(Global.getBody().data)
bm.verts.ensure_lookup_table()
cnt = 0
for i in range(1,60):
if bm.verts[len(bm.verts)-i].select:
cnt += 1
if cnt>50 and find:
bpy.ops.mesh.separate(type='SELECTED')
sobjs = bpy.context.selected_objects
for sobj in sobjs:
if sobj !=Global.getBody():
sobj.name = 'EyeLash'
Global.deselect()
Global.setOpsMode('OBJECT')
Versions.active_object(Global.getBody())
Global.setOpsMode('OBJECT')
class Get_Genital:
_EYLS = ""
def eyls(self):
if self._EYLS!="":
if self._EYLS in Util.myccobjs():
return Util.myccobjs().get(self._EYLS)
return None
def __init__(self):
if Global.getBody() is None:
return
self.exec_()
def check_eyls(self,dir):
ey_dir = dir + "EYELASH" + Global.getFileSp()
if os.path.exists(ey_dir):
ey_list = os.listdir(ey_dir)
for el in ey_list:
if el[len(el) - 4:] == '.obj':
Versions.import_obj(ey_dir + el)
new_obj_name = Global.what_new()
new_obj = Util.myccobjs().get(new_obj_name)
Versions.select(new_obj,True)
Versions.active_object(new_obj)
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
self._EYLS = new_obj_name
def exec_(self):
dir = Global.getRootPath()+"GEN" +Global.getFileSp()
if os.path.exists(dir)==False:
return
self.check_eyls(dir)
Global.deselect()
list = os.listdir(dir)
for lidx,l in enumerate(list):
if l[len(l)-4:].lower() !='.obj':
continue
now_eyls_obj = None
if self.eyls() is not None:
Versions.active_object(self.eyls())
now_eyls_obj = bpy.data.objects.new('EYELASH' + str(lidx), self.eyls().data)
Versions.set_link(now_eyls_obj,True,'DAZ_HIDE')
body = Versions.import_obj(dir+l)
if body is None:
continue
Versions.active_object(body)
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
oname = l[0:len(l)-4]
if (oname in body.name):
if now_eyls_obj is not None:
Versions.select(body, True)
Versions.select(now_eyls_obj,True)
Versions.active_object(body)
bpy.ops.object.join()
Versions.active_object(body)
else:
continue
if len(body.data.vertices) != len(Global.getBody().data.vertices):
Util.allobjs().remove(body)
continue
Versions.select(body,True)
Versions.select(Global.getBody(),True)
Versions.active_object(Global.getBody())
bpy.ops.object.join_shapes()
self.toMsGen()
Util.allobjs().remove(body)
Global.deselect()
if self.eyls() is not None:
Util.allobjs().remove(self.eyls())
def toMsGen(self):
mesh = Global.getBody().data
max = len(mesh.shape_keys.key_blocks)
kb = mesh.shape_keys.key_blocks[max-1]
kb.slider_min = -1
avg_line_length: 35.48895 | max_line_length: 126 | alphanum_fraction: 0.56449

hexsha: 916e9c237f7a7bcd712b42cf296d2a9f9ff196b3 | size: 6,666 | ext: py | lang: Python
max_stars_repo_path: imcsdk/mometa/storage/StorageLocalDiskProps.py | max_stars_repo_name: kgrozis/UCS-CIMC-Scripts | max_stars_repo_head_hexsha: 44069ee853299fe5aeed023e8c998ce2534b8d8b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: imcsdk/mometa/storage/StorageLocalDiskProps.py | max_issues_repo_name: kgrozis/UCS-CIMC-Scripts | max_issues_repo_head_hexsha: 44069ee853299fe5aeed023e8c998ce2534b8d8b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: imcsdk/mometa/storage/StorageLocalDiskProps.py | max_forks_repo_name: kgrozis/UCS-CIMC-Scripts | max_forks_repo_head_hexsha: 44069ee853299fe5aeed023e8c998ce2534b8d8b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""This module contains the general information for StorageLocalDiskProps ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class StorageLocalDiskPropsConsts():
pass
class StorageLocalDiskProps(ManagedObject):
"""This is StorageLocalDiskProps class."""
consts = StorageLocalDiskPropsConsts()
naming_props = set([])
mo_meta = MoMeta("StorageLocalDiskProps", "storageLocalDiskProps", "general-props", VersionMeta.Version151f, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'storageController', u'storageLocalDisk'], [], ["Get"])
prop_meta = {
"block_count": MoPropertyMeta("block_count", "blockCount", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"block_size": MoPropertyMeta("block_size", "blockSize", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"boot_drive": MoPropertyMeta("boot_drive", "bootDrive", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"coerced_size": MoPropertyMeta("coerced_size", "coercedSize", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"device_id": MoPropertyMeta("device_id", "deviceId", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"enclosure_device_id": MoPropertyMeta("enclosure_device_id", "enclosureDeviceId", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"health": MoPropertyMeta("health", "health", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"interface_type": MoPropertyMeta("interface_type", "interfaceType", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"link_speed": MoPropertyMeta("link_speed", "linkSpeed", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"media_error_count": MoPropertyMeta("media_error_count", "mediaErrorCount", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"media_type": MoPropertyMeta("media_type", "mediaType", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"non_coerced_size": MoPropertyMeta("non_coerced_size", "nonCoercedSize", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"other_error_count": MoPropertyMeta("other_error_count", "otherErrorCount", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"pd_status": MoPropertyMeta("pd_status", "pdStatus", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"physical_drive": MoPropertyMeta("physical_drive", "physicalDrive", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"power_state": MoPropertyMeta("power_state", "powerState", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"predictive_failure_count": MoPropertyMeta("predictive_failure_count", "predictiveFailureCount", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"raw_size": MoPropertyMeta("raw_size", "rawSize", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"sas_address0": MoPropertyMeta("sas_address0", "sasAddress0", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"sas_address1": MoPropertyMeta("sas_address1", "sasAddress1", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"sequence_number": MoPropertyMeta("sequence_number", "sequenceNumber", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
}
prop_map = {
"blockCount": "block_count",
"blockSize": "block_size",
"bootDrive": "boot_drive",
"childAction": "child_action",
"coercedSize": "coerced_size",
"deviceId": "device_id",
"dn": "dn",
"enclosureDeviceId": "enclosure_device_id",
"health": "health",
"interfaceType": "interface_type",
"linkSpeed": "link_speed",
"mediaErrorCount": "media_error_count",
"mediaType": "media_type",
"nonCoercedSize": "non_coerced_size",
"otherErrorCount": "other_error_count",
"pdStatus": "pd_status",
"physicalDrive": "physical_drive",
"powerState": "power_state",
"predictiveFailureCount": "predictive_failure_count",
"rawSize": "raw_size",
"rn": "rn",
"sasAddress0": "sas_address0",
"sasAddress1": "sas_address1",
"sequenceNumber": "sequence_number",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.block_count = None
self.block_size = None
self.boot_drive = None
self.child_action = None
self.coerced_size = None
self.device_id = None
self.enclosure_device_id = None
self.health = None
self.interface_type = None
self.link_speed = None
self.media_error_count = None
self.media_type = None
self.non_coerced_size = None
self.other_error_count = None
self.pd_status = None
self.physical_drive = None
self.power_state = None
self.predictive_failure_count = None
self.raw_size = None
self.sas_address0 = None
self.sas_address1 = None
self.sequence_number = None
self.status = None
ManagedObject.__init__(self, "StorageLocalDiskProps", parent_mo_or_dn, **kwargs)
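# A hedged usage sketch, not part of the generated module: it assumes an already logged-in
# imcsdk handle and that query_classid() accepts this class id; both are illustrative.
def _example_list_local_disk_props(handle):
    # Expected to return one StorageLocalDiskProps object per drive reported by the controller.
    return handle.query_classid("storageLocalDiskProps")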
avg_line_length: 64.096154 | max_line_length: 225 | alphanum_fraction: 0.666817

hexsha: c89f028890b6ca9ac66f10f1361fca5078168a85 | size: 58,956 | ext: py | lang: Python
max_stars_repo_path: astropy/io/ascii/core.py | max_stars_repo_name: nealmcb/astropy | max_stars_repo_head_hexsha: b1addf5ae4ae8c18b048940c460a3be2735f5392 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-03-27T04:13:08.000Z | max_stars_repo_stars_event_max_datetime: 2020-03-27T04:13:08.000Z
max_issues_repo_path: astropy/io/ascii/core.py | max_issues_repo_name: nealmcb/astropy | max_issues_repo_head_hexsha: b1addf5ae4ae8c18b048940c460a3be2735f5392 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: astropy/io/ascii/core.py | max_forks_repo_name: nealmcb/astropy | max_forks_repo_head_hexsha: b1addf5ae4ae8c18b048940c460a3be2735f5392 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
import copy
import csv
import functools
import itertools
import operator
import os
import re
import warnings
import inspect
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
        # If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
'''All instances of this class shall have the same hash.'''
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **data** : list of converted column values
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file_like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a ``read()`` method,
or a list of strings.
Returns
-------
lines : list
List of lines
"""
try:
if (hasattr(table, 'read')
or ('\n' not in table + '' and '\r' not in table + '')):
with get_readable_fileobj(table,
encoding=self.encoding) as fileobj:
table = fileobj.read()
lines = table.splitlines()
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
lines = table
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable')
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end."""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = ' '
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
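# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal demonstration of the generator protocol described in the BaseSplitter
# docstring; the sample lines and the ',' delimiter are made up for illustration.
def _demo_base_splitter():
    splitter = BaseSplitter()
    splitter.delimiter = ','
    # process_line/process_val strip whitespace, so this yields [['a', 'b'], ['1', '2']]
    return [vals for vals in splitter([' a , b ', '1,2'])]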
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = ' '
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r'\s':
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip()
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Returns
-------
lines : iterator
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = ' ' if self.delimiter == r'\s' else self.delimiter
csv_reader = csv.reader(lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
lineterminator='')
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals)
return out
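# --- Editor's illustrative sketch (not part of the original module) ---
# Shows that the csv-based DefaultSplitter keeps a quoted field intact;
# the sample row is made up for illustration.
def _demo_default_splitter():
    splitter = DefaultSplitter()
    splitter.delimiter = ','
    return list(splitter(['a,"b, c",d']))   # -> [['a', 'b, c', 'd']]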
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = 'NONE'
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
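# --- Editor's illustrative sketch (not part of the original module) ---
# Tabs outside of quotes are replaced while tabs inside the quoted substring survive;
# the sample line is made up for illustration.
def _demo_replace_tab_with_space():
    line = 'a\t"b\tc"\td'
    return _replace_tab_with_space(line, escapechar='\\', quotechar='"')
    # -> 'a "b\tc" d'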
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, '__call__'):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
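# --- Editor's illustrative sketch (not part of the original module) ---
# Positive indices pass through, negative indices count from the end of the lines,
# and a callable receives the lines themselves.
def _demo_get_line_index():
    lines = ['a', 'b', 'c', 'd']
    return (_get_line_index(1, lines),               # 1
            _get_line_index(-1, lines),              # 3
            _get_line_index(lambda ls: 0, lines))    # 0 (whatever the callable returns)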
class BaseHeader:
"""
Base table header reader
"""
auto_format = 'col{}'
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [re.sub('^' + self.comment, '', x).strip()
for x in comment_lines]
if comment_lines:
meta.setdefault('table', {})['comments'] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError('No data lines found so cannot autogenerate '
'column names')
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i)
for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
if self.comment:
re_comment = re.compile(self.comment)
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment is not False:
for comment in meta.get('comments', []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(range(self.start_line),
itertools.cycle(self.write_spacer_lines)):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(col.name if isinstance(col, Column) else col.info.name
for col in self.cols)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f'got column type {type(col)} instead of required '
f'{Column}')
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
            raise ValueError('Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name))
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (_is_number(name) or len(name) == 0
or name[0] in bads or name[-1] in bads):
raise InconsistentTableError(
'Column name {!r} does not meet strict name requirements'
.format(name))
# When guessing require at least two columns
if guessing and len(self.colnames) <= 1:
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(list(self.colnames)))
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
'Length of names argument ({}) does not match number'
' of table columns ({})'.format(len(names), len(self.colnames)))
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, '')]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""Set the ``data_lines`` attribute to the lines slice comprising the
table data values."""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""Set fill value for each column and then apply that fill value
In the first step it is evaluated with value from ``fill_values`` applies to
which column using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""Set the fill values of the individual cols based on fill_values of BaseData
fill values has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, 'fill_values'):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ''
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError("Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)")
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in ((i, x) for i, x in enumerate(self.header.colnames)
if x in affect_cols):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=numpy.bool)
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""Replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, 'mask'):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
'''convert all values in table to a list of lists of strings'''
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
if hasattr(self.start_line, '__call__'):
raise TypeError('Start_line attribute cannot be callable for write()')
else:
data_start_line = self.start_line or 0
        spacer_lines = itertools.cycle(self.write_spacer_lines)
        while len(lines) < data_start_line:
            # append actual spacer strings (not the cycle iterator object itself)
            lines.append(next(spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""
"""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
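# --- Editor's illustrative sketch (not part of the original module) ---
# Illustrates the <fill_spec> format documented in BaseData._set_fill_values;
# the column name 'col2' is made up for illustration.
def _demo_fill_values_spec():
    data = BaseData()
    # replace empty strings with '-999' in every column, and 'N/A' with '0'
    # only in the column named 'col2'
    data.fill_values = [('', '-999'), ('N/A', '0', 'col2')]
    return data.fill_values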
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
numpy.float64, numpy.str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
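# --- Editor's illustrative sketch (not part of the original module) ---
# convert_numpy returns a (converter, type-class) pair; the converter turns a
# list of strings into a numpy array of the requested dtype.
def _demo_convert_numpy():
    converter, converter_type = convert_numpy(numpy.float64)
    arr = converter(['1.5', '2.5'])           # array([1.5, 2.5])
    return arr, converter_type is FloatType   # (..., True)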
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
converters_out = []
try:
for converter in converters:
converter_func, converter_type = converter
if not issubclass(converter_type, NoType):
raise ValueError()
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError):
raise ValueError('Error: invalid format for converters, see '
'documentation\n{}'.format(converters))
return converters_out
def _convert_vals(self, cols):
for col in cols:
# If a specific dtype was specified for a column, then use that
# to set the defaults, otherwise use the generic defaults.
default_converters = ([convert_numpy(col.dtype)] if col.dtype
else self.default_converters)
# If the user supplied a specific convert then that takes precedence over defaults
converters = self.converters.get(col.name, default_converters)
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
# value of of last_error will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = 'no converters defined'
while not hasattr(col, 'data'):
try:
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError('converter type does not match column type')
col.data = converter_func(col.str_vals)
col.type = converter_type
except (TypeError, ValueError) as err:
col.converters.pop(0)
last_err = err
except OverflowError as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
warnings.warn(
"OverflowError converting to {} in column {}, reverting to String."
.format(converter_type.__name__, col.name), AstropyWarning)
col.converters.insert(0, convert_numpy(numpy.str))
last_err = err
except IndexError:
raise ValueError(f'Column {col.name} failed to convert: {last_err}')
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(numpy.int),
convert_numpy(numpy.float),
convert_numpy(numpy.str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, 'mask') and numpy.any(x.mask)
else x.data for x in cols]
out = Table(t_cols, names=[x.name for x in cols], meta=meta['table'])
for col, out_col in zip(cols, out.columns.values()):
for attr in ('format', 'unit', 'description'):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, 'meta'):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get('_format_name')
if format is None:
return
fast = dct.get('_fast')
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', [])
if dct.get('_io_registry_suffix'):
func = functools.partial(connect.io_identify, dct['_io_registry_suffix'])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(READ_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get('_io_registry_can_write', True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(WRITE_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
if names is not None:
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
names = set(table.colnames)
if include_names is not None:
names.intersection_update(include_names)
if exclude_names is not None:
names.difference_update(exclude_names)
if names != set(table.colnames):
remove_names = set(table.colnames) - set(names)
table.remove_columns(remove_names)
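# --- Editor's illustrative sketch (not part of the original module) ---
# Rename the columns of a small, made-up Table and keep only a subset of them.
def _demo_apply_include_exclude_names():
    t = Table([[1, 2], [3, 4], [5, 6]], names=('a', 'b', 'c'))
    _apply_include_exclude_names(t, names=['x', 'y', 'z'],
                                 include_names=['x', 'z'], exclude_names=['z'])
    return t.colnames   # ['x']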
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
# need to know about header (e.g. for fixed-width tables where widths are spec'd in header.
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(),
cols=OrderedDict())
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file_like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the new line specific to the OS.
with suppress(TypeError):
# Strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table)
# Set self.data.data_lines to a slice of lines contain the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = ('Number of header columns ({}) inconsistent with'
' data columns ({}) at data line {}\n'
'Header values: {}\n'
'Data values: {}'.format(
n_cols, len(str_vals), i,
[x.name for x in cols], str_vals))
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, 'table_meta'):
self.meta['table'].update(self.header.table_meta)
_apply_include_exclude_names(self.header, self.names,
self.include_names, self.exclude_names)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, 'lines'):
raise ValueError('Table must be read prior to accessing the header comment lines')
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
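# --- Editor's illustrative sketch (not part of the original module) ---
# A reader subclass that drops rows whose column count does not match the header
# instead of raising InconsistentTableError, as described in the
# BaseReader.inconsistent_handler docstring.
def _demo_skip_bad_rows_reader():
    class SkipBadRowsReader(BaseReader):
        def inconsistent_handler(self, str_vals, ncols):
            # returning None tells read() to silently skip the row
            return str_vals if len(str_vals) == ncols else None
    return SkipBadRowsReader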
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = '\\'
replace_char = ' '
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append(''.join(parts))
parts = []
return outlines
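# --- Editor's illustrative sketch (not part of the original module) ---
# Lines ending with the continuation character are merged with the next line;
# the sample rows are made up for illustration.
def _demo_continuation_lines():
    inputter = ContinuationLinesInputter()
    return inputter.process_lines(['1 2 \\', '3', '4 5 6'])
    # -> ['1 2  3', '4 5 6']   (the trailing backslash is replaced by a space)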
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (self.escapechar is None
or lastchar != self.escapechar):
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
extra_reader_pars = ('Reader', 'Inputter', 'Outputter',
'delimiter', 'comment', 'quotechar', 'header_start',
'data_start', 'data_end', 'converters', 'encoding',
'data_Splitter', 'header_Splitter',
'names', 'include_names', 'exclude_names', 'strict_names',
'fill_values', 'fill_include_names', 'fill_exclude_names')
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs['Inputter'] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if 'fast_reader' in kwargs:
if kwargs['fast_reader']['enable'] == 'force':
raise ParameterError('fast_reader required with '
'{}, but this is not a fast C reader: {}'
.format(kwargs['fast_reader'], Reader))
else:
del kwargs['fast_reader'] # Otherwise ignore fast_reader parameter
reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars)
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
if 'delimiter' in kwargs:
reader.header.splitter.delimiter = kwargs['delimiter']
reader.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
reader.header.comment = kwargs['comment']
reader.data.comment = kwargs['comment']
if 'quotechar' in kwargs:
reader.header.splitter.quotechar = kwargs['quotechar']
reader.data.splitter.quotechar = kwargs['quotechar']
if 'data_start' in kwargs:
reader.data.start_line = kwargs['data_start']
if 'data_end' in kwargs:
reader.data.end_line = kwargs['data_end']
if 'header_start' in kwargs:
if (reader.header.start_line is not None):
reader.header.start_line = kwargs['header_start']
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (('data_start' not in kwargs) and (default_header_length is not None)
and reader._format_name not in ['fixed_width_two_line', 'commented_header']):
reader.data.start_line = reader.header.start_line + default_header_length
elif kwargs['header_start'] is not None:
# User trying to set a None header start to some value other than None
raise ValueError('header_start cannot be modified for this Reader')
if 'converters' in kwargs:
reader.outputter.converters = kwargs['converters']
if 'data_Splitter' in kwargs:
reader.data.splitter = kwargs['data_Splitter']()
if 'header_Splitter' in kwargs:
reader.header.splitter = kwargs['header_Splitter']()
if 'names' in kwargs:
reader.names = kwargs['names']
if 'include_names' in kwargs:
reader.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
reader.exclude_names = kwargs['exclude_names']
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if 'strict_names' in kwargs:
reader.strict_names = kwargs['strict_names']
if 'fill_values' in kwargs:
reader.data.fill_values = kwargs['fill_values']
if 'fill_include_names' in kwargs:
reader.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
reader.data.fill_exclude_names = kwargs['fill_exclude_names']
if 'encoding' in kwargs:
reader.encoding = kwargs['encoding']
reader.inputter.encoding = kwargs['encoding']
return reader
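# --- Editor's illustrative sketch (not part of the original module) ---
# Build a customized reader through the keyword arguments handled above. A concrete
# Reader class (e.g. Basic from this package) would normally be passed; the generic
# BaseReader is used here only to keep the sketch self-contained in this module.
def _demo_get_reader():
    reader = _get_reader(BaseReader, delimiter='|', data_start=1,
                         names=['a', 'b'], include_names=['a'])
    return reader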
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'strip_whitespace',
'names', 'include_names', 'exclude_names',
'fill_values', 'fill_include_names',
'fill_exclude_names')
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module. """
from .fastbasic import FastBasic
# A value of None for fill_values imply getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if 'fill_values' in kwargs and kwargs['fill_values'] is None:
del kwargs['fill_values']
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f'fast_{Writer._format_name}' in FAST_CLASSES:
# Switch to fast writer
kwargs['fast_writer'] = fast_writer
return FAST_CLASSES[f'fast_{Writer._format_name}'](**kwargs)
writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars)
writer = Writer(**writer_kwargs)
if 'delimiter' in kwargs:
writer.header.splitter.delimiter = kwargs['delimiter']
writer.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
writer.header.write_comment = kwargs['comment']
writer.data.write_comment = kwargs['comment']
if 'quotechar' in kwargs:
writer.header.splitter.quotechar = kwargs['quotechar']
writer.data.splitter.quotechar = kwargs['quotechar']
if 'formats' in kwargs:
writer.data.formats = kwargs['formats']
if 'strip_whitespace' in kwargs:
if kwargs['strip_whitespace']:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller('strip')
else:
writer.data.splitter.process_val = None
if 'names' in kwargs:
writer.header.names = kwargs['names']
if 'include_names' in kwargs:
writer.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
writer.exclude_names = kwargs['exclude_names']
if 'fill_values' in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs['fill_values'][1] + ''
kwargs['fill_values'] = [kwargs['fill_values']]
writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values
if 'fill_include_names' in kwargs:
writer.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
writer.data.fill_exclude_names = kwargs['fill_exclude_names']
return writer
| 36.93985
| 99
| 0.610676
|
2e1b4ff3258f0bee2719e86b244f01ad86854a7c
| 14,097
|
py
|
Python
|
plugins/CodeCounter/CodeCounter/GUI.py
|
bopopescu/NovalIDE
|
590c2adb69d54fa4a6c9dad5459198be057b1329
|
[
"MulanPSL-1.0"
] | null | null | null |
plugins/CodeCounter/CodeCounter/GUI.py
|
bopopescu/NovalIDE
|
590c2adb69d54fa4a6c9dad5459198be057b1329
|
[
"MulanPSL-1.0"
] | null | null | null |
plugins/CodeCounter/CodeCounter/GUI.py
|
bopopescu/NovalIDE
|
590c2adb69d54fa4a6c9dad5459198be057b1329
|
[
"MulanPSL-1.0"
] | null | null | null |
#coding=utf-8
import tkinter as tk
import os
import threading
import time
from tkinter import ttk
from tkinter.filedialog import askdirectory,askopenfilename
from codecounter import CodeCounter
from noval import GetApp
import noval.consts as consts
import noval.syntax.syntax as syntax
import easyplugindev as epd
from easyplugindev import _
from easyplugindev import ListboxFrame
def getResourcePath():
from pkg_resources import resource_filename
path = resource_filename(__name__,'')
clone_local_img_path = os.path.join(path,"codecounter.png") # 导入同一个包下的文件.
return clone_local_img_path
def getCurrentProjectDocuments():
try:
app=GetApp()
projBrowser=GetApp().MainFrame.GetView(consts.PROJECT_VIEW_NAME)
return projBrowser.GetView().GetDocument().GetModel().filePaths
except Exception as e:
#print(e)
return ['/media/hzy/程序/novalide/NovalIDE/plugins/CodeCounter/codecounter/TextFile1.txt',
'/media/hzy/程序/novalide/NovalIDE/plugins/CodeCounter/codecounter/GUI.py',
'/media/hzy/程序/novalide/NovalIDE/plugins/CodeCounter/codecounter/test1.c',
'/media/hzy/程序/novalide/NovalIDE/plugins/CodeCounter/codecounter/TextFile2 (copy).logger',
'/media/hzy/程序/novalide/NovalIDE/plugins/CodeCounter/codecounter/TextFile2.log'
]
        # This hard-coded fallback list is only used for debugging outside the IDE.
def getSupportedDocuments():
try:
        allLanguageLexers=syntax.SyntaxThemeManager().Lexers#get all languages supported by the IDE.
s=''
for item in allLanguageLexers:
st=item.GetExt()
#print(st)
s+=' '+st
        s=s.replace(' ',';')#separate the extensions with semicolons.
s=s.strip(';')
return s
except:
return 'c;py;txt;md'
def stdInputHandler(entry):
s=entry.get()
    #Standard helper for the input fields: split the semicolon-separated string into a list.
if(s.strip()==''):
return s,[]
s=s.replace(';',';')
s=s.replace('.','')
    s=s.replace('\\','/')  # assign the result; str.replace does not modify the string in place
tmpList=s.split(';')
entry.delete(0,tk.END)
entry.insert(0,s)
    return s,tmpList # return the normalized string and the list of extensions
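# --- Editor's illustrative sketch (not part of the original plugin) ---
# stdInputHandler expects a tkinter Entry; a tiny stand-in with the same
# get/delete/insert interface is used here so the normalisation can be shown
# without creating a Tk window. The sample input is made up.
def _demoStdInputHandler():
    class _FakeEntry:
        def __init__(self, text):
            self._text = text
        def get(self):
            return self._text
        def delete(self, start, end):
            self._text = ''
        def insert(self, index, text):
            self._text = text
    return stdInputHandler(_FakeEntry('c;py;.txt'))
    # -> ('c;py;txt', ['c', 'py', 'txt'])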
class CodeCounterDialog(epd.ModalDialog):
def __init__(self, master,title,label,selection=-1,show_scrollbar=False):
epd.ModalDialog.__init__(self, master, takefocus=1)
self.title(title)
self.projectDocs=getCurrentProjectDocuments()
self.resizable(height=tk.FALSE, width=tk.FALSE)
self.path = ''
        self.pathMode = tk.IntVar()#0 = current project, 1 = a user-chosen folder.
var0=0
        self.pathMode.set(var0)#select the current project by default.
promptFrame=ttk.Frame(self.main_frame)
radioButton1 = ttk.Radiobutton(promptFrame,text=_("Current Project"), value=0,
variable=self.pathMode,command=self.changePathMode)
radioButton2 = ttk.Radiobutton(promptFrame,text=_("Folder"), value=1,
variable=self.pathMode,command=self.changePathMode)
        labelPrompt1 = ttk.Label(promptFrame,text=_("Choose"))#prompt label
        labelPrompt2 = ttk.Label(promptFrame,text=_("to Count Lines"))#prompt label
        #-------------- label bar and radio buttons
labelPrompt1.pack(side=tk.LEFT,anchor=tk.W,expand=0,fill=None,padx=consts.DEFAUT_CONTRL_PAD_X)
radioButton1.pack(side=tk.LEFT)
radioButton2.pack(side=tk.LEFT)
labelPrompt2.pack(side=tk.LEFT)
promptFrame.pack()
# ---------------row=2
self.pathFrame=ttk.Frame(self.main_frame)
self.pathEntry=ttk.Entry(self.pathFrame)
labelForPath=ttk.Label(self.pathFrame,text=_("Source Path:\t"))
labelForPath.pack(side=tk.LEFT,anchor=tk.W,expand=0,fill=None)
self.pathEntry.delete(0,tk.END)
self.pathEntry.pack(side=tk.LEFT,fill=tk.X,expand=1, padx=(consts.DEFAUT_CONTRL_PAD_X,consts.DEFAUT_CONTRL_PAD_X))
if(self.pathMode.get()==0):
state='disabled'
else:
state='normal'
        self.pathEntry['state']=state#the current project is selected by default, so no path input is required.
self.askpathButton = ttk.Button(self.pathFrame,command=self.chooseDir,
text="...",state=state,width=1)
self.askpathButton.pack(side=tk.LEFT)
self.pathFrame.pack(expand=1,fill=tk.X,padx=consts.DEFAUT_CONTRL_PAD_X,pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
        # -------------- extension filter input
self.supportedDocExts=getSupportedDocuments()
extFrame=ttk.Frame(self.main_frame)
extPromptLabel=ttk.Label(extFrame,text=_('Filter file Types:\t'))
extPromptLabel.pack(side=tk.LEFT)
self.extEntry = ttk.Entry(extFrame,text=self.supportedDocExts)
        tempLabel=ttk.Label(extFrame,text=' ',width=2)#placeholder label used only for spacing.
self.extEntry.pack(side=tk.LEFT,expand=1,fill=tk.X,padx=(consts.DEFAUT_CONTRL_PAD_X,consts.DEFAUT_CONTRL_PAD_X))
self.extEntry.delete(0,tk.END)
self.extEntry.insert(0,self.supportedDocExts)
tempLabel.pack(side=tk.LEFT)
extFrame.pack(expand=1,fill=tk.X,padx=consts.DEFAUT_CONTRL_PAD_X,pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
        # ---------- excluded-path list
excludePromptLabelFrame=ttk.Frame(self.main_frame)
excludePromptLabel = ttk.Label(excludePromptLabelFrame,text=_('Exclude Path:'),anchor=tk.W)
excludePromptLabel.pack(side=tk.LEFT)
excludePromptLabelFrame.pack(expand=1,fill=tk.X,
padx=consts.DEFAUT_CONTRL_PAD_X,pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
        ## this frame holds every widget related to the excluded paths.
excludePathFrame=ttk.Frame(self.main_frame)
        ##this frame holds the two buttons used to add and remove paths.
excludePathButtonFrame=ttk.Frame(excludePathFrame)
self.askexcludePathButton = ttk.Button(excludePathButtonFrame,command=self.chooseExcludedDir,
text="+",width=1)
self.removePathButton=ttk.Button(excludePathButtonFrame,command=self.removeExcludedDir,
text="-",width=1)
self.askexcludePathButton.pack(pady=consts.DEFAUT_CONTRL_PAD_Y)
self.removePathButton.pack(pady=consts.DEFAUT_CONTRL_PAD_Y)
        # this frame holds the listbox and its scrollbar.
self.excludeFolderListFrame=ListboxFrame(excludePathFrame)
self.excludeFolderListBox=self.excludeFolderListFrame.listbox
## self.excludeFolderListBox.insert(0,'')
self.excludeFolderListBox.config(height=5)
self.excludeFolderListFrame.pack(expand=1,side=tk.LEFT,fill=tk.X,
padx=(consts.DEFAUT_CONTRL_PAD_X,consts.DEFAUT_CONTRL_PAD_X))
excludePathButtonFrame.pack(side=tk.LEFT)
excludePathFrame.pack(expand=1,fill=tk.X,
padx=(consts.DEFAUT_CONTRL_PAD_X,consts.DEFAUT_CONTRL_PAD_X))
        # -------- start-counting button
self.startCountingButton = ttk.Button(self.main_frame,
command=self.startCounting,text=_("Start Counting!"))
self.startCountingButton.pack(expand=1,fill=tk.X,padx=consts.DEFAUT_CONTRL_PAD_X,pady=(
consts.DEFAUT_CONTRL_PAD_Y,consts.DEFAUT_CONTRL_PAD_Y))
        #-------- result table, scrollbar, etc.
self.tableFrame=ttk.Frame(self.main_frame)
self.scrollbar=ttk.Scrollbar(self.tableFrame,orient=tk.VERTICAL)
self.table = ttk.Treeview(self.tableFrame,show="headings")
self.scrollbar.config(command=self.table.yview)
self.table.configure(yscrollcommand=self.scrollbar.set)
        # define the columns
self.table['columns'] = ['fileName','validLines',
'blankLines','commentLines','allRows']
        # set the column widths
self.table.column('fileName',width=400)
self.table.column('validLines',width=80)
self.table.column('blankLines',width=80)
self.table.column('commentLines',width=100)
self.table.column('allRows',width=80)
        # add the column headings
self.table.heading('fileName',text=_('File Path'))
self.table.heading('validLines',text=_('Code Lines'))
self.table.heading('blankLines',text=_('Blank Lines'))
self.table.heading('commentLines',text=_('Comment Lines'))
self.table.heading('allRows',text=_('Total Lines'))
self.table.pack(side=tk.LEFT,fill=tk.X,expand=1)
self.scrollbar.pack(side=tk.RIGHT,fill=tk.Y,expand=1)
self.tableFrame.pack(padx=consts.DEFAUT_CONTRL_PAD_X,pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
self.progressBar = ttk.Progressbar(self.main_frame, orient = tk.HORIZONTAL,
length = 100, mode = 'determinate')
self.progressBar.pack(expand=1,fill=tk.X,padx=consts.DEFAUT_CONTRL_PAD_X,pady=(consts.DEFAUT_CONTRL_PAD_Y,consts.DEFAUT_CONTRL_PAD_Y))
self.countingFlag=False
        self.countingThread=threading.Thread(target=self.count)# thread that performs the line counting.
def startCounting(self):
if(self.countingFlag==False):
            if(self.countingThread.is_alive()):  # is_alive(): the camelCase isAlive() was removed in Python 3.9
#print('isAlive!!!!!!')
self.countingFlag=False
                return #wait until the previous thread has exited
self.countingFlag=True
self.startCountingButton.config(text=_("Stop Counting!"))
self.countingThread=threading.Thread(target=self.count)
self.countingThread.setDaemon(True)
self.countingThread.start()
else:
self.countingFlag=False
self.startCountingButton.config(text=_("Start Counting!"))
def removeExcludedDir(self):
selIndex=self.excludeFolderListBox.curselection()
self.excludeFolderListBox.delete(selIndex)
def popWarningWindow(self,message):
tk.messagebox.showinfo(_('Warning'),message)
def chooseExcludedDir(self):
self.excludeFolderListBox.selection_clear(0,self.excludeFolderListBox.size()-1)
path = askdirectory()
if(path!=''):
path=epd.formatPathForPlatform(path)
pathList=self.excludeFolderListBox.get(0,self.excludeFolderListBox.size()-1)
            if(path not in pathList):#only add the path if it is not already in the list
self.excludeFolderListBox.insert('end',path)
            else:#if it is already there, select the existing entry instead.
index = pathList.index(path)
self.excludeFolderListBox.selection_set(index)
def getAllExcludedFolders(self):
f=self.excludeFolderListBox.get(0,self.excludeFolderListBox.size()-1)
#print(f)
return list(f)
def clearResultTable(self):
        for child in self.table.get_children():# first clear any rows already in the table; this also
            # works when the table is empty, so no explicit emptiness check is needed.
self.table.delete(child)
def count(self):
extStr,extNameList=stdInputHandler(self.extEntry)
excFolderList=self.getAllExcludedFolders()
self.clearResultTable()
self.progressBar['value']=0
if(self.pathMode.get()==1):
            self.path=self.pathEntry.get().strip()# re-read the path from the entry box each time.
        if(os.path.isdir(self.path)):#if the path exists, count the files under it.
result=CodeCounter.countDirFileLines(self.path,excludeDirs=excFolderList,excludeFiles=[],
includeExts=extNameList,progressBar=self.progressBar,master=self)
else:
self.projectDocs=getCurrentProjectDocuments()
#print(self.projectDocs)
fileNum=CodeCounter.countDirFileLines(fileList=self.projectDocs,excludeDirs=excFolderList,excludeFiles=[],
includeExts=extNameList,progressBar=self.progressBar,master=self)
if(fileNum==0):
#self.popWarningWindow("Current Project is Empty!")
#self.wait_window()
pass
time.sleep(3)
self.progressBar['value']=0
def changePathMode(self):
if(self.pathMode.get()==1):
self.askpathButton['state']='normal'
self.pathEntry['state']='normal'
else:
            self.askpathButton['state']='disabled'
            self.pathEntry['state']='disabled'
def chooseDir(self):
self.pathMode.set(1)
if(self.pathMode.get()==1):
path = askdirectory()
elif(self.pathMode.get()==0):
path = askopenfilename()
self.pathEntry.delete(0,tk.END)
self.pathEntry.insert(0,epd.formatPathForPlatform(path))
self.path=path
def _ok(self, event=None):
pass
def GetStringSelection(self):
return self.listbox.get(self.listbox.curselection()[0])
def startDialog(app,ico):#plugin entry point, used both when installed in the IDE and when debugging.
dialog=CodeCounterDialog(app,title=_("CodeCounter"),label="dadadadadada")
print(ico)
dialog.iconphoto(False, tk.PhotoImage(file=ico))
dialog.ShowModal()
class BaseAppForTest:#minimal host UI used only for testing; it does not run once the plugin is installed.
def __init__(self, master):
self.master = master
self.initWidgets()
def initWidgets(self):
        ttk.Button(self.master, text='Open Dialog',
                   command=self.openDialog # bind the openDialog method
).pack(side=tk.LEFT, ipadx=5, ipady=5, padx=10)
def openDialog(self):
startDialog(self.master)
def main(ico):#when the plugin is not installed in the IDE there is no host window, so it cannot be
    #debugged directly; a temporary app base is therefore created for UI testing.
app = GetApp()
startDialog(app,ico)
if __name__ == "__main__":
getSupportedDocuments()
#main()
| 38.411444
| 143
| 0.603391
|
5d45203726472e049638580605603bf430b2c683
| 5,153
|
py
|
Python
|
Server/src/virtualenv/Lib/site-packages/setuptools/command/setopt.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
Server/src/virtualenv/Lib/site-packages/setuptools/command/setopt.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
Server/src/virtualenv/Lib/site-packages/setuptools/command/setopt.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
import os
import distutils
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == 'local':
return 'setup.cfg'
if kind == 'global':
return os.path.join(
os.path.dirname(distutils.__file__), 'distutils.cfg'
)
if kind == 'user':
dot = os.name == 'posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
from setuptools.compat import ConfigParser
log.debug("Reading configuration from %s", filename)
opts = ConfigParser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug(
"Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section, option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, 'w') as f:
opts.write(f)
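# --- Editor's illustrative sketch (not part of the original module) ---
# With dry_run=True the settings below are only logged, never written to disk;
# the section and option names are made up for illustration.
def _demo_edit_config():
    edit_config('setup.cfg', {
        'easy_install': {'index_url': 'https://example.com/simple'},
        'bdist_rpm': None,   # None deletes the whole [bdist_rpm] section
    }, dry_run=True)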
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-', '_'): self.set_value}
},
self.dry_run
)
| 34.583893
| 79
| 0.570347
|
9cc9deffa83bc9f8e4c42fb4a7ac6f372040bf67
| 950
|
py
|
Python
|
example_code/gcp/example_code/dags/virtual_env_op.py
|
jbampton/airflow-guides
|
b56e90d1f4e39456c2d2eb818c32d996f03957e8
|
[
"Apache-2.0"
] | 1
|
2021-02-21T01:18:57.000Z
|
2021-02-21T01:18:57.000Z
|
example_code/gcp/example_code/dags/virtual_env_op.py
|
vlasvlasvlas/airflow-guides
|
b61e69669d8573570d81219b5b6f70d375a7a5a9
|
[
"Apache-2.0"
] | null | null | null |
example_code/gcp/example_code/dags/virtual_env_op.py
|
vlasvlasvlas/airflow-guides
|
b61e69669d8573570d81219b5b6f70d375a7a5a9
|
[
"Apache-2.0"
] | 1
|
2020-05-20T19:12:04.000Z
|
2020-05-20T19:12:04.000Z
|
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import PythonVirtualenvOperator
from datetime import datetime, timedelta
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2018, 1, 1),
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}
# catchup is a DAG-level argument, not a task default; inside default_args it is ignored
dag = DAG('example_dag_python',
          schedule_interval=timedelta(minutes=5),
          default_args=default_args,
          catchup=False)
def test_func(**kwargs):
print("HELLO")
def test_func_two():
import sys
print(sys.version)
print("hi")
t1 = PythonOperator(
task_id='test_task',
python_callable=test_func,
provide_context=True,
dag=dag)
t2 = PythonVirtualenvOperator(
task_id='test_two',
python_version='2',
python_callable=test_func_two,
dag=dag
)
t1 >> t2
| 20.652174
| 70
| 0.697895
|
07a3fb3432f5e062b3bbe2e4100dacfa99c5f783
| 4,722
|
py
|
Python
|
tools/report_ci_failure.py
|
quantumkoen/qiskit-terra
|
495046d07471e64eab6ddbdfdf8bdef88f0c644f
|
[
"Apache-2.0"
] | null | null | null |
tools/report_ci_failure.py
|
quantumkoen/qiskit-terra
|
495046d07471e64eab6ddbdfdf8bdef88f0c644f
|
[
"Apache-2.0"
] | null | null | null |
tools/report_ci_failure.py
|
quantumkoen/qiskit-terra
|
495046d07471e64eab6ddbdfdf8bdef88f0c644f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Utility module to open an issue on the repository when CIs fail."""
import os
from github import Github
class CIFailureReporter:
"""Instances of this class can report to GitHub that the CI is failing.
Properties:
key_label (str): the label that identifies CI failures reports.
"""
key_label = 'master failing'
def __init__(self, repository, token):
"""
Args:
repository (str): a string in the form 'owner/repository-name'
indicating the GitHub repository to report against.
token (str): a GitHub token obtained following:
https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/
"""
self._repo = repository
self._api = Github(token)
def report(self, branch, commit, infourl=None):
"""Report on GitHub that the specified branch is failing to build at
the specified commit. The method will open an issue indicating that
the branch is failing. If there is an issue already open, it will add a
comment avoiding to report twice about the same failure.
Args:
branch (str): branch name to report about.
commit (str): commit hash at which the build fails.
infourl (str): URL with extra info about the failure such as the
build logs.
"""
issue_number = self._get_report_issue_number()
if issue_number:
self._report_as_comment(issue_number, branch, commit, infourl)
else:
self._report_as_issue(branch, commit, infourl)
def _get_report_issue_number(self):
query = 'state:open label:"{}" repo:{}'.format(self.key_label, self._repo)
results = self._api.search_issues(query=query)
try:
return results[0].number
except IndexError:
return None
def _report_as_comment(self, issue_number, branch, commit, infourl):
stamp = _master_is_failing_stamp(branch, commit)
report_exists = self._check_report_existence(issue_number, stamp)
if not report_exists:
_, body = _master_is_failing_template(branch, commit, infourl)
message_body = '{}\n{}'.format(stamp, body)
self._post_new_comment(issue_number, message_body)
def _check_report_existence(self, issue_number, target):
repo = self._api.get_repo(self._repo)
issue = repo.get_issue(issue_number)
if target in issue.body:
return True
for comment in issue.get_comments():
if target in comment.body:
return True
return False
def _report_as_issue(self, branch, commit, infourl):
repo = self._api.get_repo(self._repo)
stamp = _master_is_failing_stamp(branch, commit)
title, body = _master_is_failing_template(branch, commit, infourl)
message_body = '{}\n{}'.format(stamp, body)
repo.create_issue(title=title, body=message_body, labels=[self.key_label])
def _post_new_comment(self, issue_number, body):
repo = self._api.get_repo(self._repo)
issue = repo.get_issue(issue_number)
issue.create_comment(body)
def _master_is_failing_template(branch, commit, infourl):
title = 'Branch `{}` is failing'.format(branch)
body = 'Trying to build `{}` at commit {} failed.'.format(branch, commit)
if infourl:
body += '\nMore info at: {}'.format(infourl)
return title, body
def _master_is_failing_stamp(branch, commit):
return '<!-- commit {}@{} -->'.format(commit, branch)
_REPOSITORY = 'qiskit/qiskit-terra'
_GH_TOKEN = os.getenv('GH_TOKEN')
def _get_repo_name():
return os.getenv('TRAVIS_REPO_SLUG') or os.getenv('APPVEYOR_REPO_NAME')
def _get_branch_name():
return os.getenv('TRAVIS_BRANCH') or os.getenv('APPVEYOR_REPO_BRANCH')
def _get_commit_hash():
return os.getenv('TRAVIS_COMMIT') or os.getenv('APPVEYOR_REPO_COMMIT')
def _get_info_url():
if os.getenv('TRAVIS'):
job_id = os.getenv('TRAVIS_JOB_ID')
return 'https://travis-ci.org/{}/builds/{}'.format(_REPOSITORY, job_id)
if os.getenv('APPVEYOR'):
build_id = os.getenv('APPVEYOR_BUILD_ID')
return 'https://ci.appveyor.com/project/{}/build/{}'.format(_REPOSITORY, build_id)
return None
if __name__ == '__main__':
_REPORTER = CIFailureReporter(_get_repo_name(), _GH_TOKEN)
_REPORTER.report(_get_branch_name(), _get_commit_hash(), _get_info_url())
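# Illustrative sketch (not in the original script): the reporter can also be driven
# manually from a shell, assuming a valid GH_TOKEN is exported; branch, commit and
# URL below are hypothetical.
#
#   reporter = CIFailureReporter('qiskit/qiskit-terra', os.getenv('GH_TOKEN'))
#   reporter.report('master', 'abc1234', infourl='https://travis-ci.org/...')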
| 34.977778
| 103
| 0.661584
|
d7b78435beea7d3b2d45b1e41ae831107968e4b6
| 878
|
py
|
Python
|
src/mailer/management/commands/send_mail.py
|
srtab/django-mailer
|
42eb41bf5e09df601512b1c2a8e6ef6624633328
|
[
"MIT"
] | 1
|
2021-05-17T18:19:56.000Z
|
2021-05-17T18:19:56.000Z
|
src/mailer/management/commands/send_mail.py
|
srtab/django-mailer
|
42eb41bf5e09df601512b1c2a8e6ef6624633328
|
[
"MIT"
] | 1
|
2020-03-05T23:47:02.000Z
|
2020-03-05T23:47:02.000Z
|
src/mailer/management/commands/send_mail.py
|
srtab/django-mailer
|
42eb41bf5e09df601512b1c2a8e6ef6624633328
|
[
"MIT"
] | null | null | null |
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from mailer.engine import send_all
from mailer.management.helpers import CronArgMixin
# allow a sysadmin to pause the sending of mail temporarily.
PAUSE_SEND = getattr(settings, "MAILER_PAUSE_SEND", False)
class Command(CronArgMixin, BaseCommand):
help = "Do one pass through the mail queue, attempting to send all mail."
def handle(self, *args, **options):
if options['cron'] == 0:
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s")
logging.info("-" * 72)
# if PAUSE_SEND is turned on don't do anything.
if not PAUSE_SEND:
send_all()
else:
logging.info("sending is paused, quitting.")
| 31.357143
| 77
| 0.678815
|
4c9f4d5470e63397ab078c35d0fea07952ec1acd
| 682
|
py
|
Python
|
retrievers/DANCE/ANCE_setup.py
|
vishalbelsare/OpenMatch
|
84b25502bf52c58b9e71bd0754b2fc192d9b448f
|
[
"MIT"
] | 403
|
2020-01-17T06:54:46.000Z
|
2022-03-30T05:47:42.000Z
|
retrievers/DANCE/ANCE_setup.py
|
vishalbelsare/OpenMatch
|
84b25502bf52c58b9e71bd0754b2fc192d9b448f
|
[
"MIT"
] | 30
|
2020-06-07T12:28:07.000Z
|
2022-03-20T05:26:03.000Z
|
retrievers/DANCE/ANCE_setup.py
|
vishalbelsare/OpenMatch
|
84b25502bf52c58b9e71bd0754b2fc192d9b448f
|
[
"MIT"
] | 48
|
2020-07-15T09:45:46.000Z
|
2022-03-01T07:27:59.000Z
|
from setuptools import setup
with open('README.md') as f:
readme = f.read()
setup(
name='ANCE',
version='0.1.0',
description='Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval',
url='https://github.com/microsoft/ANCE',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
license="MIT",
long_description=readme,
install_requires=[
'transformers==2.3.0',
'pytrec-eval',
'wget'
],
)
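# Illustrative sketch: a setup.py like this is typically installed in editable mode
# from the repository root, e.g.
#
#   pip install -e .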
| 28.416667
| 102
| 0.608504
|
5368aeb9765dc68477b50279ee653c79f1967cc8
| 363
|
py
|
Python
|
database/admin.py
|
erischon/p8_eri_schon
|
b64de7acad28463a39b40ce9d537ceec4227202b
|
[
"MIT"
] | null | null | null |
database/admin.py
|
erischon/p8_eri_schon
|
b64de7acad28463a39b40ce9d537ceec4227202b
|
[
"MIT"
] | null | null | null |
database/admin.py
|
erischon/p8_eri_schon
|
b64de7acad28463a39b40ce9d537ceec4227202b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Categorie, Shop, Brand, Nutriscore, Product, Prodbrand, Prodcat, Prodshop
admin.site.register(Categorie)
admin.site.register(Shop)
admin.site.register(Brand)
admin.site.register(Nutriscore)
admin.site.register(Product)
admin.site.register(Prodbrand)
admin.site.register(Prodcat)
admin.site.register(Prodshop)
| 30.25
| 93
| 0.820937
|
fd06bc455e9bd3f4fb0d35d75da268e12e227bea
| 4,851
|
py
|
Python
|
var/spack/repos/builtin/packages/blis/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2020-05-24T15:23:12.000Z
|
2020-05-24T15:23:12.000Z
|
var/spack/repos/builtin/packages/blis/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
var/spack/repos/builtin/packages/blis/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2019-10-29T09:08:17.000Z
|
2019-10-29T09:08:17.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# Although this looks like an Autotools package, it's not one. Refer to:
# https://github.com/flame/blis/issues/17
# https://github.com/flame/blis/issues/195
# https://github.com/flame/blis/issues/197
class BlisBase(Package):
"""Base class for building BLIS, shared with the AMD optimized version
of the library in the 'amdblis' package.
"""
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
variant(
'threads', default='none',
description='Multithreading support',
values=('pthreads', 'openmp', 'none'),
multi=False
)
variant(
'blas', default=True,
description='BLAS compatibility',
)
variant(
'cblas', default=True,
description='CBLAS compatibility',
)
variant(
'shared', default=True,
description='Build shared library',
)
variant(
'static', default=True,
description='Build static library',
)
# TODO: add cpu variants. Currently using auto.
# If on KNL, should the default be memkind?
# BLIS has its own API but can be made compatible with BLAS
# enabling CBLAS automatically enables BLAS.
provides('blas', when="+blas")
provides('blas', when="+cblas")
phases = ['configure', 'build', 'install']
def configure(self, spec, prefix):
config_args = []
config_args.append("--enable-threading=" +
spec.variants['threads'].value)
if '+cblas' in spec:
config_args.append("--enable-cblas")
else:
config_args.append("--disable-cblas")
if '+blas' in spec:
config_args.append("--enable-blas")
else:
config_args.append("--disable-blas")
if '+shared' in spec:
config_args.append("--enable-shared")
else:
config_args.append("--disable-shared")
if '+static' in spec:
config_args.append("--enable-static")
else:
config_args.append("--disable-static")
# FIXME: add cpu isa variants.
config_args.append("auto")
configure("--prefix=" + prefix,
*config_args)
def build(self, spec, prefix):
make()
@run_after('build')
@on_package_attributes(run_tests=True)
def check(self):
make('check')
def install(self, spec, prefix):
make('install')
@run_after('install')
def darwin_fix(self):
# The shared library is not installed correctly on Darwin; fix this
if self.spec.satisfies('platform=darwin'):
fix_darwin_install_name(self.prefix.lib)
@property
def libs(self):
return find_libraries(
["libblis", "libblis-mt"], root=self.prefix, recursive=True
)
class Blis(BlisBase):
"""BLIS is a portable software framework for instantiating high-performance
BLAS-like dense linear algebra libraries.
The framework was designed to isolate essential kernels of computation
that, when optimized, immediately enable optimized implementations of
most of its commonly used and computationally intensive operations. BLIS
is written in ISO C99 and available under a new/modified/3-clause BSD
license. While BLIS exports a new BLAS-like API, it also includes a
BLAS compatibility layer which gives application developers access to
BLIS implementations via traditional BLAS routine calls.
An object-based API unique to BLIS is also available.
"""
homepage = "https://github.com/flame/blis"
url = "https://github.com/flame/blis/archive/0.4.0.tar.gz"
git = "https://github.com/flame/blis.git"
version('master', branch='master')
version('0.6.1', sha256='76b22f29b7789cf117c0873d2a6b2a6d61f903869168148f2e7306353c105c37')
version('0.6.0', sha256='ad5765cc3f492d0c663f494850dafc4d72f901c332eb442f404814ff2995e5a9')
version('0.5.0', sha256='1a004d69c139e8a0448c6a6007863af3a8c3551b8d9b8b73fe08e8009f165fa8')
version('0.4.0', sha256='9c7efd75365a833614c01b5adfba93210f869d92e7649e0b5d9edc93fc20ea76')
version('0.3.2', sha256='b87e42c73a06107d647a890cbf12855925777dc7124b0c7698b90c5effa7f58f')
version('0.3.1', sha256='957f28d47c5cf71ffc62ce8cc1277e17e44d305b1c2fa8506b0b55617a9f28e4')
version('0.3.0', sha256='d34d17df7bdc2be8771fe0b7f867109fd10437ac91e2a29000a4a23164c7f0da')
version('0.2.2', sha256='4a7ecb56034fb20e9d1d8b16e2ef587abbc3d30cb728e70629ca7e795a7998e8')
# Problems with permissions on installed libraries:
# https://github.com/flame/blis/issues/343
patch('Makefile_0.6.0.patch', when='@0.4.0:0.6.0')
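# Illustrative sketch (not part of the package recipe): with this recipe available in
# a Spack repository, the variants above map directly onto install specs, e.g.
#
#   spack install blis threads=openmp +cblas
#   spack install blis@0.6.1 ~shared +static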
| 33.923077
| 95
| 0.666254
|
49e0a4fee3d17c20041fa6667a6a74e488039aef
| 1,288
|
py
|
Python
|
app/api/routers/country.py
|
Mohammed785/Emergency-Numbers-fastapi
|
345a6a77eea36e5dcac34b103ddfe0f0a7d17bb6
|
[
"MIT"
] | null | null | null |
app/api/routers/country.py
|
Mohammed785/Emergency-Numbers-fastapi
|
345a6a77eea36e5dcac34b103ddfe0f0a7d17bb6
|
[
"MIT"
] | null | null | null |
app/api/routers/country.py
|
Mohammed785/Emergency-Numbers-fastapi
|
345a6a77eea36e5dcac34b103ddfe0f0a7d17bb6
|
[
"MIT"
] | null | null | null |
from typing import List
from fastapi import APIRouter,Depends,status
from api import schemas,database,oauth2
from sqlalchemy.orm import Session
from api.repository import country
router = APIRouter(
prefix='/country',
tags=['Country']
)
get_db = database.get_db
@router.post('/create',status_code=status.HTTP_201_CREATED)
def create_country(request: schemas.Country, db: Session = Depends(get_db),current_user:schemas.Admin=Depends(oauth2.get_current_user)):
return country.create_country(request,db)
@router.get('/show/{iso_code}', status_code=status.HTTP_302_FOUND, response_model=schemas.ShowCountry)
def show_country(iso_code: str, db: Session = Depends(get_db)):
return country.get_country(iso_code, db)
@router.put('/update/{iso_code}',status_code=status.HTTP_202_ACCEPTED)
def update_country(iso_code: str,request:schemas.Country,db:Session=Depends(get_db),current_user:schemas.Admin=Depends(oauth2.get_current_user)):
return country.update_country(iso_code,request,db)
@router.delete('/delete/{iso_code}',status_code=status.HTTP_204_NO_CONTENT)
def delete_country(iso_code: str, db: Session = Depends(get_db), current_user: schemas.Admin = Depends(oauth2.get_current_user)):
return country.delete_country(iso_code,db)
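# Illustrative request sketch (not part of the router); host, port and iso_code are
# hypothetical, and the create/update/delete endpoints additionally require an OAuth2
# bearer token from the dependency above.
#
#   import requests
#   r = requests.get("http://127.0.0.1:8000/country/show/US")
#   print(r.status_code, r.json())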
| 41.548387
| 146
| 0.778727
|
9ef37e2e2008ffb13a6cbecf443c017c58b4f968
| 1,957
|
py
|
Python
|
iati_parse_n_process.py
|
brainsqueeze/IATI-parse
|
136458f690ba7fc0f6c7573564989117cfb92fff
|
[
"MIT"
] | null | null | null |
iati_parse_n_process.py
|
brainsqueeze/IATI-parse
|
136458f690ba7fc0f6c7573564989117cfb92fff
|
[
"MIT"
] | null | null | null |
iati_parse_n_process.py
|
brainsqueeze/IATI-parse
|
136458f690ba7fc0f6c7573564989117cfb92fff
|
[
"MIT"
] | null | null | null |
import json
from urllib import request
from xml.etree import ElementTree as Et
def strip_text(xml_array, tag, text_type, default):
"""
Parses XML attributes along the same level
:param xml_array: XML attributes (list)
:param tag: name of XML tag to search for (str)
:param text_type: what type the field contents are expected to be (data type)
:param default: default value if tag field is empty or does not match data type
:return: str if text_type is str, else list
"""
data = [item.text if isinstance(item.text, text_type) else default for item in xml_array if xml_array.tag == tag]
if text_type == str:
return ". ".join(data)
return data
def process_xml(xml_string):
"""
Processes XML from a string literal
:param xml_string: (str)
:return: desired attributes in JSON form (dict)
"""
root = Et.fromstring(xml_string)
docs = {}
for child in root:
# get ID, text pair out of XML
pair = [field.text if field.tag == 'iati-identifier' else strip_text(field, 'description', str, "")
for field in child if field.tag in {'iati-identifier', 'description'}]
idx, text = pair
temp_dict = {idx: {"text": text}}
docs = {**docs, **temp_dict}
return docs
def classify(data):
"""
Sample batch request to the auto-classification API
:param data: (dict)
:return: (JSON)
"""
url = "http://hostname:9091/batch"
opts = {"data": data,
"chunk": "true",
"threshold": "low",
"rollup": "false"
}
req = request.Request(url, data=json.dumps(opts).encode('utf8'), headers={"Content-Type": "application/json"})
return request.urlopen(req).read().decode('utf8')
if __name__ == '__main__':
with open('iati_sample_1.xml', 'r') as f:
d = f.read()
parsed_data = process_xml(d)
p = classify(data=parsed_data)
print(p)
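# Illustrative sketch of the XML shape process_xml() expects (element names follow the
# tags searched above; the identifier and text values are hypothetical):
#
#   <iati-activities>
#     <iati-activity>
#       <iati-identifier>XM-DAC-12345</iati-identifier>
#       <description><narrative>Some project description.</narrative></description>
#     </iati-activity>
#   </iati-activities>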
| 28.779412
| 117
| 0.621359
|
4ec6f98a8f77362c0ab939db72c778c0245bf43e
| 1,201
|
py
|
Python
|
src/solutions/common/to/discussion_groups.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/to/discussion_groups.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/to/discussion_groups.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import unicode_property, long_property
class DiscussionGroupTO(object):
id = long_property('1')
topic = unicode_property('2')
description = unicode_property('3')
message_key = unicode_property('4')
creation_timestamp = long_property('5')
@classmethod
def from_model(cls, model):
to = cls()
for prop in dir(DiscussionGroupTO):
if not prop.startswith('__') and not callable(getattr(DiscussionGroupTO, prop)):
setattr(to, prop, getattr(model, prop))
return to
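# Illustrative usage sketch (the model instance is hypothetical): from_model copies
# every public, non-callable attribute of the same name from the datastore model.
#
#   to = DiscussionGroupTO.from_model(discussion_group_model)
#   to.topic  # unicode topic copied from the model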
| 34.314286
| 92
| 0.706911
|
5b498abdf1c6495d375d516e0451627f84cf2857
| 70,310
|
py
|
Python
|
tf2onnx/graph.py
|
zerollzeng/tensorflow-onnx
|
79afdbcebac61c7aea2905790479806b83664b52
|
[
"Apache-2.0"
] | null | null | null |
tf2onnx/graph.py
|
zerollzeng/tensorflow-onnx
|
79afdbcebac61c7aea2905790479806b83664b52
|
[
"Apache-2.0"
] | null | null | null |
tf2onnx/graph.py
|
zerollzeng/tensorflow-onnx
|
79afdbcebac61c7aea2905790479806b83664b52
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.graph - class to manage graph manipulation on top of onnx
"""
import collections
import copy
import logging
import six
import numpy as np
from onnx import helper, numpy_helper, shape_inference, OperatorSetIdProto, AttributeProto, TensorProto
from tf2onnx import utils, __version__
from tf2onnx.utils import make_name, port_name, find_opset
from tf2onnx import optimizer
from tf2onnx.schemas import get_schema, infer_onnx_shape_dtype
from tf2onnx import constants
logger = logging.getLogger(__name__)
# todo(pengwa): remove protected-access later
# pylint: disable=broad-except,protected-access
class ExternalTensorStorage():
"""Passed into graph and node methods to accumulate tensors to save externally"""
def __init__(self):
self.name_to_tensor_data = {}
self.name_counter = 0
self.external_tensor_size_threshold = 1024
self.node_to_modified_value_attr = {}
class Node(object):
"""A Node - wrapper around onnx nodes that we use for graph manipulations."""
def __init__(self, node, graph, skip_conversion=False):
"""Create Node.
Args:
node: Onnx node in NodeProto
graph: Graph() we are part of
"""
self._op = node
self.graph = graph
self._input = list(node.input)
self._output = list(node.output)
self._attr = {}
graph.set_node_by_name(self)
# dict to original attributes
for a in node.attribute:
self._attr[a.name] = a
self._skip_conversion = skip_conversion
@property
def input(self):
return self._input
@input.setter
def input(self, val):
# The setter can detect that all inputs are replaced at once,
# but it cannot detect that a single input is changed.
# That is why replace_input and replace_inputs must
# be used to change inputs, so that the graph instance
# can update its internal indices.
self._input = copy.deepcopy(val)
@property
def output(self):
return self._output
@output.setter
def output(self, val):
"""Set op output. Output should be updated explicitly,
changing it would require output mapping changed.
"""
self._graph_check()
for o in self._output:
del self.graph._output_to_node_name[o]
self._output = val.copy()
for o in self._output:
utils.make_sure(o not in self.graph._output_to_node_name, "output %s already in output mapping", o)
self.graph._output_to_node_name[o] = self.name
@property
def inputs(self):
"""Input node objects."""
self._graph_check()
val = [self.graph.get_node_by_output(n) for n in self._input]
return val
@property
def attr(self):
return self._attr
def get_value_attr(self, external_tensor_storage=None):
"""Return onnx attr for value property of node.
Attr is modified to point to external tensor data stored in external_tensor_storage, if included.
"""
a = self._attr["value"]
if external_tensor_storage is not None and self in external_tensor_storage.node_to_modified_value_attr:
return external_tensor_storage.node_to_modified_value_attr[self]
if external_tensor_storage is None or a.type != AttributeProto.TENSOR:
return a
if np.product(a.t.dims) > external_tensor_storage.external_tensor_size_threshold:
a = copy.copy(a)
tensor_name = self.name.strip() + "_" + str(external_tensor_storage.name_counter)
for c in '~"#%&*:<>?/\\{|}':
tensor_name = tensor_name.replace(c, '_')
external_tensor_storage.name_counter += 1
external_tensor_storage.name_to_tensor_data[tensor_name] = a.t.raw_data
external_tensor_storage.node_to_modified_value_attr[self] = a
a.t.raw_data = b''
a.t.ClearField("raw_data")
location = a.t.external_data.add()
location.key = "location"
location.value = tensor_name
a.t.data_location = TensorProto.EXTERNAL
return a
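# Note added for illustration (not part of the original API surface): tensors whose
# element count exceeds external_tensor_size_threshold are stripped of raw_data and
# pointed at an external-data "location" entry; the caller later writes
# external_tensor_storage.name_to_tensor_data to disk. A rough usage sketch:
#
#   storage = ExternalTensorStorage()
#   attr = const_node.get_value_attr(external_tensor_storage=storage)
#   # storage.name_to_tensor_data now maps generated names to raw tensor bytes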
def get_onnx_attrs(self, external_tensor_storage=None):
"""Return onnx valid attributes.
Attrs point to external tensor data stored in external_tensor_storage, if included."""
schema = get_schema(self.type, self.graph.opset, self.domain)
if schema is None and not (self.is_const() or self.is_graph_input()):
logger.debug("Node %s uses non-stardard onnx op <%s, %s>, skip attribute check",
self.name, self.domain, self.type)
onnx_attrs = {}
for a in self._attr.values():
if a.name == "value":
onnx_attrs[a.name] = self.get_value_attr(external_tensor_storage)
elif schema is None or schema.has_attribute(a.name):
onnx_attrs[a.name] = a
return onnx_attrs
@property
def name(self):
return self._op.name
def child_name(self):
return utils.make_name(self.name)
@property
def op(self):
"""TODO: have a better interface for this."""
return self._op
@property
def type(self):
"""Return Op type."""
return self._op.op_type
@type.setter
def type(self, val):
"""Set Op type."""
self._op.op_type = val
@property
def domain(self):
"""Return Op type."""
return self._op.domain
@domain.setter
def domain(self, val):
"""Set Op type."""
self._op.domain = val
@property
def data_format(self):
"""Return data_format."""
attr_str = self.get_attr_value("data_format")
return "unkown" if attr_str is None else attr_str.decode("utf-8")
@data_format.setter
def data_format(self, val):
"""Set data_format."""
self.set_attr("data_format", val)
def is_nhwc(self):
"""Return True if node is in NHWC format."""
utils.make_sure('D' not in self.data_format, "is_nhwc called on %s with spatial=2 but data_format=%s",
self.name, self.data_format)
return self.data_format == "NHWC"
def is_const(self):
"""Return True if node is a constant."""
return self.type in ["Const", "ConstV2"]
def is_scalar(self):
"""Return True if node is a constant with a scalar value."""
if not self.is_const():
return False
t = self.get_attr("value", default=None)
if t is None:
return False
t = numpy_helper.to_array(helper.get_attribute_value(t))
return t.shape == tuple()
def is_graph_input(self):
return self.type in ["Placeholder", "PlaceholderWithDefault", "PlaceholderV2"]
def is_graph_input_default_const(self):
return self.is_const() and any(
out.is_graph_input() for out in self.graph.find_output_consumers(self.output[0])
)
def is_while(self):
return self.type in ["While", "StatelessWhile", "Loop"]
def __str__(self):
return str(self._op)
def __repr__(self):
return "<onnx op type='%s' name=%s>" % (self.type, self._op.name)
@property
def summary(self):
"""Return node summary information."""
lines = []
lines.append("OP={}".format(self.type))
lines.append("Name={}".format(self.name))
g = self.graph
if self.input:
lines.append("Inputs:")
for name in self.input:
node = g.get_node_by_output(name)
op = node.type if node else "N/A"
lines.append("\t{}={}, {}, {}".format(name, op, g.get_shape(name), g.get_dtype(name)))
if self.output:
lines.append("Outputs:")
for name in self.output:
lines.append("\t{}={}, {}".format(name, g.get_shape(name), g.get_dtype(name)))
return '\n'.join(lines)
def get_attr(self, name, default=None):
"""Get raw attribute value."""
attr = self.attr.get(name, default)
return attr
def get_attr_value(self, name, default=None):
attr = self.get_attr(name)
if attr:
return helper.get_attribute_value(attr)
return default
def get_attr_int(self, name):
"""Get attribute value as int."""
attr_int = self.get_attr_value(name)
utils.make_sure(
attr_int is not None and isinstance(attr_int, int),
"attribute %s is None", name
)
return attr_int
def get_attr_str(self, name, encoding="utf-8"):
"""Get attribute value as string."""
attr_str = self.get_attr_value(name)
utils.make_sure(
attr_str is not None and isinstance(attr_str, bytes),
"attribute %s is None", name
)
return attr_str.decode(encoding)
def set_attr(self, name, value):
self.attr[name] = helper.make_attribute(name, value)
def set_attr_onnx(self, value):
self.attr[value.name] = value
@property
def skip_conversion(self):
return self._skip_conversion
@skip_conversion.setter
def skip_conversion(self, val):
self._skip_conversion = val
# If some Node is created as onnx_node, then we don't need convert it
def need_skip(self):
return self._skip_conversion
@property
def output_shapes(self):
"""Get output shapes."""
self._graph_check()
val = [self.graph.get_shape(n) for n in self._output]
return val
@property
def output_dtypes(self):
"""Get output dtypes."""
self._graph_check()
val = [self.graph.get_dtype(n) for n in self._output]
return val
def get_tensor_value(self, as_list=True):
"""Get value for onnx tensor.
Args:
as_list: whether return numpy ndarray in list.
Returns:
If as_list=True, return the array as a (possibly nested) list.
Otherwise, return data of type np.ndarray.
If a tensor is a scalar having value 1,
when as_list=False, return np.array(1), type is <class 'numpy.ndarray'>
when as_list=True, return 1, type is <class 'int'>.
"""
if not self.is_const():
raise ValueError("get tensor value: '{}' must be Const".format(self.name))
t = self.get_attr("value")
if t:
t = numpy_helper.to_array(helper.get_attribute_value(t))
if as_list is True:
t = t.tolist() # t might be scalar after tolist()
return t
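# Illustration (hypothetical const node): for a Const holding np.array([[1, 2]]),
#   node.get_tensor_value()              # -> [[1, 2]] (nested Python list)
#   node.get_tensor_value(as_list=False) # -> numpy.ndarray of shape (1, 2)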
def scalar_to_dim1(self):
"""Get value for onnx tensor."""
if not self.is_const():
raise ValueError("get tensor value: {} must be Const".format(self.name))
t = self.get_attr("value")
if t:
t = helper.get_attribute_value(t)
if not t.dims:
t.dims.extend([1])
return t.dims
def set_tensor_value(self, new_val):
"""Set new value for existing onnx tensor.
Args:
new_val: value of type numpy ndarray
"""
if not self.is_const():
raise ValueError("set tensor value: {} must be Const".format(self.name))
t = self.get_attr("value")
if not t:
raise ValueError("set tensor value: {} is None".format(self.name))
t = helper.get_attribute_value(t)
onnx_tensor = numpy_helper.from_array(new_val, t.name)
del t
self.set_attr("value", onnx_tensor)
# track shapes in _output_shapes
self._graph_check()
self.graph.set_shape(onnx_tensor.name, list(onnx_tensor.dims))
def get_body_graphs(self):
self._graph_check()
return self.graph.contained_graphs.get(self.name, None)
def set_body_graph_as_attr(self, attr_name, graph):
self._graph_check()
if self.name not in self.graph.contained_graphs:
self.graph.contained_graphs[self.name] = {}
self.graph.contained_graphs[self.name].update({attr_name: graph})
graph.parent_graph = self.graph
def update_proto(self, external_tensor_storage=None):
"""Update protobuf from internal structure."""
nodes = list(self._op.input)
for node in nodes:
self._op.input.remove(node)
self._op.input.extend(self.input)
nodes = list(self._op.output)
for node in nodes:
self._op.output.remove(node)
self._op.output.extend(self.output)
# update attributes to proto
del self._op.attribute[:]
# check attribute of type GraphProto
attr_graphs = self.get_body_graphs()
if attr_graphs:
for attr_name, sub_graph in attr_graphs.items():
graph_proto = sub_graph.make_graph("graph for " + self.name + " " + attr_name,
external_tensor_storage=external_tensor_storage)
self.set_attr(attr_name, graph_proto)
attr = list(self.get_onnx_attrs(external_tensor_storage).values())
if attr:
self._op.attribute.extend(attr)
def get_implicit_inputs(self, recursive=True):
"""Get implicit inputs if the node has attributes being GraphProto."""
output_available_in_cur_graph = set()
all_node_inputs = set()
graphs = []
body_graphs = self.get_body_graphs()
if body_graphs:
graphs.extend(body_graphs.values())
while graphs:
graph = graphs.pop()
for n in graph.get_nodes():
output_available_in_cur_graph |= set(n.output)
for i in n.input:
all_node_inputs.add(i)
if recursive:
b_graphs = n.get_body_graphs()
if b_graphs:
graphs.extend(b_graphs.values())
outer_scope_node_input_ids = all_node_inputs - output_available_in_cur_graph
return list(outer_scope_node_input_ids)
def _graph_check(self):
utils.make_sure(self.graph is not None, "Node %s not belonging any graph",
self.name)
def maybe_cast_input(self, supported, type_map):
""".maybe_cast_input
Args:
supported: list of supported types for inputs
type_map: dict type to supported type mapping
"""
did_cast = False
for i, name in enumerate(self.input):
dtype = self.graph.get_dtype(name)
if dtype not in supported[i]:
tdtype = type_map.get(dtype)
if tdtype is None:
raise RuntimeError("don't know how to cast type {} on node {}".format(dtype, name))
shape = self.graph.get_shape(name)
cast_node = self.graph.insert_new_node_on_input(
self, "Cast", name, to=tdtype)
self.graph.set_dtype(cast_node.output[0], tdtype)
self.graph.set_shape(cast_node.output[0], shape)
did_cast = True
return did_cast
class Graph(object):
""""Class that provides graph manipulation and matching."""
def __init__(self, nodes, output_shapes=None, dtypes=None, target=None, opset=None, extra_opset=None,
input_names=None, output_names=None, is_subgraph=False, graph_name=None):
"""Create Graph.
Args:
nodes: list of Node()
output_shapes: dict of tensorflow output shapes
dtypes: dict of tensorflow dtype
"""
if target is None:
target = []
self._nodes = []
self._nodes_by_name = {}
self._output_to_node_name = {}
self._output_to_consumers = {}
self._input_to_graph = {}
self.shapes = {}
self.graph_name = graph_name or utils.make_name("tf2onnx")
self._is_subgraph = is_subgraph
self.ta_reads = []
# A list of index, output tuples of potential scan outputs in this graph
# Used by the tflite while loop handler
self.scan_outputs = []
self.func_inputs = []
self.ragged_variant_list_reads = []
self.ragged_variant_list_writes = []
self._target = set(target)
self._dtypes = dtypes
self._output_shapes = output_shapes
self._opset = find_opset(opset)
if extra_opset is not None:
utils.make_sure(isinstance(extra_opset, list), "invalid extra_opset")
self._extra_opset = extra_opset
self.outputs = output_names if output_names is not None else []
self.parent_graph = None
self.contained_graphs = {} # {node_name: {node_attribute_name: Graph}}
ops = [Node(node, self) for node in nodes]
if input_names is not None:
input_names_set = set(input_names)
for n in ops:
for i, out in enumerate(n.output):
if out in input_names_set and not n.is_graph_input():
n.output[i] = utils.make_name("@@ALLOC")
ops.append(Node(helper.make_node("Placeholder", [], outputs=[out], name=out), self))
logger.info("Created placeholder for input %s", out)
input_nodes = {n.output[0]: n for n in ops if n.is_graph_input()}
if input_names is not None:
self.inputs = [input_nodes[n] for n in input_names]
else:
self.inputs = list(input_nodes.values())
self.reset_nodes(ops)
if not is_subgraph:
# add identity node after each output, in case it is renamed during conversion.
for o in self.outputs:
n = self.get_node_by_output_in_current_graph(o)
if n.is_graph_input():
# Don't add identity if the node is also an input. We want to keep input names the same.
continue
new_output_name = port_name(n.name + "_" + utils.make_name("raw_output_"))
n_shapes = n.output_shapes
n_dtypes = n.output_dtypes
body_graphs = n.graph.contained_graphs.pop(n.name, None)
self.remove_node(n.name)
new_outputs = [output if output != o else new_output_name for output in n.output]
# domain should be passed to new node
branches = {}
if body_graphs:
for attr_name, body_graph in body_graphs.items():
body_graph.parent_graph = self
branches[attr_name] = body_graph
_ = self.make_node(n.type, n.input, outputs=new_outputs, attr=n.attr, name=n.name,
skip_conversion=n._skip_conversion, dtypes=n_dtypes, shapes=n_shapes,
domain=n.domain, branches=branches)
self.replace_all_inputs(o, new_output_name, ops=self.get_nodes())
self.make_node("Identity", [new_output_name], outputs=[o], op_name_scope=n.name + "_" + "graph_outputs")
self.copy_shape(new_output_name, o)
self.copy_dtype(new_output_name, o)
def create_new_graph_with_same_config(self):
"""Create a clean graph inheriting current graph's configuration."""
return Graph([], output_shapes={}, dtypes={}, target=self._target, opset=self._opset,
extra_opset=self.extra_opset, output_names=[])
@property
def input_names(self):
"""Placeholder node outputs"""
return [node.output[0] for node in self.inputs]
@property
def opset(self):
return self._opset
@property
def extra_opset(self):
return self._extra_opset
def is_target(self, *names):
"""Return True if target platform contains any name."""
return any(name in self._target for name in names)
def make_consts(self, values, np_type=np.int64, skip_conversion=False, raw=True):
"""create list of consts of same type"""
consts = []
for value in values:
np_val = np.array(value).astype(np_type)
consts.append(self.make_const(utils.make_name("const"), np_val, skip_conversion, raw))
return consts
def make_const(self, name, np_val, skip_conversion=False, raw=True):
"""Make a new constant in the graph.
Args:
name: const node name, must be unique.
np_val: value of type numpy ndarray.
skip_conversion: bool, indicate whether this created node would be mapped during conversion.
raw: whether to store the data in the raw_data field or in the dtype-specific field
"""
np_val_flat = np_val.flatten()
is_bytes = np_val.dtype == np.object and len(np_val_flat) > 0 and isinstance(np_val_flat[0], bytes)
if raw and not is_bytes:
onnx_tensor = numpy_helper.from_array(np_val, name)
else:
onnx_tensor = helper.make_tensor(name, utils.map_numpy_to_onnx_dtype(np_val.dtype),
np_val.shape, np_val_flat, raw=False)
dtype = onnx_tensor.data_type
node = self.make_node("Const", [], outputs=[name], name=name, attr={"value": onnx_tensor},
skip_conversion=skip_conversion, dtypes=[dtype], infer_shape_dtype=False)
self.set_shape(name, np_val.shape)
self.set_dtype(name, utils.map_numpy_to_onnx_dtype(np_val.dtype))
return node
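# Illustration (names and the consuming op are hypothetical): constants are typically
# created for things like axes or pad values and then fed to another node, e.g.
#
#   axes = g.make_const(utils.make_name("axes"), np.array([0], dtype=np.int64))
#   g.make_node("Unsqueeze", [some_output, axes.output[0]])  # opset-13 style Unsqueeze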
def copy_const(self, node, name=None):
"""Copy a const node, using name if specified"""
# TODO: support attr copy starting at opset 12
if name is None:
name = utils.make_name(node.name)
return self.make_const(name, node.get_tensor_value(as_list=False))
def make_node(self, op_type, inputs, attr=None, output_count=1, outputs=None, skip_conversion=True,
op_name_scope=None, name=None, shapes=None, dtypes=None, domain=constants.ONNX_DOMAIN,
infer_shape_dtype=True, branches=None):
"""Make a new onnx node in the graph"""
if attr is None:
attr = {}
if shapes is None:
shapes = []
if dtypes is None:
dtypes = []
if branches is None:
branches = {}
if name is None:
name = utils.make_name(op_type)
if op_name_scope:
name = "_".join([op_name_scope, name])
logger.debug("Making node: Name=%s, OP=%s", name, op_type)
if outputs is None:
outputs = [name + ":" + str(i) for i in range(output_count)]
output_count = len(outputs)
raw_attr = {}
onnx_attrs = []
for a, v in attr.items():
if isinstance(v, AttributeProto):
onnx_attrs.append(v)
else:
raw_attr[a] = v
n = self.get_node_by_name(name)
utils.make_sure(n is None, "name %s already exists in node: \n%s", name, n)
for o in outputs:
n = self.get_node_by_output_in_current_graph(o)
utils.make_sure(n is None, "output tensor named %s already exists in node: \n%s", o, n)
onnx_node = helper.make_node(op_type, inputs, outputs, name=name, domain=domain, **raw_attr)
for name2 in onnx_node.input:
self._register_input_name(name2, onnx_node)
if op_type in ["If", "Loop", "Scan"]:
# we force the op containing inner graphs not skipped during conversion.
skip_conversion = False
node = Node(onnx_node, self, skip_conversion=skip_conversion)
if onnx_attrs:
_ = [node.set_attr_onnx(a) for a in onnx_attrs]
for branch, body in branches.items():
node.set_body_graph_as_attr(branch, body)
if shapes:
utils.make_sure(len(shapes) == output_count,
"output shape count %s not equal to output count %s", len(shapes), output_count)
for i in range(output_count):
self.set_shape(node.output[i], shapes[i])
if dtypes:
utils.make_sure(len(dtypes) == output_count,
"output dtypes count %s not equal to output count %s", len(dtypes), output_count)
for i in range(output_count):
self.set_dtype(node.output[i], dtypes[i])
if (not shapes or not dtypes) and infer_shape_dtype:
self.update_node_shape_dtype(node, override=False)
logger.debug("Made node: %s\n%s", node.name, node.summary)
self._nodes.append(node)
return node
def append_node(self, node):
"Add a node to the graph."
output_shapes = node.output_shapes
output_dtypes = node.output_dtypes
node.graph = self
self._nodes.append(node)
self._nodes_by_name[node.name] = node
for i, name in enumerate(node.output):
self._output_to_node_name[name] = node.name
self.set_dtype(name, output_dtypes[i])
self.set_shape(name, output_shapes[i])
for name in node.input:
self._register_input_name(name, node)
def remove_node(self, node_name):
"""Remove node in current graph."""
utils.make_sure(node_name in self._nodes_by_name, "node %s not in current graph, cannot remove", node_name)
node = self.get_node_by_name(node_name)
del self._nodes_by_name[node_name]
if node_name in self.contained_graphs:
del self.contained_graphs[node_name]
if node in self.inputs:
self.inputs.remove(node)
for op_output in node.output:
if op_output == "":
continue
del self._output_to_node_name[op_output]
if op_output in self._output_shapes:
del self._output_shapes[op_output]
if op_output in self._dtypes:
del self._dtypes[op_output]
for op_input in node.input:
if op_input == "":
continue
utils.make_sure(
op_input in self._output_to_consumers,
"Input %r of node %r not found.", op_input, node_name)
self._unregister_input_name(op_input, node)
self._nodes.remove(node)
node.graph = None
def reset_nodes(self, ops):
"""Reset the graph with node list."""
remained_dtypes = {}
remained_shapes = {}
remained_sub_graphs = {}
for op in ops:
for op_output in op.output:
# this check should be removed once we make sure all output tensors have dtype/shape.
if op_output in self._dtypes:
remained_dtypes[op_output] = self._dtypes[op_output]
if op_output in self._output_shapes:
remained_shapes[op_output] = self._output_shapes[op_output]
if op.name in self.contained_graphs:
remained_sub_graphs[op.name] = self.contained_graphs[op.name]
self._nodes = ops
self.contained_graphs = remained_sub_graphs
self._nodes_by_name = {op.name: op for op in ops}
self._output_to_node_name = {}
self._output_to_consumers = {}
for op in ops:
for op_output in op.output:
self._output_to_node_name[op_output] = op.name
inps = op.input
for op_input in inps:
self._register_input_name(op_input, op)
for n in self.inputs:
if n not in ops:
raise ValueError("graph input " + n + " not exist")
for o in self.outputs:
if o not in self._output_to_node_name:
raise ValueError("graph output " + o + " not exist")
self._dtypes = remained_dtypes
self._output_shapes = remained_shapes
def is_empty_input(self, name):
# in ONNX, operation may have optional input and an empty string may be used
# in the place of an actual argument's name to indicate a missing argument
return name == utils.ONNX_EMPTY_INPUT
def check_integrity(self):
"""
Check graph integrity. Every node's input needs to associate with a node.
Return broken outputs.
"""
broken_outputs = set()
for node in self.get_nodes():
for inp in node.input:
if self.get_node_by_output(inp) is None and not self.is_empty_input(inp):
broken_outputs.add(inp)
return list(broken_outputs)
def update_node_shape_dtype(self, node, override=False):
"""Try the best to infer shapes and dtypes for outputs of the node,
by default, we respect TF shapes and dtypes.
"""
if node.is_const() or node.is_graph_input():
return
# NOTE: only support onnx node for now
if not utils.is_onnx_domain(node.domain):
return
logger.debug("Infer shape and dtype for [%s]", node.name)
# NOTE: shape inference for some ops need the input values of the op, e.g., Reshape
# op needs the "Shape" value to infer output shape.
initializers = []
for i, inp in enumerate(node.inputs):
if inp is None:
if not self.is_empty_input(node.input[i]):
if logger.isEnabledFor(logging.INFO):
logger.warning(
"[%s] infer a inexistent node: [%s], please check the code",
node.name, node.input[i]
)
continue
if inp.is_const():
t = inp.get_attr("value")
tensor = helper.get_attribute_value(t)
tensor.name = inp.output[0]
initializers.append(tensor)
input_shapes = [self.get_shape(i) for i in node.input]
input_dtypes = [self.get_dtype(i) for i in node.input]
shapes, dtypes = infer_onnx_shape_dtype(node, self._opset, input_shapes, input_dtypes, initializers)
if not shapes or not dtypes:
return
for output, shape, dtype in zip(node.output, shapes, dtypes):
if dtype == TensorProto.UNDEFINED:
logger.debug("Inferred dtype for [%s, type: %s] is UNDEFINED, SKIP", node.name, node.type)
else:
existing_dtype = self.get_dtype(output)
if existing_dtype is not None and existing_dtype != dtype and not override:
dtype = existing_dtype
self.set_dtype(output, dtype)
logger.debug("Set dtype of [%s] to %s", output, dtype)
if shape is None:
logger.debug("Inferred shape for [%s, type: %s] is None, SKIP", node.name, node.type)
else:
existing_shape = self.get_shape(output)
if existing_shape is not None and not utils.are_shapes_equal(existing_shape, shape) and not override:
shape = existing_shape
self.set_shape(output, shape)
logger.debug("Set shape of [%s] to %s", output, shape)
def update_proto(self, external_tensor_storage=None):
"""Update the onnx protobuf from out internal Node structure."""
for node in self._nodes:
node.update_proto(external_tensor_storage)
def get_nodes(self):
"""Get node list."""
return self._nodes
def get_node_by_output(self, output, search_in_parent_graphs=True):
"""Get node by node output id recursively going through nested graphs.
Args:
search_in_parent_graphs: search in all parent graphs
"""
ret = None
g = self
while not ret and g:
ret = g.get_node_by_output_in_current_graph(output)
if ret:
return ret
if not search_in_parent_graphs:
break
g = g.parent_graph
return ret
def get_node_by_output_in_current_graph(self, output):
"""Get node by node output id."""
name = self._output_to_node_name.get(output)
ret = None
if name:
ret = self._nodes_by_name.get(name)
return ret
def get_node_by_name(self, name):
"""Get node by name."""
ret = self._nodes_by_name.get(name)
return ret
def set_node_by_name(self, node):
"""Set node by name."""
self._nodes_by_name[node.name] = node
for op_output in node.output:
self._output_to_node_name[op_output] = node.name
for name in node.input:
self._register_input_name(name, node)
def is_const(self, output):
return self.get_node_by_output(output).is_const()
def get_tensor_value(self, output, as_list=True):
return self.get_node_by_output(output).get_tensor_value(as_list)
def change_node_name(self, node, new_name):
"""Remove node in current graph."""
utils.make_sure(new_name not in self._nodes_by_name, "node %s not unique ", new_name)
dtypes = node.output_dtypes
shapes = node.output_shapes
self.remove_node(node.name)
new_node = self.make_node(node.type, node.input, output_count=len(node.output),
attr=node.attr, dtypes=dtypes, shapes=shapes, name=new_name)
for i, old_output in enumerate(node.output):
new_output = port_name(new_name, i)
for j, k in enumerate(self.outputs):
if k == old_output:
self.outputs[j] = new_output
break
self.replace_all_inputs(old_output, new_output, ops=self.get_nodes())
return new_node
def add_graph_input(self, name, dtype=None, shape=None):
"""Add placeholder node as graph's input. Order matters only for subgraph.
Placeholders in original graph are assumed for main graph, order not matters.
"""
if dtype is None:
dtype = self.get_dtype(name)
if shape is None:
shape = self.get_shape(name)
new_node = self.make_node("Placeholder", [], outputs=[name], dtypes=[dtype], shapes=[shape])
self.inputs.append(new_node)
def add_graph_input_with_default(self, name, default_const, dtype=None, shape=None):
"""Add placeholderwithdefault."""
if dtype is None:
dtype = self.get_dtype(name)
if shape is None:
shape = self.get_shape(name)
default_const_name = port_name(make_name("{}_default".format(name)))
default_const.output = [default_const_name]
new_node = self.make_node("PlaceholderWithDefault", [default_const_name], outputs=[name],
dtypes=[dtype], shapes=[shape])
self.inputs.append(new_node)
def add_graph_output(self, name, dtype=None, shape=None):
"""Add node output as graph's output."""
utils.make_sure(name in self._output_to_node_name, "output %s not exist in the graph", name)
if dtype is None:
dtype = self.get_dtype(name)
if shape is None:
shape = self.get_shape(name)
if name not in self.outputs:
utils.make_sure(shape is not None, "shape for output %s should not be None", name)
utils.make_sure(dtype is not None, "dtype for output %s should not be None", name)
self.outputs.append(name)
self.set_shape(name, shape)
self.set_dtype(name, dtype)
else:
raise ValueError("graph output " + name + " already exists")
def get_dtype(self, name):
"""Get dtype for node."""
node = self.get_node_by_output(name, search_in_parent_graphs=True)
return node.graph._dtypes.get(name) if node else None
def set_dtype(self, name, dtype):
"""Set dtype for node."""
node = self.get_node_by_output(name, search_in_parent_graphs=True)
node.graph._dtypes[name] = dtype
def copy_dtype(self, src_name, dst_name):
"""Copy dtype from another node."""
dtype = self.get_dtype(src_name)
self.set_dtype(dst_name, dtype)
def get_shape(self, name):
"""Get shape for node."""
utils.make_sure(isinstance(name, six.text_type), "get_shape name is invalid type: %s", name)
node = self.get_node_by_output(name, search_in_parent_graphs=True)
shape = node.graph._output_shapes.get(name) if node else None
if shape:
for i, v in enumerate(shape):
if v is None:
# pylint: disable=unsupported-assignment-operation
shape[i] = -1
# hack to allow utils.ONNX_UNKNOWN_DIMENSION to override batchsize if needed.
# default is -1.
if shape[0] == -1:
# pylint: disable=unsupported-assignment-operation
shape[0] = utils.ONNX_UNKNOWN_DIMENSION
return shape
return shape
def get_rank(self, name):
"""Returns len(get_shape(name)) or None if shape is None"""
shape = self.get_shape(name)
if shape is None:
return None
return len(shape)
def set_shape(self, name, val):
"""Set new shape of node."""
if isinstance(val, np.ndarray):
val = val.tolist()
if isinstance(val, tuple):
val = list(val)
node = self.get_node_by_output(name, search_in_parent_graphs=True)
utils.make_sure(node is not None, "cannot find node by output id %s", name)
node.graph._output_shapes[name] = val
def copy_shape(self, input_name, output_name):
"""Copy shape from another node."""
shape = self.get_shape(input_name)
# assert shape is not None
if shape is not None:
self.set_shape(output_name, shape)
def topological_sort(self, ops):
"""Topological sort of graph."""
# sort by name; the resulting order will be reverse alphabetical
ops.sort(key=lambda op: op.name)
def _push_stack(stack, node, in_stack):
stack.append(node)
if node in in_stack:
raise ValueError('Graph has cycles, node=' + ops[node].name)
in_stack[node] = True
def _get_unvisited_child(g, node, not_visited):
for child in g[node]:
if child in not_visited:
return child
return -1
n = len(ops)
g = [[] for _ in range(n)]
op_name_to_index = {}
for i, op in enumerate(ops):
op_name_to_index[op.name] = i
for i, op in enumerate(ops):
all_input = set(op.input)
implicit_inputs = op.get_implicit_inputs()
all_input |= set(implicit_inputs)
# remove those empty inputs
all_input = list(filter(lambda a: a != '', all_input))
for inp in sorted(all_input):
j = self.get_node_by_output(inp)
utils.make_sure(j is not None, "Cannot find node with output %r in graph %r", inp, self.graph_name)
if self.parent_graph and j.name not in op_name_to_index:
# there might be some outer-scoped inputs for an inner Graph.
pass
else:
g[op_name_to_index[j.name]].append(i)
# label for each op. highest = sink nodes.
label = [-1 for _ in range(n)]
stack = []
in_stack = dict()
not_visited = dict.fromkeys(range(n))
label_counter = n - 1
while not_visited:
node = list(not_visited.keys())[0]
_push_stack(stack, node, in_stack)
while stack:
node = _get_unvisited_child(g, stack[-1], not_visited)
if node != -1:
_push_stack(stack, node, in_stack)
else:
node = stack.pop()
in_stack.pop(node)
not_visited.pop(node)
label[node] = label_counter
label_counter -= 1
ret = [x for _, x in sorted(zip(label, ops))]
self.reset_nodes(ret)
def make_graph(self, doc, graph_name=None, external_tensor_storage=None):
"""
Create GraphProto for onnx from internal graph.
Args:
doc: text for the doc string of the graph
graph_name: optional name for the resulting GraphProto
external_tensor_storage: if given, large tensors are stored externally and collected here
"""
graph_name = graph_name or self.graph_name
self.delete_unused_nodes(self.outputs)
self.topological_sort(self.get_nodes())
self.update_proto(external_tensor_storage)
# TODO: we'd want to do something like this so that transpose optimizer is active
# for all (unit) tests
# if optimize:
# from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
# optimizer = TransposeOptimizer(self, False)
# optimizer.optimize()
ops = []
const_ops = []
graph_inputs = self.inputs.copy()
for op in self.get_nodes():
if op.is_const():
const_ops.append(op)
elif op.is_graph_input():
if op not in graph_inputs:
graph_inputs.append(op)
else:
ops.append(op)
# create initializers for placeholder with default nodes
initializers = []
placeholder_default_const_ops = []
for op in graph_inputs:
if op.type == "PlaceholderWithDefault":
utils.make_sure(op.inputs[0] is not None, "Cannot find node with output {}".format(op.input[0]))
utils.make_sure(op.inputs[0].is_const(),
"non-const default value for PlaceholderWithDefault node '%s' is not supported. "
"Use the --use_default or --ignore_default flags to convert this node.", op.name)
# copy the tensor value, set its name to current node's output, add as initializer
value = op.inputs[0].get_tensor_value(as_list=False)
tensor = numpy_helper.from_array(value, op.output[0])
initializers.append(tensor)
placeholder_default_const_ops.append(op.inputs[0])
# create initializers for constant nodes
const_ops = [op for op in const_ops if op not in placeholder_default_const_ops]
for op in const_ops:
# do not use numpy_helper.from_array to create a new tensor here,
# because onnx sometimes has a bug where it only checks the tensor data in a specific field,
# e.g. for Upsample it only checks the float_data field.
t = op.get_value_attr(external_tensor_storage)
tensor = helper.get_attribute_value(t)
tensor.name = op.output[0]
initializers.append(tensor)
# create input_tensor_values
input_ids = [op.output[0] for op in graph_inputs]
# onnx with IR version below 4 requires initializer should be in inputs.
# here we check opset version rather than IR version for the reason:
# https://github.com/onnx/tensorflow-onnx/pull/557
# opset 9 come with IR 4.
if self.opset < 9:
input_ids += [op.output[0] for op in const_ops]
input_tensor_values = self.make_onnx_graph_io(input_ids)
# create output_tensor_values
output_tensor_values = self.make_onnx_graph_io(self.outputs)
# create graph proto
graph = helper.make_graph([op.op for op in ops],
graph_name,
input_tensor_values,
output_tensor_values,
initializer=initializers,
doc_string=doc)
return graph
def make_model(self, graph_doc, optimize=False, graph_name="tf2onnx", external_tensor_storage=None, **kwargs):
"""
Create final ModelProto for onnx from internal graph.
Args:
graph_doc: text for the doc string of the model
optimize: optimize the graph via the onnx optimizer
"""
graph = self.make_graph(graph_doc, graph_name, external_tensor_storage)
if "producer_name" not in kwargs:
kwargs = {"producer_name": "tf2onnx",
"producer_version": __version__}
if "opset_imports" not in kwargs:
opsets = []
imp = OperatorSetIdProto()
imp.version = self._opset
opsets.append(imp)
if self.extra_opset is not None:
opsets.extend(self.extra_opset)
kwargs["opset_imports"] = opsets
model_proto = helper.make_model(graph, **kwargs)
utils.make_sure(self.opset in constants.OPSET_TO_IR_VERSION,
"Opset %s is not supported yet. Please use a lower opset" % self.opset)
# set the IR version based on opset
try:
model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.opset, model_proto.ir_version)
except: # pylint: disable=bare-except
logger.error("ir_version override failed - install the latest onnx version")
# optimize the model proto.
# TODO: this is disabled by default because of bugs in fuse_consecutive_transposes
if optimize:
model_proto = optimizer.optimize(model_proto)
return model_proto
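# Illustrative end-to-end sketch (not part of this class; file name is hypothetical):
# once conversion is done, the usual flow is make_model() followed by onnx.save, e.g.
#
#   import onnx
#   model_proto = g.make_model("converted from tensorflow")
#   onnx.save(model_proto, "model.onnx")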
def make_onnx_graph_io(self, ids):
"""Create tensor_value_info for passed input/output ids."""
tensor_value_infos = []
for name in ids:
dtype = self.get_dtype(name)
shape = self.get_shape(name)
utils.make_sure(dtype is not None, "missing output dtype for " + name)
# TODO: allow None output shape or not? e.g. shape=(?,)
#utils.make_sure(shape is not None, "missing output shape for " + name)
if shape is None: logger.warning("missing output shape for %s", name)
v = utils.make_onnx_inputs_outputs(name, dtype, shape)
tensor_value_infos.append(v)
return tensor_value_infos
def dump_graph(self):
"""Dump graph with shapes (helpful for debugging)."""
for node in self.get_nodes():
input_names = ["{}{}".format(n, self.get_shape(n)) for n in node.input]
logger.debug("%s %s %s %s",
node.type,
self.get_shape(node.output[0]),
node.name,
", ".join(input_names))
def follow_inputs(self, node, num, space=""):
"""Follow inputs for (helpful for debugging)."""
val = []
top = space == ""
if num == 0:
return []
val.append("{}{} {} {}".format(space, node.type, node.name, self.get_shape(port_name(node.name))))
space += " "
for j in node.inputs:
val.extend(self.follow_inputs(j, num - 1, space))
if top:
print("\n".join(reversed(val)))
print()
return []
return val
def dump_node_statistics(self):
op_cnt = collections.Counter()
for n in self.get_nodes():
op_cnt[n.type] += 1
body_graphs = n.get_body_graphs()
if body_graphs:
for b_g in body_graphs.values():
op_cnt += b_g.dump_node_statistics()
return op_cnt
def remove_input(self, node, to_be_removed, input_index=None):
"""Remove input from Node.
Args:
node: the node we expect the input on
to_be_removed: the name of the input (tensor) we want to remove
input_index: if not None, index of the input to be removed,
the method is more efficient if *input_index* is specified,
otherwise, it has to look for every input named *old_input*.
"""
assert isinstance(node, Node) and isinstance(to_be_removed, six.text_type)
if input_index is not None:
assert node.input[input_index] == to_be_removed
if node.input[input_index] in self._output_to_consumers:
to_ops = self._output_to_consumers[node.input[input_index]]
if node.name in to_ops:
to_ops.remove(node.name)
del node.input[input_index]
return
for i, name in enumerate(node.input):
if name == to_be_removed:
utils.make_sure(
node.input.count(node.input[i]) <= 1,
"Node %r takes multiple times the same input %r. This case is not handled.",
node.name, node.input[i])
self._unregister_input_name(node.input[i], node)
del node.input[i]
break
# don't remove output from parent since others might depend on it
def insert_new_node_on_input(self, node, op_type, input_name, name=None, domain=None, input_index=None, **kwargs):
"""Create and insert a new node into the graph.
Args:
node: we want to replace the input for this node
op_type: type for new operation
input_name: the name(s) of the outputs above us
if scalar, new node placed above input_name
if list, new node placed above input_name[0]. list is inputs into new node
name: the name of the new op
kwargs: attributes of the new node
Returns:
node that was inserted
"""
if name is None:
name = utils.make_name(node.name)
new_output = port_name(name)
if not isinstance(input_name, list):
input_name = [input_name]
new_node = self.make_node(op_type, input_name, attr=kwargs, outputs=[new_output], name=name, domain=domain)
if input_index is None:
for i, n in enumerate(node.input):
if n == input_name[0]:
self.replace_input(node, node.input[i], new_output, i)
break
else:
self.replace_input(node, node.input[input_index], new_output, input_index)
return new_node
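# Illustration (node and tensor names are hypothetical): inserting a Cast in front of
# an existing node's first input, mirroring what Node.maybe_cast_input does above:
#
#   cast = g.insert_new_node_on_input(node, "Cast", node.input[0], to=TensorProto.FLOAT)
#   # cast.output[0] now feeds the original node in place of node.input[0]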
def insert_node_on_output(self, node, output_name=None):
"""
The inserted node takes the *output_name* as input and produces a
new output. The function goes through every node taking *output_name*
and replaces it by the new output name.
"""
if output_name is None:
output_name = node.input[0]
new_output = node.output[0]
to_replace = [self.get_node_by_name(n) for n in self._output_to_consumers[output_name]]
to_replace = [n for n in to_replace if n != node]
self.replace_all_inputs(output_name, new_output, ops=to_replace)
return node
def insert_new_node_on_output(self, op_type, output_name=None, name=None, inputs=None, domain=None, **kwargs):
"""Create and insert a new node into the graph.
It then calls insert_node_on_output.
Args:
op_type: type for new operation
output_name: the names of the outputs above us
name: the name of the new op
kwargs: attributes of the new node
Returns:
node that was inserted
"""
utils.make_sure(isinstance(output_name, six.text_type), "output_name's type is not expected: %s",
type(output_name))
utils.make_sure(isinstance(op_type, six.text_type), "op_type's type is not expected: %s",
type(op_type))
utils.make_sure(output_name is not None, "output_name cannot be None for op_type=%r.", op_type)
if inputs is None:
inputs = [output_name]
if name is None:
name = utils.make_name(op_type)
new_output = port_name(name)
new_node = self.make_node(op_type, inputs, attr=kwargs, outputs=[new_output], name=name, domain=domain)
return self.insert_node_on_output(new_node, output_name)
def find_output_consumers(self, output_name):
"""Find all nodes consuming a given output."""
if output_name in self._output_to_consumers:
ops = self._output_to_consumers[output_name]
ops = [self.get_node_by_name(n) for n in ops]
else:
ops = [] # self.get_nodes()
nodes = []
for node in ops:
if node is None:
continue
if output_name in node.input:
nodes.append(node)
# find consumers in sub graphs
if output_name in self._input_to_graph:
for g in self._input_to_graph[output_name].values():
nodes.extend(g.find_output_consumers(output_name))
return nodes
def _register_input_name(self, input_name, node, only_graph=False):
"Register node taking a specific input."
if not only_graph:
if input_name not in self._output_to_consumers:
self._output_to_consumers[input_name] = set()
self._output_to_consumers[input_name].add(node.name)
if self.parent_graph is not None:
if input_name not in self.parent_graph._input_to_graph:
self.parent_graph._input_to_graph[input_name] = {}
self.parent_graph._input_to_graph[input_name][id(self)] = self
self.parent_graph._register_input_name(input_name, node, only_graph=True)
def _unregister_input_name(self, input_name, node, only_graph=False):
"Unregister node taking a specific input."
node_name = node.name
if not only_graph:
            if input_name in self._output_to_consumers:
if node_name in self._output_to_consumers[input_name]:
self._output_to_consumers[input_name].remove(node_name)
if (self.parent_graph is not None and
input_name in self.parent_graph._input_to_graph and
id(self) in self.parent_graph._input_to_graph[input_name]):
del self.parent_graph._input_to_graph[input_name][id(self)]
self.parent_graph._unregister_input_name(input_name, node, only_graph=True)
def replace_all_inputs(self, old_input, new_input, ops=None):
"""
Replace all inputs pointing to old_input with new_input.
*ops* is used if defined, otherwise `_output_to_consumers`
is used to determine the impacted nodes.
"""
if old_input == new_input:
return
if new_input not in self._output_to_consumers:
self._output_to_consumers[new_input] = set()
if ops is not None:
keep_ops = True
elif old_input in self._output_to_consumers:
ops = list(
filter(lambda a: a is not None,
map(self.get_node_by_name, self._output_to_consumers[old_input])))
keep_ops = False
else:
ops = []
keep_ops = False
for node in ops:
assert node is not None
if old_input in node.input and new_input in node.output:
raise RuntimeError("creating a circle in the graph is not allowed: " + node.name)
self._register_input_name(new_input, node)
for i, input_name in enumerate(node.input):
if input_name == old_input:
self.replace_input(node, node.input[i], new_input, i)
# modify references in sub graphs
if old_input in self._input_to_graph:
for g in self._input_to_graph[old_input].values():
g.replace_all_inputs(old_input, new_input,
ops=g.get_nodes() if keep_ops else None)
def replace_input(self, node, old_input, new_input, input_index=None):
"""
Replace one input in a node.
The method is more efficient if *input_index* is specified.
        Otherwise, it replaces every input of the node named *old_input*.
"""
assert isinstance(node, Node) and isinstance(old_input, six.text_type) and isinstance(new_input, six.text_type)
is_replaced = False
if input_index is None:
for i, input_name in enumerate(node.input):
if input_name == old_input:
node.input[i] = new_input
is_replaced = True
elif node.input[input_index] == old_input:
node.input[input_index] = new_input
is_replaced = True
else:
raise RuntimeError("Unable to replace input %r into %r for node %r." % (old_input, new_input, node.name))
to_ops = self._output_to_consumers.get(old_input, None)
if to_ops is not None:
if node.name in to_ops:
# A node may take twice the same entry.
to_ops.remove(node.name)
self._register_input_name(new_input, node)
return is_replaced
def replace_inputs(self, node, new_inputs):
"""Replace node inputs."""
assert isinstance(node, Node) and isinstance(new_inputs, list)
for old_input in node.input:
to_ops = self._output_to_consumers.get(old_input, None)
if to_ops is not None and old_input in to_ops:
# To avoid issues when a node
# takes twice the same entry.
to_ops.remove(old_input)
for input_name in new_inputs:
assert isinstance(input_name, six.text_type)
self._register_input_name(input_name, node)
node.input = new_inputs
return True
def _extract_sub_graph_nodes(self, dest_node, input_checker=None):
"""Return nodes of subgraph ending with dest_node.
Args:
dest_node: output node of the subgraph to find
input_checker: customized input check function: bool func(node)
Return:
a set of nodes
"""
res_set = set()
if not dest_node or (input_checker and input_checker(dest_node) is False):
return res_set
processing_set = set([dest_node])
while processing_set:
top_node = processing_set.pop()
res_set.add(top_node)
all_inputs = top_node.input + list(top_node.get_implicit_inputs())
for input_id in all_inputs:
# we don't care about nested graph here, just handle current graph cropping.
node = self.get_node_by_output(input_id, search_in_parent_graphs=False)
if not node:
# some nodes (for example Scan) have optional inputs, which
# might have empty input.
# subgraph might have input defined in outer graph
continue
if node not in res_set:
if input_checker and input_checker(node) is False:
continue
processing_set.add(node)
return res_set
def extract_sub_graph_nodes(self, outputs_name, input_checker=None, remove_unused_inputs=True):
"""Return nodes of subgraph having output_ids as outputs.
Args:
            outputs_name: output names of the subgraph to find
input_checker: customized input check function: bool func(node)
remove_unused_inputs: bool, indicates whether unused placeholder inputs will be removed
in the resulting nodes.
Return:
a list of nodes
"""
res_set = set()
outputs_to_keep = list(outputs_name)
if not remove_unused_inputs:
# add placeholder nodes even if they are not connected to outputs.
# placeholder nodes with defaults can have inputs themselves
outputs_to_keep += [inp.output[0] for inp in self.inputs]
for output in outputs_to_keep:
node = self.get_node_by_output(output, search_in_parent_graphs=False)
res_set = res_set.union(self._extract_sub_graph_nodes(node, input_checker))
return list(res_set)
def delete_unused_nodes(self, outputs_name):
"""Delete nodes not in subgraph ending with output_names."""
if not outputs_name:
logger.debug("Outputs not specified, delete_unused_nodes not taking effect.")
return
# we need keep those placeholders that are used as input of Loop's body graph.
# some of them are not used in the graph, but still need be there to keep the graph complete.
related_nodes = self.extract_sub_graph_nodes(outputs_name, remove_unused_inputs=False)
for node in related_nodes:
attr_body_graphs = node.get_body_graphs()
if attr_body_graphs:
for body_graph in attr_body_graphs.values():
body_graph.delete_unused_nodes(body_graph.outputs)
self.reset_nodes(related_nodes)
def safe_to_remove_nodes(self, to_delete):
""" List of nodes that safe to delete (i.e. outputs not consumed by other nodes.)"""
safe_to_remove = []
delete_set = set(to_delete)
for n in delete_set:
out_consumers = set()
for out in n.output:
out_consumers |= set(self.find_output_consumers(out))
if out_consumers.issubset(delete_set):
safe_to_remove.append(n)
return safe_to_remove
# TODO(tomwildenhain): Remove this function
def safe_remove_nodes(self, to_delete):
"""Delete nodes in `to_delete` without third-party node consuming it."""
delete_set = set(to_delete)
for n in delete_set:
out_consumers = set()
for out in n.output:
out_consumers |= set(self.find_output_consumers(out))
if out_consumers.issubset(delete_set):
self.remove_node(n.name)
def is_safe_to_remove_nodes(self, to_delete, outputs_to_ignore=None):
"""Returns true if the outputs of all the nodes in to_delete have no third-party nodes consuming them"""
delete_set = set(to_delete)
outputs_to_ignore_set = set(outputs_to_ignore or [])
for n in delete_set:
out_consumers = set()
for out in n.output:
if out in outputs_to_ignore_set:
continue
out_consumers |= set(self.find_output_consumers(out))
if not out_consumers.issubset(delete_set):
return False
return True
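# Hedged sketch (not part of the original module): a hypothetical helper showing how the
# subgraph-extraction and safe-removal methods above are commonly combined. The graph
# object and the list of wanted output names are assumed to come from the caller.
def _prune_to_outputs_sketch(graph, wanted_outputs):
    """Illustrative only: keep just the nodes needed to compute *wanted_outputs*."""
    keep = set(graph.extract_sub_graph_nodes(wanted_outputs))
    unused = [n for n in graph.get_nodes() if n not in keep]
    if graph.is_safe_to_remove_nodes(unused):
        graph.safe_remove_nodes(unused)
    return graph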
class GraphUtil(object):
"""Utilities for Graph manipulation."""
@staticmethod
def optimize_graph(graph, catch_errors=True):
return optimizer.optimize_graph(graph, catch_errors)
@staticmethod
def optimize_model_proto(onnx_model_proto, catch_errors=True, return_graph=False):
"""Optimize the model proto, for example: eliminating all useless Transpose pairs.
Returns:
model proto (and possibly graph) after optimization, if optimizer run successfully
or onnx_model_proto, if exceptions happens
"""
try:
kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto)
graph = GraphUtil.create_graph_from_onnx_model(onnx_model_proto)
graph = GraphUtil.optimize_graph(graph, catch_errors)
model_proto = graph.make_model(onnx_model_proto.graph.doc_string,
graph_name=onnx_model_proto.graph.name, **kwargs)
if onnx_model_proto.metadata_props:
metadata_props = {p.key: p.value for p in onnx_model_proto.metadata_props}
helper.set_model_props(model_proto, metadata_props)
if return_graph:
return model_proto, graph
return model_proto
except Exception as e:
if not catch_errors:
raise e
# sometimes, onnx shape inference will fail for some reason,
# return onnx_model_proto for this case
logger.warning("Failed to optimize model proto", exc_info=1)
if return_graph:
return onnx_model_proto, None
return onnx_model_proto
@staticmethod
def get_onnx_model_properties(onnx_model_proto):
"""Get ModelProto properties"""
kwargs = {}
if onnx_model_proto.HasField('ir_version'):
kwargs["ir_version"] = onnx_model_proto.ir_version
if onnx_model_proto.HasField('producer_name'):
kwargs["producer_name"] = onnx_model_proto.producer_name
if onnx_model_proto.HasField('producer_version'):
kwargs["producer_version"] = onnx_model_proto.producer_version
if onnx_model_proto.HasField('domain'):
kwargs["domain"] = onnx_model_proto.domain
if onnx_model_proto.HasField('model_version'):
kwargs["model_version"] = onnx_model_proto.model_version
if onnx_model_proto.HasField('doc_string'):
kwargs["doc_string"] = onnx_model_proto.doc_string
kwargs["opset_imports"] = onnx_model_proto.opset_import
return kwargs
@staticmethod
def create_graph_from_onnx_model(onnx_model_proto):
"""Create Graph loading onnx model proto."""
# apply shape inference on the model
inferred_model = shape_inference.infer_shapes(onnx_model_proto)
graph_proto = inferred_model.graph
opset_version = None
extra_opset = []
for opset in onnx_model_proto.opset_import:
if not opset.domain:
                # an empty or missing domain field means the default onnx domain
opset_version = opset.version
else:
extra_opset.append(opset)
utils.make_sure(opset_version is not None, "opset version is not specified for onnx domain")
main_graph = GraphUtil.create_graph_from_onnx_graph(graph_proto, opset_version, extra_opset)
return main_graph
@staticmethod
def create_graph_from_onnx_graph(graph_proto, opset_version=None, extra_opset=None):
"""Create Graph loading onnx graph proto."""
output_shapes = {}
output_dtypes = {}
shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.value_info)
output_shapes.update(shapes)
output_dtypes.update(dtypes)
shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.output)
output_shapes.update(shapes)
output_dtypes.update(dtypes)
nodes_to_append = []
for n in graph_proto.node:
if n.op_type == "Constant":
n.op_type = "Const"
            # some pytorch models have empty node names - make one up
if not n.name:
n.name = utils.make_name("was_empty")
nodes_to_append.append(n)
output_names = []
for n in graph_proto.output:
output_names.append(n.name)
g = Graph(nodes_to_append, output_shapes, output_dtypes, None, opset_version, extra_opset, None, output_names)
const_nodes = GraphUtil._parse_graph_initializer(g, graph_proto)
GraphUtil._parse_graph_input(g, graph_proto, [n.name for n in const_nodes])
for n in g.get_nodes():
for attr_name, attr_val in n.attr.items():
if attr_val.HasField('g'):
                    # it is assumed that attr_val.g already has inferred shapes/dtypes.
sub_g = GraphUtil.create_graph_from_onnx_graph(attr_val.g, opset_version, extra_opset)
n.set_body_graph_as_attr(attr_name, sub_g)
return g
@staticmethod
def get_node_count_from_onnx_graph(graph_proto):
op_cnt = collections.Counter()
for n in graph_proto.node:
op_cnt[n.op_type] += 1
return op_cnt
@staticmethod
def _parse_shape_and_type_from_value_infos(value_infos):
"""Get nodes output shapes and types from value infos."""
output_shapes = {}
output_dtypes = {}
for shape_info in value_infos:
type_proto = shape_info.type
elem_type = type_proto.tensor_type.elem_type
shape = type_proto.tensor_type.shape
tuned_shape = []
for d in shape.dim:
if d.HasField('dim_param'):
tuned_shape.append(-1)
elif d.HasField('dim_value'):
tuned_shape.append(d.dim_value)
else:
                    # in some cases, unknown dims are missing after shape inference.
tuned_shape.append(-1)
output_shapes[shape_info.name] = tuned_shape
output_dtypes[shape_info.name] = elem_type
return output_shapes, output_dtypes
@staticmethod
def _parse_graph_initializer(g, graph_proto):
"""Get graph initializers and put into Graph object."""
const_nodes = []
for initializer in graph_proto.initializer:
np_val = numpy_helper.to_array(initializer)
const_nodes.append(g.make_const(initializer.name, np_val))
return const_nodes
@staticmethod
def _parse_graph_input(g, graph_proto, const_node_names):
"""Get graph inputs not defined as initializers and put into Graph object."""
shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.input)
# make sure the input is added in order we read from graph_proto,
# because for subgraphs, the input orders matter.
for graph_input in graph_proto.input:
name = graph_input.name
shape = shapes[name]
dtype = dtypes[name]
if name not in const_node_names:
g.add_graph_input(name, dtype, shape)
else:
g.add_graph_input_with_default(name, g.get_node_by_name(name), dtype, shape)
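# Hedged sketch (illustrative, not part of the original module): typical end-to-end use of
# GraphUtil to load an ONNX model, optimize it and inspect the resulting op mix. The file
# names are hypothetical.
if __name__ == "__main__":
    import onnx
    _model_proto = onnx.load("model.onnx")
    _optimized_proto, _graph = GraphUtil.optimize_model_proto(_model_proto, return_graph=True)
    if _graph is not None:
        print(_graph.dump_node_statistics())
    onnx.save(_optimized_proto, "model.opt.onnx")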
| 40.338497
| 120
| 0.604452
|
0b6ff5289d2648b181db81ce7676b8ec6adf5d5b
| 10,629
|
py
|
Python
|
mybic/labs/migrations/0026_auto__add_sitearticle.py
|
chop-dbhi/mybic
|
c6c99c002dacc9c181b640e62a1943943b396561
|
[
"BSD-2-Clause"
] | 1
|
2016-09-19T15:55:00.000Z
|
2016-09-19T15:55:00.000Z
|
mybic/labs/migrations/0026_auto__add_sitearticle.py
|
chop-dbhi/mybic
|
c6c99c002dacc9c181b640e62a1943943b396561
|
[
"BSD-2-Clause"
] | null | null | null |
mybic/labs/migrations/0026_auto__add_sitearticle.py
|
chop-dbhi/mybic
|
c6c99c002dacc9c181b640e62a1943943b396561
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SiteArticle'
db.create_table(u'labs_sitearticle', (
(u'article_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(to=orm['news.Article'], unique=True,
primary_key=True)),
))
db.send_create_signal(u'labs', ['SiteArticle'])
def backwards(self, orm):
# Deleting model 'SiteArticle'
db.delete_table(u'labs_sitearticle')
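    # Hedged illustration (not part of the South-generated file): on a typical SQL backend the
    # forwards() step above amounts to creating a one-column table whose primary key doubles as
    # a foreign key to news_article, roughly:
    #
    #   CREATE TABLE "labs_sitearticle" (
    #       "article_ptr_id" integer NOT NULL PRIMARY KEY REFERENCES "news_article" ("id")
    #   );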
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')",
'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': (
'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True',
'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True',
'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'labs.childindex': {
'Meta': {'object_name': 'ChildIndex'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.CharField', [],
{'default': "'/mnt/variome/'", 'max_length': '300', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['labs.Project']"})
},
u'labs.lab': {
'Meta': {'object_name': 'Lab'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': (
'django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'pi': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'labs.labarticle': {
'Meta': {'ordering': "['-created']", 'object_name': 'LabArticle', '_ormbases': [u'news.Article']},
u'article_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': u"orm['news.Article']", 'unique': 'True', 'primary_key': 'True'}),
'lab': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['labs.Lab']"})
},
u'labs.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'de_dir': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '300', 'null': 'True', 'blank': 'True'}),
'git_branch': (
'django.db.models.fields.CharField', [], {'default': "'master'", 'max_length': '100', 'db_index': 'True'}),
'git_repo': (
'django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '300', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_page': ('django.db.models.fields.CharField', [],
{'default': "'/mnt/variome/'", 'max_length': '300', 'db_index': 'True'}),
'lab': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['labs.Lab']"}),
'modified': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'static_dir': ('django.db.models.fields.CharField', [],
{'default': "'/mnt/variome/'", 'max_length': '300', 'db_index': 'True'})
},
u'labs.projectarticle': {
'Meta': {'ordering': "['-created']", 'object_name': 'ProjectArticle', '_ormbases': [u'news.Article']},
u'article_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': u"orm['news.Article']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['labs.Project']"})
},
u'labs.sitearticle': {
'Meta': {'ordering': "['-created']", 'object_name': 'SiteArticle', '_ormbases': [u'news.Article']},
u'article_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': u"orm['news.Article']", 'unique': 'True', 'primary_key': 'True'})
},
u'labs.staff': {
'Meta': {'object_name': 'staff', '_ormbases': [u'auth.User']},
'projects': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['labs.Project']", 'symmetrical': 'False'}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'news.article': {
'Meta': {'ordering': "['-created']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [],
{'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.related.ManyToManyField', [],
{'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'symmetrical': 'False',
'to': u"orm['news.Category']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup_filter': (
'django.db.models.fields.PositiveIntegerField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'news.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': u"orm['news.Category']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['labs']
| 66.43125
| 119
| 0.523003
|
4e68a5d39b49c938a25fbcd781a8c10da18b3256
| 200
|
py
|
Python
|
main.py
|
sahooj105/Major-Project
|
297c9e89c19e9143a992c90839c27ce40fd2e6f6
|
[
"MIT"
] | null | null | null |
main.py
|
sahooj105/Major-Project
|
297c9e89c19e9143a992c90839c27ce40fd2e6f6
|
[
"MIT"
] | null | null | null |
main.py
|
sahooj105/Major-Project
|
297c9e89c19e9143a992c90839c27ce40fd2e6f6
|
[
"MIT"
] | null | null | null |
import process_image
occupied_grids, planned_path = process_image.main("test_images/test_image4.jpg")
print ("Occupied Grids : ")
print (occupied_grids)
print ("Planned Path :")
print (planned_path)
| 25
| 80
| 0.785
|
5067c361b6cff5fbb0f984340ffd77f426125edc
| 603
|
py
|
Python
|
app/__init__.py
|
ShadrackNdolo/MovieDatabaseAP
|
38d88daa13e72487a63654d38e7544041359228f
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
ShadrackNdolo/MovieDatabaseAP
|
38d88daa13e72487a63654d38e7544041359228f
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
ShadrackNdolo/MovieDatabaseAP
|
38d88daa13e72487a63654d38e7544041359228f
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
bootstrap = Bootstrap()
def create_app(config_name):
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
bootstrap.init_app(app)
# Will add the views and forms
# Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# setting config
from .requests import configure_request
configure_request(app)
return app
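# Hedged sketch (illustrative only): how this application factory is typically consumed from a
# separate entry point such as run.py; the 'development' config key is an assumption about the
# keys defined in config_options.
if __name__ == '__main__':
    app = create_app('development')
    app.run(debug=True)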
| 22.333333
| 55
| 0.752902
|
87ef4a92365ac21403c1027da566ea80a98ddbbc
| 408
|
py
|
Python
|
samples/blender-cartography-addon-exec.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
samples/blender-cartography-addon-exec.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
samples/blender-cartography-addon-exec.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
import os
import platform
import sys
print('Python version : ' + platform.python_version())
print('Script arguments: ' + ' '.join(arg for i, arg in enumerate(sys.argv) if i > sys.argv.index('--')))
folder = '.'
if folder not in sys.path:
sys.path.append(folder)
init_file = '__init__.py'
sys.argv.extend(['DEBUG_MODE'])
exec(compile(open(os.path.join(folder, init_file)).read(), init_file, 'exec'))
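# Hedged note (not part of the original sample): this launcher is typically handed to Blender on
# the command line, with script arguments placed after '--' so they reach the sys.argv parsing
# above, e.g. (hypothetical paths):
#
#   blender --background --python samples/blender-cartography-addon-exec.py -- my_extra_arg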
| 25.5
| 105
| 0.691176
|
58b4c0265ec54bedd60bec97b70ef2f117338c3a
| 26,219
|
py
|
Python
|
ooni/utils/onion.py
|
meejah/ooni-probe
|
f46dc5879da409763718cfa5aa2635ddf5332a54
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
ooni/utils/onion.py
|
meejah/ooni-probe
|
f46dc5879da409763718cfa5aa2635ddf5332a54
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
ooni/utils/onion.py
|
meejah/ooni-probe
|
f46dc5879da409763718cfa5aa2635ddf5332a54
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#
# onion.py
# ----------
# Utilities for working with Tor.
#
# This code is largely taken from txtorcon and its documentation, and as such
# any and all credit should go to Meejah. Minor adjustments have been made to
# use OONI's logging system, and to build custom circuits without actually
# attaching streams.
#
# :author: Meejah, Isis Lovecruft
# :license: see included LICENSE file
# :copyright: copyright (c) 2012 The Tor Project, Inc.
# :version: 0.1.0-alpha
#
# XXX TODO add report keys for onion methods
import random
import sys
from twisted.internet import defer
from zope.interface import implements
from txtorcon import CircuitListenerMixin, IStreamAttacher
from txtorcon import TorState, TorConfig
from ooni.utils import log
from ooni.utils.timer import deferred_timeout, TimeoutError
# XXX This can be refactored to os.path.abspath
# os.path.abspath(path)
# Return a normalized absolutized version of the pathname path. On most
# platforms, this is equivalent to normpath(join(os.getcwd(), path)).
def parse_data_dir(data_dir):
"""
Parse a string that a has been given as a DataDirectory and determine
its absolute path on the filesystem.
:param data_dir:
A directory for Tor's DataDirectory, to be parsed.
:return:
The absolute path of :param:data_dir.
"""
from os import path, getcwd
import sys
try:
assert isinstance(data_dir, str), \
"Parameter type(data_dir) must be str"
except AssertionError, ae:
log.err(ae)
if data_dir.startswith('~'):
data_dir = path.expanduser(data_dir)
elif data_dir.startswith('/'):
data_dir = path.join(getcwd(), data_dir)
elif data_dir.startswith('./'):
data_dir = path.abspath(data_dir)
else:
data_dir = path.join(getcwd(), data_dir)
try:
assert path.isdir(data_dir), "Could not find %s" % data_dir
except AssertionError, ae:
log.err(ae)
sys.exit()
else:
return data_dir
# XXX txtorcon handles this already.
# Also, this function is called write_torrc, but it has the 'bridget-tordata'
# prefix hardcoded inside of it.
def write_torrc(conf, data_dir=None):
"""
Create a torrc in our data_dir. If we don't yet have a data_dir, create a
temporary one. Any temporary files or folders are added to delete_list.
:param conf:
A :class:`ooni.lib.txtorcon.TorConfig` object, with all configuration
values saved.
:param data_dir:
The Tor DataDirectory to use.
:return: torrc, data_dir, delete_list
"""
try:
from os import write, close
from tempfile import mkstemp, mkdtemp
except ImportError, ie:
log.err(ie)
delete_list = []
if data_dir is None:
data_dir = mkdtemp(prefix='bridget-tordata')
delete_list.append(data_dir)
conf.DataDirectory = data_dir
(fd, torrc) = mkstemp(dir=data_dir)
delete_list.append(torrc)
write(fd, conf.create_torrc())
close(fd)
return torrc, data_dir, delete_list
def delete_files_or_dirs(delete_list):
"""
Given a list of files or directories to delete, delete all and suppress
all errors.
:param delete_list:
A list of files or directories to delete.
"""
try:
from os import unlink
from shutil import rmtree
except ImportError, ie:
log.err(ie)
for temp in delete_list:
try:
unlink(temp)
except OSError:
rmtree(temp, ignore_errors=True)
def remove_node_from_list(node, list):
for item in list: ## bridges don't match completely
if item.startswith(node): ## due to the :<port>.
try:
log.msg("Removing %s because it is a public relay" % node)
list.remove(item)
except ValueError, ve:
log.err(ve)
def remove_public_relays(state, bridges):
"""
Remove bridges from our bridge list which are also listed as public
relays. This must be called after Tor has fully bootstrapped and we have a
:class:`ooni.lib.txtorcon.TorState` with the
:attr:`ooni.lib.txtorcon.TorState.routers` attribute assigned.
XXX Does state.router.values() have all of the relays in the consensus, or
just the ones we know about so far?
XXX FIXME: There is a problem in that Tor needs a Bridge line to already be
configured in order to bootstrap. However, after bootstrapping, we grab the
microdescriptors of all the relays and check if any of our bridges are
listed as public relays. Because of this, the first bridge does not get
checked for being a relay.
"""
IPs = map(lambda addr: addr.split(':',1)[0], bridges['all'])
both = set(state.routers.values()).intersection(IPs)
if len(both) > 0:
try:
            updated = map(lambda node: remove_node_from_list(node, bridges['all']), both)
log.debug("Bridges in both: %s" % both)
log.debug("Updated = %s" % updated)
#if not updated:
# defer.returnValue(state)
#else:
# defer.returnValue(state)
return state
except Exception, e:
log.err("Removing public relays %s from bridge list failed:\n%s"
% (both, e))
# XXX It is unclear to me how all of these functions would be reused. Why must
# they be inside of a module?
def setup_done(proto):
log.msg("Setup Complete")
state = TorState(proto.tor_protocol)
state.post_bootstrap.addCallback(state_complete)
state.post_bootstrap.addErrback(setup_fail)
def setup_fail(proto):
log.msg("Setup Failed:\n%s" % proto)
return proto
#reactor.stop()
def state_complete(state):
"""Called when we've got a TorState."""
log.msg("We've completely booted up a Tor version %s at PID %d"
% (state.protocol.version, state.tor_pid))
log.msg("This Tor has the following %d Circuits:"
% len(state.circuits))
for circ in state.circuits.values():
log.msg("%s" % circ)
return state
def updates(_progress, _tag, _summary):
"""Log updates on the Tor bootstrapping process."""
log.msg("%d%%: %s" % (_progress, _summary))
def bootstrap(ctrl):
"""
Bootstrap Tor from an instance of
:class:`ooni.lib.txtorcon.TorControlProtocol`.
"""
conf = TorConfig(ctrl)
conf.post_bootstrap.addCallback(setup_done).addErrback(setup_fail)
log.msg("Tor process connected, bootstrapping ...")
# XXX txtorcon does this already for us.
def start_tor(reactor, config, control_port, tor_binary, data_dir,
report=None, progress=updates,
process_cb=None, process_eb=None):
"""
Use a txtorcon.TorConfig() instance, config, to write a torrc to a
tempfile in our DataDirectory, data_dir. If data_dir is None, a temp
directory will be created. Finally, create a TCP4ClientEndpoint at our
control_port, and connect it to our reactor and a spawned Tor
process. Compare with :meth:`txtorcon.launch_tor` for differences.
:param reactor:
An instance of class:`twisted.internet.reactor`.
:param config:
An instance of class:`txtorcon.TorConfig` with all torrc options
already configured. ivar:`config.ControlPort`,
ivar:`config.SocksPort`, ivar:`config.CookieAuthentication`, should
already be set, as well as ivar:`config.UseBridges` and
ivar:`config.Bridge` if bridges are to be used.
ivar:`txtorcon.DataDirectory` does not need to be set.
:param control_port:
The port number to use for Tor's ControlPort.
:param tor_binary:
The full path to the Tor binary to use.
:param data_dir:
The directory to use as Tor's DataDirectory.
:param report:
The class:`ooni.plugoo.reports.Report` instance.
:param progress:
A non-blocking function to handle bootstrapping updates, which takes
three parameters: _progress, _tag, and _summary.
:param process_cb:
The function to callback to after
class:`ooni.lib.txtorcon.TorProcessProtocol` returns with the fully
bootstrapped Tor process.
:param process_eb:
The function to errback to if
class:`ooni.lib.txtorcon.TorProcessProtocol` fails.
:return:
The result of the callback of a
class:`ooni.lib.txtorcon.TorProcessProtocol` which callbacks with a
class:`txtorcon.TorControlProtocol` as .protocol.
"""
try:
from functools import partial
from twisted.internet.endpoints import TCP4ClientEndpoint
from ooni.lib.txtorcon import TorProtocolFactory
from ooni.lib.txtorcon import TorProcessProtocol
except ImportError, ie:
log.err(ie)
## TODO: add option to specify an already existing torrc, which
## will require prior parsing to enforce necessary lines
(torrc, data_dir, to_delete) = write_torrc(config, data_dir)
log.msg("Starting Tor ...")
log.msg("Using the following as our torrc:\n%s" % config.create_torrc())
if report is None:
report = {'torrc': config.create_torrc()}
else:
report.update({'torrc': config.create_torrc()})
end_point = TCP4ClientEndpoint(reactor, 'localhost', control_port)
connection_creator = partial(end_point.connect, TorProtocolFactory())
process_protocol = TorProcessProtocol(connection_creator, progress)
process_protocol.to_delete = to_delete
if process_cb is not None and process_eb is not None:
process_protocol.connected_cb.addCallbacks(process_cb, process_eb)
reactor.addSystemEventTrigger('before', 'shutdown',
partial(delete_files_or_dirs, to_delete))
try:
transport = reactor.spawnProcess(process_protocol,
tor_binary,
args=(tor_binary,'-f',torrc),
env={'HOME': data_dir},
path=data_dir)
transport.closeStdin()
except RuntimeError, e:
log.err("Starting Tor failed:")
process_protocol.connected_cb.errback(e)
except NotImplementedError, e:
url = "http://starship.python.net/crew/mhammond/win32/Downloads.html"
log.msg("Running bridget on Windows requires pywin32: %s" % url)
process_protocol.connected_cb.errback(e)
return process_protocol.connected_cb
@defer.inlineCallbacks
def start_tor_filter_nodes(reactor, config, control_port, tor_binary,
data_dir, bridges):
"""
Bootstrap a Tor process and return a fully-setup
:class:`ooni.lib.txtorcon.TorState`. Then search for our bridges
to test in the list of known public relays,
:ivar:`ooni.lib.txtorcon.TorState.routers`, and remove any bridges
which are known public relays.
:param reactor:
The :class:`twisted.internet.reactor`.
:param config:
An instance of :class:`ooni.lib.txtorcon.TorConfig`.
:param control_port:
The port to use for Tor's ControlPort. If already configured in
the TorConfig instance, this can be given as
TorConfig.config.ControlPort.
:param tor_binary:
The full path to the Tor binary to execute.
:param data_dir:
The full path to the directory to use as Tor's DataDirectory.
:param bridges:
A dictionary which has a key 'all' which is a list of bridges to
test connecting to, e.g.:
bridges['all'] = ['1.1.1.1:443', '22.22.22.22:9001']
:return:
A fully initialized :class:`ooni.lib.txtorcon.TorState`.
"""
setup = yield start_tor(reactor, config, control_port,
tor_binary, data_dir,
process_cb=setup_done, process_eb=setup_fail)
filter_nodes = yield remove_public_relays(setup, bridges)
defer.returnValue(filter_nodes)
# XXX Why is this needed?
@defer.inlineCallbacks
def start_tor_with_timer(reactor, config, control_port, tor_binary, data_dir,
bridges, timeout):
"""
Start bootstrapping a Tor process wrapped with an instance of the class
decorator :func:`ooni.utils.timer.deferred_timeout` and complete callbacks
to either :func:`setup_done` or :func:`setup_fail`. Return a fully-setup
:class:`ooni.lib.txtorcon.TorState`. Then search for our bridges to test
in the list of known public relays,
:ivar:`ooni.lib.txtorcon.TorState.routers`, and remove any bridges which
are listed as known public relays.
:param reactor:
The :class:`twisted.internet.reactor`.
:param config:
An instance of :class:`ooni.lib.txtorcon.TorConfig`.
:param control_port:
The port to use for Tor's ControlPort. If already configured in
the TorConfig instance, this can be given as
TorConfig.config.ControlPort.
:param tor_binary:
The full path to the Tor binary to execute.
:param data_dir:
The full path to the directory to use as Tor's DataDirectory.
:param bridges:
A dictionary which has a key 'all' which is a list of bridges to
test connecting to, e.g.:
bridges['all'] = ['1.1.1.1:443', '22.22.22.22:9001']
:param timeout:
The number of seconds to attempt to bootstrap the Tor process before
raising a :class:`ooni.utils.timer.TimeoutError`.
:return:
If the timeout limit is not exceeded, return a fully initialized
:class:`ooni.lib.txtorcon.TorState`, else return None.
"""
error_msg = "Bootstrapping has exceeded the timeout limit..."
with_timeout = deferred_timeout(timeout, e=error_msg)(start_tor)
try:
setup = yield with_timeout(reactor, config, control_port, tor_binary,
data_dir, process_cb=setup_done,
process_eb=setup_fail)
except TimeoutError, te:
log.err(te)
defer.returnValue(None)
#except Exception, e:
# log.err(e)
# defer.returnValue(None)
else:
state = yield remove_public_relays(setup, bridges)
defer.returnValue(state)
# XXX This is a copy and paste of the above class with just an extra argument.
@defer.inlineCallbacks
def start_tor_filter_nodes_with_timer(reactor, config, control_port,
tor_binary, data_dir, bridges, timeout):
"""
Start bootstrapping a Tor process wrapped with an instance of the class
decorator :func:`ooni.utils.timer.deferred_timeout` and complete callbacks
to either :func:`setup_done` or :func:`setup_fail`. Then, filter our list
of bridges to remove known public relays by calling back to
:func:`remove_public_relays`. Return a fully-setup
:class:`ooni.lib.txtorcon.TorState`. Then search for our bridges to test
in the list of known public relays,
:ivar:`ooni.lib.txtorcon.TorState.routers`, and remove any bridges which
are listed as known public relays.
:param reactor:
The :class:`twisted.internet.reactor`.
:param config:
An instance of :class:`ooni.lib.txtorcon.TorConfig`.
:param control_port:
The port to use for Tor's ControlPort. If already configured in
the TorConfig instance, this can be given as
TorConfig.config.ControlPort.
:param tor_binary:
The full path to the Tor binary to execute.
:param data_dir:
The full path to the directory to use as Tor's DataDirectory.
:param bridges:
A dictionary which has a key 'all' which is a list of bridges to
test connecting to, e.g.:
bridges['all'] = ['1.1.1.1:443', '22.22.22.22:9001']
:param timeout:
The number of seconds to attempt to bootstrap the Tor process before
raising a :class:`ooni.utils.timer.TimeoutError`.
:return:
If the timeout limit is not exceeded, return a fully initialized
:class:`ooni.lib.txtorcon.TorState`, else return None.
"""
error_msg = "Bootstrapping has exceeded the timeout limit..."
with_timeout = deferred_timeout(timeout, e=error_msg)(start_tor_filter_nodes)
try:
state = yield with_timeout(reactor, config, control_port,
tor_binary, data_dir, bridges)
except TimeoutError, te:
log.err(te)
defer.returnValue(None)
#except Exception, e:
# log.err(e)
# defer.returnValue(None)
else:
defer.returnValue(state)
class CustomCircuit(CircuitListenerMixin):
"""
Utility class for controlling circuit building. See
'attach_streams_by_country.py' in the txtorcon documentation.
:param state:
A fully bootstrapped instance of :class:`ooni.lib.txtorcon.TorState`.
:param relays:
A dictionary containing a key 'all', which is a list of relays to
test connecting to.
:ivar waiting_circuits:
The list of circuits which we are waiting to attach to. You shouldn't
need to touch this.
"""
# XXX
# 14:57 < meejah> to build a custom circuit (with no streams) in txtorcon,
# call TorState.build_circuit -- the Deferred callbacks with the circid
implements(IStreamAttacher)
def __init__(self, state, relays=None):
self.state = state
self.waiting_circuits = []
self.relays = relays
def waiting_on(self, circuit):
"""
Whether or not we are waiting on the given circuit before attaching to
it.
:param circuit:
An item from :ivar:`ooni.lib.txtorcon.TorState.circuits`.
:return:
True if we are waiting on the circuit, False if not waiting.
"""
for (circid, d) in self.waiting_circuits:
if circuit.id == circid:
return True
return False
def circuit_extend(self, circuit, router):
"ICircuitListener"
if circuit.purpose != 'GENERAL':
return
if self.waiting_on(circuit):
log.msg("Circuit %d (%s)" % (circuit.id, router.id_hex))
def circuit_built(self, circuit):
"ICircuitListener"
if circuit.purpose != 'GENERAL':
return
log.msg("Circuit %s built ..." % circuit.id)
log.msg("Full path of %s: %s" % (circuit.id, circuit.path))
for (circid, d) in self.waiting_circuits:
if circid == circuit.id:
self.waiting_circuits.remove((circid, d))
d.callback(circuit)
def circuit_failed(self, circuit, reason):
"""
If building a circuit has failed, try to remove it from our list of
:ivar:`waiting_circuits`, else request to build it.
:param circuit:
An item from :ivar:`ooni.lib.txtorcon.TorState.circuits`.
:param reason:
A :class:`twisted.python.fail.Failure` instance.
:return:
None
"""
if self.waiting_on(circuit):
log.msg("Circuit %s failed for reason %s" % (circuit.id, reason))
circid, d = None, None
for c in self.waiting_circuits:
if c[0] == circuit.id:
circid, d = c
if d is None:
raise Exception("Expected to find circuit.")
self.waiting_circuits.remove((circid, d))
log.msg("Trying to build a circuit for %s" % circid)
self.request_circuit_build(d)
def check_circuit_route(self, router):
"""
Check if a relay is a hop in one of our already built circuits.
:param router:
An item from the list
:func:`ooni.lib.txtorcon.TorState.routers.values()`.
"""
for circ in self.state.circuits.values():
if router in circ.path:
#router.update() ## XXX can i use without args? no.
TorInfo.dump(self)
def request_circuit_build(self, deferred, path=None):
"""
Request a custom circuit.
:param deferred:
A :class:`twisted.internet.defer.Deferred` for this circuit.
:param path:
A list of router ids to build a circuit from. The length of this
list must be at least three.
"""
if path is None:
pick = self.relays['all'].pop
n = self.state.entry_guards.values()
choose = random.choice
first, middle, last = (None for i in range(3))
if self.relays['remaining']() >= 3:
first, middle, last = (pick() for i in range(3))
elif self.relays['remaining']() < 3:
first = choose(n)
middle = pick()
                if self.relays['remaining']() == 2:
middle, last = (pick() for i in range(2))
                elif self.relays['remaining']() == 1:
middle = pick()
last = choose(n)
else:
log.msg("Qu'est-que fuque?")
else:
middle, last = (random.choice(self.state.routers.values())
for i in range(2))
path = [first, middle, last]
else:
assert isinstance(path, list), \
"Circuit path must be a list of relays!"
assert len(path) >= 3, \
"Circuit path must be at least three hops!"
log.msg("Requesting a circuit: %s"
% '->'.join(map(lambda node: node, path)))
class AppendWaiting:
def __init__(self, attacher, deferred):
self.attacher = attacher
self.d = deferred
def __call__(self, circ):
"""
Return from build_circuit is a Circuit, however,
we want to wait until it is built before we can
issue an attach on it and callback to the Deferred
we issue here.
"""
log.msg("Circuit %s is in progress ..." % circ.id)
self.attacher.waiting_circuits.append((circ.id, self.d))
return self.state.build_circuit(path).addCallback(
AppendWaiting(self, deferred)).addErrback(
log.err)
class TxtorconImportError(ImportError):
"""
Raised when ooni.lib.txtorcon cannot be imported from. Checks our current
working directory and the path given to see if txtorcon has been
initialized via /ooni/lib/Makefile.
"""
from os import getcwd, path
cwd, tx = getcwd(), 'lib/txtorcon/torconfig.py'
try:
log.msg("Unable to import from ooni.lib.txtorcon")
if cwd.endswith('ooni'):
check = path.join(cwd, tx)
elif cwd.endswith('utils'):
check = path.join(cwd, '../'+tx)
else:
check = path.join(cwd, 'ooni/'+tx)
assert path.isfile(check)
except:
log.msg("Error: Some OONI libraries are missing!")
log.msg("Please go to /ooni/lib/ and do \"make all\"")
class PTNoBridgesException(Exception):
"""Raised when a pluggable transport is specified, but not bridges."""
def __init__(self):
log.msg("Pluggable transport requires the bridges option")
return sys.exit()
class PTNotFoundException(Exception):
def __init__(self, transport_type):
m = "Pluggable Transport type %s was unaccounted " % transport_type
m += "for, please contact isis(at)torproject(dot)org and it will "
m += "get included."
log.msg("%s" % m)
return sys.exit()
@defer.inlineCallbacks
def __start_tor_with_timer__(reactor, config, control_port, tor_binary,
data_dir, bridges=None, relays=None, timeout=None,
retry=None):
"""
A wrapper for :func:`start_tor` which wraps the bootstrapping of a Tor
process and its connection to a reactor with a
:class:`twisted.internet.defer.Deferred` class decorator utility,
:func:`ooni.utils.timer.deferred_timeout`, and a mechanism for resets.
## XXX fill me in
"""
raise NotImplementedError
class RetryException(Exception):
pass
import sys
from ooni.utils.timer import deferred_timeout, TimeoutError
def __make_var__(old, default, _type):
if old is not None:
assert isinstance(old, _type)
new = old
else:
new = default
return new
reactor = reactor
timeout = __make_var__(timeout, 120, int)
retry = __make_var__(retry, 1, int)
with_timeout = deferred_timeout(timeout)(start_tor)
@defer.inlineCallbacks
def __start_tor__(rc=reactor, cf=config, cp=control_port, tb=tor_binary,
dd=data_dir, br=bridges, rl=relays, cb=setup_done,
eb=setup_fail, af=remove_public_relays, retry=retry):
try:
setup = yield with_timeout(rc,cf,cp,tb,dd)
except TimeoutError:
retry -= 1
defer.returnValue(retry)
else:
if setup.callback:
setup = yield cb(setup)
elif setup.errback:
setup = yield eb(setup)
else:
setup = setup
if br is not None:
state = af(setup,br)
else:
state = setup
defer.returnValue(state)
@defer.inlineCallbacks
def __try_until__(tries):
result = yield __start_tor__()
try:
assert isinstance(result, int)
except AssertionError:
defer.returnValue(result)
else:
if result >= 0:
tried = yield __try_until__(result)
defer.returnValue(tried)
else:
raise RetryException
try:
tried = yield __try_until__(retry)
except RetryException:
log.msg("All retry attempts to bootstrap Tor have timed out.")
log.msg("Exiting ...")
defer.returnValue(sys.exit())
else:
defer.returnValue(tried)
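# Hedged sketch (illustrative, not part of the original module): one plausible way to drive the
# bootstrapping helpers above from a Twisted reactor. The control port, binary path, data
# directory and bridge addresses are all assumptions.
@defer.inlineCallbacks
def _bootstrap_sketch(reactor):
    config = TorConfig()
    config.ControlPort = 9052
    config.SocksPort = 9049
    bridges = {'all': ['1.1.1.1:443', '22.22.22.22:9001']}
    state = yield start_tor_with_timer(reactor, config, 9052, '/usr/bin/tor',
                                       '/tmp/bridget-data', bridges, timeout=120)
    defer.returnValue(state)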
| 37.190071
| 81
| 0.625119
|
3fadc85aee3db6bf35271557e6c831d34f114617
| 746
|
py
|
Python
|
PyInstaller/hooks/hook-pandas.io.formats.style.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 9,267
|
2015-01-01T04:08:45.000Z
|
2022-03-31T11:42:38.000Z
|
PyInstaller/hooks/hook-pandas.io.formats.style.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 5,150
|
2015-01-01T12:09:56.000Z
|
2022-03-31T18:06:12.000Z
|
PyInstaller/hooks/hook-pandas.io.formats.style.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 2,101
|
2015-01-03T10:25:27.000Z
|
2022-03-30T11:04:42.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_data_files
# This module indirectly imports jinja2
hiddenimports = ['jinja2']
# It also requires template file stored in pandas/io/formats/templates
datas = collect_data_files('pandas.io.formats')
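# Hedged note (illustrative only): with this hook on PyInstaller's hook path, a build of a
# hypothetical script that does `import pandas.io.formats.style` should pick up jinja2 and the
# bundled templates automatically, e.g.:
#
#   pyinstaller --onefile my_script.py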
| 39.263158
| 78
| 0.620643
|
ce97328e2889f0a8a156a8b65bbc7de8f47a862f
| 18,373
|
py
|
Python
|
NeuroPy2.py
|
vlstyxz/Brain-Computer-Interface-with-Neurosky
|
185d31a6e8d044fb766838947c37eec0af8f84f4
|
[
"MIT"
] | 9
|
2019-09-29T20:26:04.000Z
|
2022-02-25T19:01:03.000Z
|
NeuroPy2.py
|
vlstyxz/Brain-Computer-Interface-with-Neurosky
|
185d31a6e8d044fb766838947c37eec0af8f84f4
|
[
"MIT"
] | null | null | null |
NeuroPy2.py
|
vlstyxz/Brain-Computer-Interface-with-Neurosky
|
185d31a6e8d044fb766838947c37eec0af8f84f4
|
[
"MIT"
] | 10
|
2019-09-26T18:21:36.000Z
|
2021-09-10T19:05:22.000Z
|
import serial
import time
import sys
from threading import Thread
# Byte codes
CONNECT = '\xc0'
DISCONNECT = '\xc1'
AUTOCONNECT = '\xc2'
SYNC = '\xaa'
EXCODE = '\x55'
POOR_SIGNAL = '\x02'
ATTENTION = '\x04'
MEDITATION = '\x05'
BLINK = '\x16'
HEADSET_CONNECTED = '\xd0'
HEADSET_NOT_FOUND = '\xd1'
HEADSET_DISCONNECTED = '\xd2'
REQUEST_DENIED = '\xd3'
STANDBY_SCAN = '\xd4'
RAW_VALUE = '\x80'
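# Hedged sketch (illustrative only, not part of the original module): one way the NeuroPy class
# defined below is typically driven. The serial port, baud rate and timings are assumptions.
def _example_usage():
    def on_attention(value):
        print("attention:", value)
    headset = NeuroPy("/dev/rfcomm0", 57600)
    headset.setCallBack("attention", on_attention)
    headset.start()
    time.sleep(10)
    headset.stop()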
class NeuroPy(object):
"""NeuroPy libraby, to get data from neurosky mindwave.
Initialising: object1=NeuroPy("COM6",57600) #windows
After initialising , if required the callbacks must be set
then using the start method the library will start fetching data from mindwave
i.e. object1.start()
similarly stop method can be called to stop fetching the data
i.e. object1.stop()
The data from the device can be obtained using either of the following methods or both of them together:
Obtaining value: variable1=object1.attention #to get value of attention
#other variables: attention,meditation,rawValue,delta,theta,lowAlpha,highAlpha,lowBeta,highBeta,lowGamma,midGamma, poorSignal and blinkStrength
Setting callback:a call back can be associated with all the above variables so that a function is called when the variable is updated. Syntax: setCallBack("variable",callback_function)
for eg. to set a callback for attention data the syntax will be setCallBack("attention",callback_function)"""
__attention = 0
__meditation = 0
__rawValue = 0
__delta = 0
__theta = 0
__lowAlpha = 0
__highAlpha = 0
__lowBeta = 0
__highBeta = 0
__lowGamma = 0
__midGamma = 0
__poorSignal = 0
__blinkStrength = 0
callBacksDictionary = {} # keep a track of all callbacks
def __init__(self, port = None, baudRate=115200, devid=None):
if port == None:
platform = sys.platform
if platform == "win32":
port = "COM6"
elif platform.startswith("linux"):
port = "/dev/rfcomm0"
else:
port = "/dev/cu.MindWaveMobile-SerialPo"
self.__devid = devid
self.__serialPort = port
self.__serialBaudRate = baudRate
self.__packetsReceived = 0
self.__parserThread = None
self.__threadRun = False
self.__srl = None
self.__connected = False
def __del__(self):
if self.__threadRun == True:
self.stop()
def disconnect(self):
self.__srl.write(DISCONNECT)
def connect(self):
if not self.__devid:
self.__connected = True
return # Only connect RF devices
self.__srl.write(''.join([CONNECT, self.__devid.decode('hex')]))
def start(self):
# Try to connect to serial port and start a separate thread
# for data collection
if self.__threadRun == True:
print("Mindwave has already started!")
return
if self.__srl == None:
try:
self.__srl = serial.Serial(
self.__serialPort, self.__serialBaudRate)
except Exception as e:
print (str(e))
return
else:
self.__srl.open()
self.__srl.flushInput()
if self.__devid:
self.connect();
self.__packetsReceived = 0
self.__verbosePacketsReceived = 0
self.__parserThread = Thread(target=self.__packetParser, args=())
self.__threadRun = True
self.__parserThread.start()
def __packetParser(self):
"packetParser runs continously in a separate thread to parse packets from mindwave and update the corresponding variables"
while self.__threadRun:
p1 = self.__srl.read(1).hex() # read first 2 packets
p2 = self.__srl.read(1).hex()
while (p1 != 'aa' or p2 != 'aa') and self.__threadRun:
p1 = p2
p2 = self.__srl.read(1).hex()
else:
if self.__threadRun == False:
break
# a valid packet is available
self.__packetsReceived += 1
payload = []
checksum = 0
payloadLength = int(self.__srl.read(1).hex(), 16) #PLENGTH
#print('payloadLength: ' + str(payloadLength))
for i in range(payloadLength):
tempPacket = self.__srl.read(1).hex()
payload.append(tempPacket)
checksum += int(tempPacket, 16) #sum every byte in the payload
checksum = ~checksum & 0x000000ff #take the lowest 8 bits and invert them
if checksum == int(self.__srl.read(1).hex(), 16): #read the next byte of the package after the payload and check it with the checksum just calculated
i = 0
# print('payload ' + str(i) + ' = ' + str(payload[i]))
while i < payloadLength:
while payload[i] == '55':
i = i+1
code = payload[i]
#print('packet ' + str(self.__packetsReceived) + ' code==' + str(code))
if (code == 'd0'):
print("Headset connected!")
self.__connected = True
elif (code == 'd1'):
print("Headset not found, reconnecting")
self.connect()
elif(code == 'd2'):
print("Disconnected!")
self.connect()
elif(code == 'd3'):
print("Headset denied operation!")
elif(code == 'd4'):
if payload[2] == 0 and not self.__connected:
print("Idle, trying to reconnect");
self.connect();
elif(code == '02'): # poorSignal
i = i + 1
self.poorSignal = int(payload[i], 16)
elif(code == 'ba'): # unknown
i = i + 1
self.unknown_ba = int(payload[i], 16)
# print('self.unknown_ba = ' + str(self.unknown_ba))
elif(code == 'bc'): # unknown
i = i + 1
self.unknown_bc = int(payload[i], 16)
# print('self.unknown_bc = ' + str(self.unknown_bc))
elif(code == '04'): # attention
i = i + 1
self.attention = int(payload[i], 16)
elif(code == '05'): # meditation
i = i + 1
self.meditation = int(payload[i], 16)
elif(code == '16'): # blink strength
i = i + 1
self.blinkStrength = int(payload[i], 16)
elif(code == '80'): # raw value
i = i + 1 # for length/it is not used since length =1 byte long and always=2
#print('verbose packet length: ' + str(int(payload[i], 16)) + ' code==' + str(code))
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
rawVal = val0 * 256 + int(payload[i], 16)
if rawVal > 32768:
rawVal = rawVal - 65536
self.rawValue = rawVal
#print('self.rawValue = ' + str(self.rawValue))
#print('self.rawValue = ' + str(self.rawValue))
elif(code == '83'): # ASIC_EEG_POWER
self.__verbosePacketsReceived += 1
#print('raw packet ' + str(self.__packetsReceived) + ' code==' + str(code))
#print('verbose packet ' + str(self.__verbosePacketsReceived) + ' code==' + str(code))
i = i + 1 # for length/it is not used since length =1 byte long and always=2
# delta:
#print('verbose packet length: ' + str(int(payload[i], 16)) + ' code==' + str(code))
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.delta = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.delta = ' + str(self.delta))
# theta:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.theta = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.theta = ' + str(self.theta))
# lowAlpha:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.lowAlpha = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.lowAlpha = ' + str(self.lowAlpha))
# highAlpha:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.highAlpha = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.highAlpha = ' + str(self.highAlpha))
# lowBeta:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.lowBeta = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.lowBeta = ' + str(self.lowBeta))
# highBeta:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.highBeta = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.highBeta = ' + str(self.highBeta))
# lowGamma:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.lowGamma = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.lowGamma = ' + str(self.lowGamma))
# midGamma:
i = i + 1
val0 = int(payload[i], 16)
i = i + 1
val1 = int(payload[i], 16)
i = i + 1
self.midGamma = val0 * 65536 + \
val1 * 256 + int(payload[i], 16)
#print('self.midGamma = ' + str(self.midGamma))
else:
pass
i = i + 1
else:
print('wrong checksum!!!')
def stop(self):
# Stops a running parser thread
if self.__threadRun == True:
self.__threadRun = False
self.__parserThread.join()
self.__srl.close()
def setCallBack(self, variable_name, callback_function):
"""Setting callback:a call back can be associated with all the above variables so that a function is called when the variable is updated. Syntax: setCallBack("variable",callback_function)
for eg. to set a callback for attention data the syntax will be setCallBack("attention",callback_function)"""
self.callBacksDictionary[variable_name] = callback_function
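# Example (a sketch, not part of the original docstring): registering a callback on an
# existing instance of this class. `headset` is a hypothetical variable bound to an
# already-constructed object of this class; the callback fires each time a new
# attention value is parsed from the serial stream.
#
#     def on_attention(value):
#         print("attention:", value)
#
#     headset.setCallBack("attention", on_attention)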
# setting getters and setters for all variables
# packets received
@property
def packetsReceived(self):
return self.__packetsReceived
@property
def verbosePacketsReceived(self):
return self.__verbosePacketsReceived
@property
def bytesAvailable(self):
if self.__threadRun:
return self.__srl.inWaiting()
else:
return -1
# attention
@property
def attention(self):
"Get value for attention"
return self.__attention
@attention.setter
def attention(self, value):
self.__attention = value
# if callback has been set, execute the function
if "attention" in self.callBacksDictionary:
self.callBacksDictionary["attention"](self.__attention)
# meditation
@property
def meditation(self):
"Get value for meditation"
return self.__meditation
@meditation.setter
def meditation(self, value):
self.__meditation = value
# if callback has been set, execute the function
if "meditation" in self.callBacksDictionary:
self.callBacksDictionary["meditation"](self.__meditation)
# rawValue
@property
def rawValue(self):
"Get value for rawValue"
return self.__rawValue
@rawValue.setter
def rawValue(self, value):
self.__rawValue = value
# if callback has been set, execute the function
if "rawValue" in self.callBacksDictionary:
self.callBacksDictionary["rawValue"](self.__rawValue)
# delta
@property
def delta(self):
"Get value for delta"
return self.__delta
@delta.setter
def delta(self, value):
self.__delta = value
# if callback has been set, execute the function
if "delta" in self.callBacksDictionary:
self.callBacksDictionary["delta"](self.__delta)
# theta
@property
def theta(self):
"Get value for theta"
return self.__theta
@theta.setter
def theta(self, value):
self.__theta = value
# if callback has been set, execute the function
if "theta" in self.callBacksDictionary:
self.callBacksDictionary["theta"](self.__theta)
# lowAlpha
@property
def lowAlpha(self):
"Get value for lowAlpha"
return self.__lowAlpha
@lowAlpha.setter
def lowAlpha(self, value):
self.__lowAlpha = value
# if callback has been set, execute the function
if "lowAlpha" in self.callBacksDictionary:
self.callBacksDictionary["lowAlpha"](self.__lowAlpha)
# highAlpha
@property
def highAlpha(self):
"Get value for highAlpha"
return self.__highAlpha
@highAlpha.setter
def highAlpha(self, value):
self.__highAlpha = value
# if callback has been set, execute the function
if "highAlpha" in self.callBacksDictionary:
self.callBacksDictionary["highAlpha"](self.__highAlpha)
# lowBeta
@property
def lowBeta(self):
"Get value for lowBeta"
return self.__lowBeta
@lowBeta.setter
def lowBeta(self, value):
self.__lowBeta = value
# if callback has been set, execute the function
if "lowBeta" in self.callBacksDictionary:
self.callBacksDictionary["lowBeta"](self.__lowBeta)
# highBeta
@property
def highBeta(self):
"Get value for highBeta"
return self.__highBeta
@highBeta.setter
def highBeta(self, value):
self.__highBeta = value
# if callback has been set, execute the function
if "highBeta" in self.callBacksDictionary:
self.callBacksDictionary["highBeta"](self.__highBeta)
# lowGamma
@property
def lowGamma(self):
"Get value for lowGamma"
return self.__lowGamma
@lowGamma.setter
def lowGamma(self, value):
self.__lowGamma = value
# if callback has been set, execute the function
if "lowGamma" in self.callBacksDictionary:
self.callBacksDictionary["lowGamma"](self.__lowGamma)
# midGamma
@property
def midGamma(self):
"Get value for midGamma"
return self.__midGamma
@midGamma.setter
def midGamma(self, value):
self.__midGamma = value
# if callback has been set, execute the function
if "midGamma" in self.callBacksDictionary:
self.callBacksDictionary["midGamma"](self.__midGamma)
# poorSignal
@property
def poorSignal(self):
"Get value for poorSignal"
return self.__poorSignal
@poorSignal.setter
def poorSignal(self, value):
self.__poorSignal = value
# if callback has been set, execute the function
if "poorSignal" in self.callBacksDictionary:
self.callBacksDictionary["poorSignal"](self.__poorSignal)
# blinkStrength
@property
def blinkStrength(self):
"Get value for blinkStrength"
return self.__blinkStrength
@blinkStrength.setter
def blinkStrength(self, value):
self.__blinkStrength = value
# if callback has been set, execute the function
if "blinkStrength" in self.callBacksDictionary:
self.callBacksDictionary["blinkStrength"](self.__blinkStrength)
| 38.277083
| 195
| 0.49834
|
6a372a1be2122681cd19d9a990a07ced5ea41733
| 9,497
|
py
|
Python
|
tools/compare_acetz/zptdgenerator.py
|
arkhipenko/AceTime
|
bc6e6aa530e309b62a204b7574322ba013066b06
|
[
"MIT"
] | 1
|
2021-02-23T06:17:36.000Z
|
2021-02-23T06:17:36.000Z
|
tools/compare_acetz/zptdgenerator.py
|
arkhipenko/AceTime
|
bc6e6aa530e309b62a204b7574322ba013066b06
|
[
"MIT"
] | null | null | null |
tools/compare_acetz/zptdgenerator.py
|
arkhipenko/AceTime
|
bc6e6aa530e309b62a204b7574322ba013066b06
|
[
"MIT"
] | null | null | null |
# Copyright 2019 Brian T. Park
#
# MIT License
"""
Implements the TestDataGenerator to generate the validation test data using
acetz, which uses ZoneSpecifier. Pulling in ZoneSpecifier also means that it
pulls in the data structures defined by zonedbpy.
"""
from typing import Dict
from typing import List
from typing import Optional
from typing import cast
import logging
from datetime import tzinfo, datetime, timezone, timedelta
import acetz
from data_types.at_types import SECONDS_SINCE_UNIX_EPOCH
from zone_processor.zone_specifier import ZoneSpecifier
from zone_processor.zone_specifier import DateTuple
from zone_processor.inline_zone_info import ZoneInfoMap
from zonedbpy.zone_infos import ZONE_INFO_MAP
from validation.data import TestItem, TestData, ValidationData
class TestDataGenerator:
"""Generate the validation test data for all zones specified by the
'zone_infos'. The Transitions are extracted from the ZoneSpecifier and the
UTC offsets determined by acetz.
"""
def __init__(
self,
start_year: int,
until_year: int,
sampling_interval: int,
zone_infos: ZoneInfoMap = cast(ZoneInfoMap, ZONE_INFO_MAP),
):
self.start_year = start_year
self.until_year = until_year
self.sampling_interval = timedelta(hours=sampling_interval)
self.zone_infos = zone_infos
self.viewing_months = 14
def create_test_data(self, zones: List[str]) -> None:
test_data: TestData = {}
for zone_name in zones:
test_items = self._create_test_data_for_zone(zone_name)
if test_items:
test_data[zone_name] = test_items
self.test_data = test_data
def get_validation_data(self) -> ValidationData:
return {
'start_year': self.start_year,
'until_year': self.until_year,
'source': 'acetz',
'version': str(acetz.__version__),
'has_valid_abbrev': True,
'has_valid_dst': True,
'test_data': self.test_data,
}
def _create_test_data_for_zone(
self,
zone_name: str,
) -> Optional[List[TestItem]]:
"""Create the TestItems for a specific zone.
"""
logging.info(f"_create_test_items(): {zone_name}")
zone_info = self.zone_infos.get(zone_name)
if not zone_info:
logging.error(f"Zone '{zone_name}' not found in acetz package")
return None
tz = acetz.gettz(zone_name)
zone_specifier = ZoneSpecifier(zone_info)
return self._create_transition_test_items(
zone_name, tz, zone_specifier)
def _create_transition_test_items(
self,
zone_name: str,
tz: tzinfo,
zone_specifier: ZoneSpecifier
) -> List[TestItem]:
"""Create a TestItem for the tz for each zone, for each year from
start_year to until_year, exclusive. The following test samples are
created:
* One test point for each month, on the first of the month.
* One test point for Dec 31, 23:00 for each year.
* A test point at the transition from DST to Standard, or vice versa.
* A test point one second before the transition.
Each TestData is annotated as:
* 'A', 'a': pre-transition
* 'B', 'b': post-transition
* 'S': a monthly test sample
* 'Y': end of year test sample
For [2000, 2038], this generates about 100,000 data points.
"""
items_map: Dict[int, TestItem] = {}
for year in range(self.start_year, self.until_year):
# Skip start_year when viewing months is 36, because it needs data
# for (start_year - 3), but ZoneSpecifier won't generate data for
# years that old.
if self.viewing_months == 36 and year == self.start_year:
continue
# Add samples just before and just after the DST transition.
zone_specifier.init_for_year(year)
for transition in zone_specifier.transitions:
# Some Transitions from ZoneSpecifier are in previous or post
# years (e.g. viewing_months = [14, 36]), so skip those.
start = transition.start_date_time
transition_year = start.y
if transition_year != year:
continue
# If viewing_months is 13 or 36, don't look at Transitions at
# the beginning of the year since those have been already added.
if self.viewing_months in [13, 36]:
if start.M == 1 and start.d == 1 and start.ss == 0:
continue
epoch_seconds = transition.start_epoch_second
# Add a test data just before the transition
test_item = self._create_test_item_from_epoch_seconds(
tz, epoch_seconds - 1, 'A')
self._add_test_item(items_map, test_item)
# Add a test data at the transition itself (which will
# normally be shifted forward or backwards).
test_item = self._create_test_item_from_epoch_seconds(
tz, epoch_seconds, 'B')
self._add_test_item(items_map, test_item)
# Add one sample test point on the first of each month
for month in range(1, 13):
tt = DateTuple(y=year, M=month, d=1, ss=0, f='w')
test_item = self._create_test_item_from_datetime(
tz, tt, type='S')
self._add_test_item(items_map, test_item)
# Add a sample test point at the end of the year.
tt = DateTuple(y=year, M=12, d=31, ss=23 * 3600, f='w')
test_item = self._create_test_item_from_datetime(
tz, tt, type='Y')
self._add_test_item(items_map, test_item)
# Return the TestItems ordered by epoch
return [items_map[x] for x in sorted(items_map)]
def _create_test_item_from_datetime(
self,
tz: tzinfo,
tt: DateTuple,
type: str,
) -> TestItem:
"""Create a TestItem for the given DateTuple in the local time zone.
"""
# TODO(bpark): It is not clear that this produces the desired
# datetime for the given tzinfo if tz is an acetz. But I hope it
# gives a datetime that's roughly around that time, which is good
# enough for unit testing.
dt = datetime(tt.y, tt.M, tt.d, tt.ss // 3600, tzinfo=tz)
unix_seconds = int(dt.timestamp())
epoch_seconds = unix_seconds - SECONDS_SINCE_UNIX_EPOCH
return self._create_test_item_from_epoch_seconds(
tz, epoch_seconds, type)
def _create_test_item_from_epoch_seconds(
self,
tz: tzinfo,
epoch_seconds: int,
type: str,
) -> TestItem:
"""Determine the expected date and time components for the given
'epoch_seconds' for the given 'tz'. The 'epoch_seconds' is the
transition time calculated by the ZoneSpecifier class (which is the
Python implementation of the C++ ExtendedZoneSpecifier class).
Return the TestItem with the following fields:
epoch: epoch seconds from AceTime epoch (2000-01-01T00:00:00Z)
total_offset: the expected total UTC offset at epoch_seconds
dst_offset: the expected DST offset at epoch_seconds
y, M, d, h, m, s: expected date&time components at epoch_seconds
type: 'a', 'b', 'A', 'B', 'S', 'Y'
"""
# Convert AceTime epoch_seconds to Unix epoch_seconds.
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
# Get the transition time, then feed that into acetz to get the
# total offset and DST shift
utc_dt = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
dt = utc_dt.astimezone(tz)
total_offset = int(dt.utcoffset().total_seconds()) # type: ignore
dst_offset = int(dt.dst().total_seconds()) # type: ignore
assert dt.tzinfo
abbrev = dt.tzinfo.tzname(dt)
return {
'epoch': epoch_seconds,
'total_offset': total_offset,
'dst_offset': dst_offset,
'y': dt.year,
'M': dt.month,
'd': dt.day,
'h': dt.hour,
'm': dt.minute,
's': dt.second,
'abbrev': abbrev,
'type': type,
}
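# Worked example (for orientation only): AceTime epoch_seconds == 0 corresponds to
# 2000-01-01T00:00:00Z, i.e. Unix seconds 0 + SECONDS_SINCE_UNIX_EPOCH, where
# SECONDS_SINCE_UNIX_EPOCH is expected to be the 946684800 seconds between
# 1970-01-01 and 2000-01-01 UTC.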
@staticmethod
def _add_test_item(items_map: Dict[int, TestItem], item: TestItem) -> None:
current = items_map.get(item['epoch'])
if current:
# If a duplicate TestItem exists for epoch, then check that the
# data is exactly the same.
if (
current['total_offset'] != item['total_offset']
or current['dst_offset'] != item['dst_offset']
or current['y'] != item['y'] or current['M'] != item['M']
or current['d'] != item['d'] or current['h'] != item['h']
or current['m'] != item['m'] or current['s'] != item['s']
):
raise Exception(f'Item {current} does not match item {item}')
# 'A' and 'B' take precedence over 'S' or 'Y'
if item['type'] in ['A', 'B']:
items_map[item['epoch']] = item
else:
items_map[item['epoch']] = item
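# Example usage (a sketch; the zone names and sampling interval below are illustrative,
# not prescribed by this module):
#
#     generator = TestDataGenerator(start_year=2000, until_year=2038, sampling_interval=22)
#     generator.create_test_data(['America/Los_Angeles', 'Europe/Paris'])
#     validation_data = generator.get_validation_data()
#     # validation_data['test_data'] maps each zone name to its list of TestItems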
| 39.570833
| 80
| 0.604717
|
cef18d96e6bb5b3bcea5ab0bc764ab562701d926
| 30,773
|
py
|
Python
|
test/functional/test_admin_features.py
|
BalthazarPavot/galaxy_project_reports
|
aa397de11a9a3425f85c701087af4c5d165b571f
|
[
"CC-BY-3.0"
] | 1
|
2019-07-03T08:13:57.000Z
|
2019-07-03T08:13:57.000Z
|
test/functional/test_admin_features.py
|
LainOldAccound/galaxy_project_reports
|
aa397de11a9a3425f85c701087af4c5d165b571f
|
[
"CC-BY-3.0"
] | null | null | null |
test/functional/test_admin_features.py
|
LainOldAccound/galaxy_project_reports
|
aa397de11a9a3425f85c701087af4c5d165b571f
|
[
"CC-BY-3.0"
] | null | null | null |
from base.twilltestcase import TwillTestCase
from functional import database_contexts
import galaxy.model
from base.test_db_util import (
get_user,
get_private_role,
get_all_histories_for_user,
get_latest_history_for_user,
get_default_history_permissions_by_history,
get_latest_dataset,
refresh,
flush,
get_group_by_name,
get_role_by_name,
get_user_group_associations_by_group,
get_default_history_permissions_by_role,
get_default_user_permissions_by_role,
get_user_role_associations_by_role,
get_group_role_associations_by_group,
get_dataset_permissions_by_role,
get_group_role_associations_by_role,
)
# Globals setup by these tests.
regular_user1 = regular_user2 = regular_user3 = admin_user = None
role_one = role_two = role_three = None
group_zero = group_one = group_two = None
class TestDataSecurity( TwillTestCase ):
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
self.logout()
self.login( email='test1@bx.psu.edu', username='regular-user1' )
global regular_user1
regular_user1 = get_user( 'test1@bx.psu.edu' )
assert regular_user1 is not None, 'Problem retrieving user with email "test1@bx.psu.edu" from the database'
self.logout()
self.login( email='test2@bx.psu.edu', username='regular-user2' )
global regular_user2
regular_user2 = get_user( 'test2@bx.psu.edu' )
assert regular_user2 is not None, 'Problem retrieving user with email "test2@bx.psu.edu" from the database'
self.logout()
self.login( email='test@bx.psu.edu', username='admin-user' )
global admin_user
admin_user = get_user( 'test@bx.psu.edu' )
assert admin_user is not None, 'Problem retrieving user with email "test@bx.psu.edu" from the database'
def test_005_create_new_user_account_as_admin( self ):
"""Testing creating a new user account as admin"""
# Logged in as admin_user
email = 'test3@bx.psu.edu'
password = 'testuser'
# Test setting the user name to one that is already taken. Note that the account must not exist in order
# for this test to work as desired, so the email we're passing is important...
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email='diff@you.com',
password=password,
username='admin-user',
redirect='' )
if not username_taken:
error_msg = "The public name (%s) is already being used by another user, but no error was displayed" % 'admin-user'
raise AssertionError( error_msg )
# Test setting the user name to an invalid one. Note that the account must not exist in order
# for this test to work as desired, so the email we're passing is important...
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email='diff@you.com',
password=password,
username='h',
redirect='' )
if not invalid_username:
raise AssertionError( "The public name (%s) is is invalid, but no error was displayed" % 'diff@you.com' )
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email=email,
password=password,
username='regular-user3',
redirect='' )
# Get the user object for later tests
global regular_user3
regular_user3 = get_user( email )
assert regular_user3 is not None, 'Problem retrieving user with email "%s" from the database' % email
global regular_user3_private_role
regular_user3_private_role = get_private_role( regular_user3 )
# Make sure DefaultUserPermissions were created
if not regular_user3.default_permissions:
raise AssertionError( 'No DefaultUserPermissions were created for user %s when the admin created the account' % email )
# Make sure a private role was created for the user
if not regular_user3.roles:
raise AssertionError( 'No UserRoleAssociations were created for user %s when the admin created the account' % email )
if not previously_created and len( regular_user3.roles ) != 1:
raise AssertionError( '%d UserRoleAssociations were created for user %s when the admin created the account ( should have been 1 )'
% ( len( regular_user3.roles ), regular_user3.email ) )
for ura in regular_user3.roles:
role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( ura.role_id )
if not previously_created and role.type != 'private':
raise AssertionError( 'Role created for user %s when the admin created the account is not private, type is %s'
% ( regular_user3.email, str( role.type ) ) )
if not previously_created:
# Make sure a history was not created ( previous test runs may have left deleted histories )
histories = get_all_histories_for_user( regular_user3 )
if histories:
raise AssertionError( 'Histories were incorrectly created for user %s when the admin created the account' % email )
# Make sure the user was not associated with any groups
if regular_user3.groups:
raise AssertionError( 'Groups were incorrectly associated with user %s when the admin created the account' % email )
def test_010_reset_password_as_admin( self ):
"""Testing reseting a user password as admin"""
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testreset' )
def test_015_login_after_password_reset( self ):
"""Testing logging in after an admin reset a password - tests DefaultHistoryPermissions for accounts created by an admin"""
# logged in as admin_user
self.logout()
self.login( email=regular_user3.email, password='testreset' )
# Make sure a History and HistoryDefaultPermissions exist for the user
latest_history = get_latest_history_for_user( regular_user3 )
if not latest_history.user_id == regular_user3.id:
raise AssertionError( 'A history was not created for user %s when he logged in' % regular_user3.email )
if not latest_history.default_permissions:
raise AssertionError( 'No DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
dhps = get_default_history_permissions_by_history( latest_history )
if len( dhps ) > 1:
raise AssertionError( 'More than 1 DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
dhp = dhps[0]
if not dhp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DefaultHistoryPermission.action for history id %d is "%s", but it should be "manage permissions"'
% ( latest_history.id, dhp.action ) )
# Upload a file to create a HistoryDatasetAssociation
self.upload_file( '1.bed' )
latest_dataset = get_latest_dataset()
for dp in latest_dataset.actions:
# Should only have 1 DatasetPermissions
if dp.action != galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DatasetPermissions for dataset id %d is %s ( should have been %s )'
% ( latest_dataset.id,
dp.action,
galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
self.logout()
# Reset the password to the default for later tests
self.login( email='test@bx.psu.edu' )
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testuser' )
def test_020_mark_user_deleted( self ):
"""Testing marking a user account as deleted"""
# Logged in as admin_user
self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
if not regular_user3.active_histories:
raise AssertionError( 'HistoryDatasetAssociations for regular_user3 were incorrectly deleted when the user was marked deleted' )
def test_025_undelete_user( self ):
"""Testing undeleting a user account"""
# Logged in as admin_user
self.undelete_user( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
def test_030_create_role( self ):
"""Testing creating new role with 3 members ( and a new group named the same ), then renaming the role"""
# Logged in as admin_user
name = 'Role One'
description = "This is Role Ones description"
in_user_ids = [ str( admin_user.id ), str( regular_user1.id ), str( regular_user3.id ) ]
in_group_ids = []
# Add 1 to the number of associated groups since we are creating a new one with the same name as the role
num_gras = len( in_group_ids ) + 1
self.create_role( name=name,
description=description,
in_user_ids=in_user_ids,
in_group_ids=in_group_ids,
create_group_for_role='yes',
private_role=admin_user.email,
strings_displayed=[ "Role '%s' has been created with %d associated users and %d associated groups." % ( name, len( in_user_ids ), num_gras ),
"One of the groups associated with this role is the newly created group with the same name." ] )
# Get the role object for later tests
global role_one
role_one = database_contexts.galaxy_context.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name == name ).first()
assert role_one is not None, 'Problem retrieving role named "Role One" from the database'
# Make sure UserRoleAssociations are correct
if len( role_one.users ) != len( in_user_ids ):
raise AssertionError( '%d UserRoleAssociations were created for role id %d when it was created ( should have been %d )'
% ( len( role_one.users ), role_one.id, len( in_user_ids ) ) )
# Each of the following users should now have 2 role associations, their private role and role_one
for user in [ admin_user, regular_user1, regular_user3 ]:
refresh( user )
if len( user.roles ) != 2:
raise AssertionError( '%d UserRoleAssociations are associated with user %s ( should be 2 )'
% ( len( user.roles ), user.email ) )
# Make sure the group was created
self.visit_url( '%s/admin/groups' % self.url )
self.check_page_for_string( name )
global group_zero
group_zero = get_group_by_name( name )
# Rename the role
rename = "Role One's been Renamed"
new_description = "This is Role One's Re-described"
self.rename_role( self.security.encode_id( role_one.id ), name=rename, description=new_description )
self.visit_url( '%s/admin/roles' % self.url )
self.check_page_for_string( rename )
self.check_page_for_string( new_description )
# Reset the role back to the original name and description
self.rename_role( self.security.encode_id( role_one.id ), name=name, description=description )
def test_035_create_group( self ):
"""Testing creating new group with 3 members and 2 associated roles, then renaming it"""
# Logged in as admin_user
name = "Group One's Name"
in_user_ids = [ str( admin_user.id ), str( regular_user1.id ), str( regular_user3.id ) ]
in_role_ids = [ str( role_one.id ) ]
# The number of GroupRoleAssociations should be 2, role_one and the newly created role named 'Group One's Name'
num_gras = len( in_role_ids ) + 1
self.create_group( name=name,
in_user_ids=in_user_ids,
in_role_ids=in_role_ids,
create_role_for_group=True,
strings_displayed=[ "Group '%s' has been created with %d associated users and %d associated roles." % ( name, len( in_user_ids ), num_gras ),
"One of the roles associated with this group is the newly created role with the same name." ] )
# Get the group object for later tests
global group_one
group_one = get_group_by_name( name )
assert group_one is not None, 'Problem retrieving group named "Group One" from the database'
# Make sure UserGroupAssociations are correct
if len( group_one.users ) != len( in_user_ids ):
raise AssertionError( '%d UserGroupAssociations were created for group id %d when it was created ( should have been %d )'
% ( len( group_one.users ), group_one.id, len( in_user_ids ) ) )
# Each user should now have 1 group association, group_one
for user in [ admin_user, regular_user1, regular_user3 ]:
refresh( user )
if len( user.groups ) != 1:
raise AssertionError( '%d UserGroupAssociations are associated with user %s ( should be 1 )' % ( len( user.groups ), user.email ) )
# Make sure GroupRoleAssociations are correct
if len( group_one.roles ) != num_gras:
raise AssertionError( '%d GroupRoleAssociations were created for group id %d when it was created ( should have been %d )'
% ( len( group_one.roles ), group_one.id, num_gras ) )
# Rename the group
rename = "Group One's been Renamed"
self.rename_group( self.security.encode_id( group_one.id ), name=rename, )
self.visit_url( '%s/admin/groups' % self.url )
self.check_page_for_string( rename )
# Reset the group back to the original name
self.rename_group( self.security.encode_id( group_one.id ), name=name )
def test_040_add_members_and_role_to_group( self ):
"""Testing editing user membership and role associations of an existing group"""
# Logged in as admin_user
name = 'Group Two'
self.create_group( name=name, in_user_ids=[], in_role_ids=[] )
# Get the group object for later tests
global group_two
group_two = get_group_by_name( name )
assert group_two is not None, 'Problem retrieving group named "Group Two" from the database'
# group_two should have no associations
if group_two.users:
raise AssertionError( '%d UserGroupAssociations were created for group id %d when it was created ( should have been 0 )'
% ( len( group_two.users ), group_two.id ) )
if group_two.roles:
raise AssertionError( '%d GroupRoleAssociations were created for group id %d when it was created ( should have been 0 )'
% ( len( group_two.roles ), group_two.id ) )
user_ids = [ str( regular_user1.id ) ]
role_ids = [ str( role_one.id ) ]
self.associate_users_and_roles_with_group( self.security.encode_id( group_two.id ),
group_two.name,
user_ids=user_ids,
role_ids=role_ids )
def test_045_create_role_with_user_and_group_associations( self ):
"""Testing creating a role with user and group associations"""
# Logged in as admin_user
# NOTE: To get this to work with twill, all select lists on the ~/admin/role page must contain at least
# 1 option value or twill throws an exception, which is: ParseError: OPTION outside of SELECT
# Due to this bug in twill, we create the role, we bypass the page and visit the URL in the
# associate_users_and_groups_with_role() method.
name = 'Role Two'
description = 'This is Role Two'
user_ids = [ str( admin_user.id ) ]
group_ids = [ str( group_two.id ) ]
private_role = admin_user.email
# Create the role
self.create_role( name=name,
description=description,
in_user_ids=user_ids,
in_group_ids=group_ids,
private_role=private_role )
# Get the role object for later tests
global role_two
role_two = get_role_by_name( name )
assert role_two is not None, 'Problem retrieving role named "Role Two" from the database'
# Make sure UserRoleAssociations are correct
if len( role_two.users ) != len( user_ids ):
raise AssertionError( '%d UserRoleAssociations were created for role id %d when it was created with %d members'
% ( len( role_two.users ), role_two.id, len( user_ids ) ) )
# admin_user should now have 3 role associations, private role, role_one, role_two
refresh( admin_user )
if len( admin_user.roles ) != 3:
raise AssertionError( '%d UserRoleAssociations are associated with user %s ( should be 3 )' % ( len( admin_user.roles ), admin_user.email ) )
# Make sure GroupRoleAssociations are correct
refresh( role_two )
if len( role_two.groups ) != len( group_ids ):
raise AssertionError( '%d GroupRoleAssociations were created for role id %d when it was created ( should have been %d )'
% ( len( role_two.groups ), role_two.id, len( group_ids ) ) )
# group_two should now be associated with 2 roles: role_one, role_two
refresh( group_two )
if len( group_two.roles ) != 2:
raise AssertionError( '%d GroupRoleAssociations are associated with group id %d ( should be 2 )' % ( len( group_two.roles ), group_two.id ) )
def test_050_change_user_role_associations( self ):
"""Testing changing roles associated with a user"""
# Logged in as admin_user
# Create a new role with no associations
name = 'Role Three'
description = 'This is Role Three'
user_ids = []
group_ids = []
private_role = admin_user.email
self.create_role( name=name,
description=description,
in_user_ids=user_ids,
in_group_ids=group_ids,
private_role=private_role )
# Get the role object for later tests
global role_three
role_three = get_role_by_name( name )
assert role_three is not None, 'Problem retrieving role named "Role Three" from the database'
# Associate the role with a user
refresh( admin_user )
role_ids = []
for ura in admin_user.non_private_roles:
role_ids.append( str( ura.role_id ) )
role_ids.append( str( role_three.id ) )
group_ids = []
for uga in admin_user.groups:
group_ids.append( str( uga.group_id ) )
strings_displayed = [ "User '%s' has been updated with %d associated roles and %d associated groups" %
( admin_user.email, len( role_ids ), len( group_ids ) ) ]
self.manage_roles_and_groups_for_user( self.security.encode_id( admin_user.id ),
in_role_ids=role_ids,
in_group_ids=group_ids,
strings_displayed=strings_displayed )
refresh( admin_user )
# admin_user should now be associated with 4 roles: private, role_one, role_two, role_three
if len( admin_user.roles ) != 4:
raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 4 )' %
( len( admin_user.roles ), admin_user.email ) )
def test_055_mark_group_deleted( self ):
"""Testing marking a group as deleted"""
# Logged in as admin_user
self.browse_groups( strings_displayed=[ group_two.name ] )
self.mark_group_deleted( self.security.encode_id( group_two.id ), group_two.name )
refresh( group_two )
if not group_two.deleted:
raise AssertionError( '%s was not correctly marked as deleted.' % group_two.name )
# Deleting a group should not delete any associations
if not group_two.members:
raise AssertionError( '%s incorrectly lost all members when it was marked as deleted.' % group_two.name )
if not group_two.roles:
raise AssertionError( '%s incorrectly lost all role associations when it was marked as deleted.' % group_two.name )
def test_060_undelete_group( self ):
"""Testing undeleting a deleted group"""
# Logged in as admin_user
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
refresh( group_two )
if group_two.deleted:
raise AssertionError( '%s was not correctly marked as not deleted.' % group_two.name )
def test_065_mark_role_deleted( self ):
"""Testing marking a role as deleted"""
# Logged in as admin_user
self.browse_roles( strings_displayed=[ role_two.name ] )
self.mark_role_deleted( self.security.encode_id( role_two.id ), role_two.name )
refresh( role_two )
if not role_two.deleted:
raise AssertionError( '%s was not correctly marked as deleted.' % role_two.name )
# Deleting a role should not delete any associations
if not role_two.users:
raise AssertionError( '%s incorrectly lost all user associations when it was marked as deleted.' % role_two.name )
if not role_two.groups:
raise AssertionError( '%s incorrectly lost all group associations when it was marked as deleted.' % role_two.name )
def test_070_undelete_role( self ):
"""Testing undeleting a deleted role"""
# Logged in as admin_user
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
def test_075_purge_user( self ):
"""Testing purging a user account"""
# Logged in as admin_user
self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
refresh( regular_user3 )
self.purge_user( self.security.encode_id( regular_user3.id ), regular_user3.email )
refresh( regular_user3 )
if not regular_user3.purged:
raise AssertionError( 'User %s was not marked as purged.' % regular_user3.email )
# Make sure DefaultUserPermissions deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.default_permissions ) != 1:
raise AssertionError( 'DefaultUserPermissions for user %s were not deleted.' % regular_user3.email )
for dup in regular_user3.default_permissions:
role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( dup.role_id )
if role.type != 'private':
raise AssertionError( 'DefaultUserPermissions for user %s are not related with the private role.' % regular_user3.email )
# Make sure History deleted
for history in regular_user3.histories:
refresh( history )
if not history.deleted:
raise AssertionError( 'User %s has active history id %d after their account was marked as purged.' % ( regular_user3.email, history.id ) )
# NOTE: Not all hdas / datasets will be deleted at the time a history is deleted - the cleanup_datasets.py script
# is responsible for this.
# Make sure UserGroupAssociations deleted
if regular_user3.groups:
raise AssertionError( 'User %s has active group after their account was marked as purged.' % ( regular_user3.email ) )
# Make sure UserRoleAssociations deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.roles ) != 1:
raise AssertionError( 'UserRoleAssociations for user %s were not deleted.' % regular_user3.email )
for ura in regular_user3.roles:
role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( ura.role_id )
if role.type != 'private':
raise AssertionError( 'UserRoleAssociations for user %s are not related with the private role.' % regular_user3.email )
def test_080_manually_unpurge_user( self ):
"""Testing manually un-purging a user account"""
# Logged in as admin_user
# Reset the user for later test runs. The user's private Role and DefaultUserPermissions for that role
# should have been preserved, so all we need to do is reset purged and deleted.
# TODO: If we decide to implement the GUI feature for un-purging a user, replace this with a method call
regular_user3.purged = False
regular_user3.deleted = False
flush( regular_user3 )
def test_085_purge_group( self ):
"""Testing purging a group"""
# Logged in as admin_user
self.mark_group_deleted( self.security.encode_id( group_two.id ), group_two.name )
self.purge_group( self.security.encode_id( group_two.id ), group_two.name )
# Make sure there are no UserGroupAssociations
if get_user_group_associations_by_group( group_two ):
raise AssertionError( "Purging the group did not delete the UserGroupAssociations for group_id '%s'" % group_two.id )
# Make sure there are no GroupRoleAssociations
if get_group_role_associations_by_group( group_two ):
raise AssertionError( "Purging the group did not delete the GroupRoleAssociations for group_id '%s'" % group_two.id )
# Undelete the group for later test runs
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
def test_090_purge_role( self ):
"""Testing purging a role"""
# Logged in as admin_user
self.mark_role_deleted( self.security.encode_id( role_two.id ), role_two.name )
self.purge_role( self.security.encode_id( role_two.id ), role_two.name )
# Make sure there are no UserRoleAssociations
if get_user_role_associations_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the UserRoleAssociations for role_id '%s'" % role_two.id )
# Make sure there are no DefaultUserPermissions associated with the Role
if get_default_user_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DefaultUserPermissions for role_id '%s'" % role_two.id )
# Make sure there are no DefaultHistoryPermissions associated with the Role
if get_default_history_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DefaultHistoryPermissions for role_id '%s'" % role_two.id )
# Make sure there are no GroupRoleAssociations
if get_group_role_associations_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the GroupRoleAssociations for role_id '%s'" % role_two.id )
# Make sure there are no DatasetPermissions
if get_dataset_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DatasetPermissions for role_id '%s'" % role_two.id )
def test_095_manually_unpurge_role( self ):
"""Testing manually un-purging a role"""
# Logged in as admin_user
# Manually unpurge, then undelete the role for later test runs
# TODO: If we decide to implement the GUI feature for un-purging a role, replace this with a method call
role_two.purged = False
flush( role_two )
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
def test_999_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to pass"""
# Logged in as admin_user
##################
# Eliminate all non-private roles
##################
for role in [ role_one, role_two, role_three ]:
self.mark_role_deleted( self.security.encode_id( role.id ), role.name )
self.purge_role( self.security.encode_id( role.id ), role.name )
# Manually delete the role from the database
refresh( role )
database_contexts.galaxy_context.delete( role )
database_contexts.galaxy_context.flush()
##################
# Eliminate all groups
##################
for group in [ group_zero, group_one, group_two ]:
self.mark_group_deleted( self.security.encode_id( group.id ), group.name )
self.purge_group( self.security.encode_id( group.id ), group.name )
# Manually delete the group from the database
refresh( group )
database_contexts.galaxy_context.delete( group )
database_contexts.galaxy_context.flush()
##################
# Make sure all users are associated only with their private roles
##################
for user in [ admin_user, regular_user1, regular_user2, regular_user3 ]:
refresh( user )
if len( user.roles) != 1:
raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 1 )' % ( len( user.roles ), user.email ) )
| 61.423154
| 168
| 0.63052
|
b530c46e9803b7106220deecd74308df3a4ac3a0
| 802
|
py
|
Python
|
sa/profiles/Eltex/MA4000/get_config.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
sa/profiles/Eltex/MA4000/get_config.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/Eltex/MA4000/get_config.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Eltex.MA4000.get_config
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetconfig import IGetConfig
class Script(BaseScript):
name = "Eltex.MA4000.get_config"
interface = IGetConfig
def execute_cli(self, policy="r"):
assert policy in ("r", "s")
if policy == "s":
config = self.cli("show startup-config")
else:
config = self.cli("show running-config")
return self.cleaned_config(config)
| 32.08
| 71
| 0.477556
|
a6bf774297d147396daa8fc783252e19cd48dc6c
| 1,833
|
py
|
Python
|
cities/admin.py
|
swappsco/django-cities
|
b74c42af7d236988a86f5bebb9e7aea6dc88a405
|
[
"MIT"
] | null | null | null |
cities/admin.py
|
swappsco/django-cities
|
b74c42af7d236988a86f5bebb9e7aea6dc88a405
|
[
"MIT"
] | null | null | null |
cities/admin.py
|
swappsco/django-cities
|
b74c42af7d236988a86f5bebb9e7aea6dc88a405
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
class CitiesAdmin(admin.ModelAdmin):
raw_id_fields = ['alt_names']
class CountryAdmin(CitiesAdmin):
list_display = ['name', 'code', 'code3', 'tld', 'phone', 'continent', 'area', 'population']
search_fields = ['name', 'code', 'code3', 'tld', 'phone']
admin.site.register(Country, CountryAdmin)
class RegionAdmin(CitiesAdmin):
ordering = ['name_std']
list_display = ['name_std', 'code', 'country']
search_fields = ['name', 'name_std', 'code']
admin.site.register(Region, RegionAdmin)
class SubregionAdmin(CitiesAdmin):
ordering = ['name_std']
list_display = ['name_std', 'code', 'region']
search_fields = ['name', 'name_std', 'code']
raw_id_fields = ['alt_names', 'region']
admin.site.register(Subregion, SubregionAdmin)
class CityAdmin(CitiesAdmin):
ordering = ['name_std']
list_display = ['name_std', 'subregion', 'region', 'country', 'population']
search_fields = ['name', 'name_std']
raw_id_fields = ['alt_names', 'region', 'subregion']
admin.site.register(City, CityAdmin)
class DistrictAdmin(CitiesAdmin):
raw_id_fields = ['alt_names', 'city']
list_display = ['name_std', 'city']
search_fields = ['name', 'name_std']
admin.site.register(District, DistrictAdmin)
class AltNameAdmin(admin.ModelAdmin):
ordering = ['name']
list_display = ['name', 'language', 'is_preferred', 'is_short']
list_filter = ['is_preferred', 'is_short', 'language']
search_fields = ['name']
admin.site.register(AlternativeName, AltNameAdmin)
class PostalCodeAdmin(CitiesAdmin):
ordering = ['code']
list_display = ['code', 'subregion_name', 'region_name', 'country']
search_fields = ['code', 'country__name', 'region_name', 'subregion_name']
admin.site.register(PostalCode, PostalCodeAdmin)
| 32.157895
| 95
| 0.68958
|
74f5009581e43f201202e5e66b19b3f850e27aff
| 5,635
|
py
|
Python
|
examples/src/main/python/sql/basic.py
|
ericl/spark
|
f5ea7fe53974a7e8cbfc222b9a6f47669b53ccfd
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 1
|
2016-07-29T06:38:10.000Z
|
2016-07-29T06:38:10.000Z
|
examples/src/main/python/sql/basic.py
|
hgd250/spark
|
274f3b9ec86e4109c7678eef60f990d41dc3899f
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null |
examples/src/main/python/sql/basic.py
|
hgd250/spark
|
274f3b9ec86e4109c7678eef60f990d41dc3899f
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on:init_session$
from pyspark.sql import SparkSession
# $example off:init_session$
# $example on:schema_inferring$
from pyspark.sql import Row
# $example off:schema_inferring$
# $example on:programmatic_schema$
# Import data types
from pyspark.sql.types import *
# $example off:programmatic_schema$
"""
A simple example demonstrating basic Spark SQL features.
Run with:
./bin/spark-submit examples/src/main/python/sql/basic.py
"""
def basic_df_example(spark):
# $example on:create_df$
# spark is an existing SparkSession
df = spark.read.json("examples/src/main/resources/people.json")
# Displays the content of the DataFrame to stdout
df.show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:create_df$
# $example on:untyped_ops$
# spark, df are from the previous example
# Print the schema in a tree format
df.printSchema()
# root
# |-- age: long (nullable = true)
# |-- name: string (nullable = true)
# Select only the "name" column
df.select("name").show()
# +-------+
# | name|
# +-------+
# |Michael|
# | Andy|
# | Justin|
# +-------+
# Select everybody, but increment the age by 1
df.select(df['name'], df['age'] + 1).show()
# +-------+---------+
# | name|(age + 1)|
# +-------+---------+
# |Michael| null|
# | Andy| 31|
# | Justin| 20|
# +-------+---------+
# Select people older than 21
df.filter(df['age'] > 21).show()
# +---+----+
# |age|name|
# +---+----+
# | 30|Andy|
# +---+----+
# Count people by age
df.groupBy("age").count().show()
# +----+-----+
# | age|count|
# +----+-----+
# | 19| 1|
# |null| 1|
# | 30| 1|
# +----+-----+
# $example off:untyped_ops$
# $example on:run_sql$
# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")
sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:run_sql$
def schema_inference_example(spark):
# $example on:schema_inferring$
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
# Infer the schema, and register the DataFrame as a table.
schemaPeople = spark.createDataFrame(people)
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
# The results of SQL queries are Dataframe objects.
# rdd returns the content as an :class:`pyspark.RDD` of :class:`Row`.
teenNames = teenagers.rdd.map(lambda p: "Name: " + p.name).collect()
for name in teenNames:
print(name)
# Name: Justin
# $example off:schema_inferring$
def programmatic_schema_example(spark):
# $example on:programmatic_schema$
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
# Each line is converted to a tuple.
people = parts.map(lambda p: (p[0], p[1].strip()))
# The schema is encoded in a string.
schemaString = "name age"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
# Apply the schema to the RDD.
schemaPeople = spark.createDataFrame(people, schema)
# Creates a temporary view using the DataFrame
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
results = spark.sql("SELECT name FROM people")
results.show()
# +-------+
# | name|
# +-------+
# |Michael|
# | Andy|
# | Justin|
# +-------+
# $example off:programmatic_schema$
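# Variant of programmatic_schema_example() above (a sketch, not part of the official
# example set): the same schema could be declared with typed fields instead of being
# built from the "name age" string; the age column would then be cast to int when the
# rows are created.
#
#     typed_schema = StructType([
#         StructField("name", StringType(), True),
#         StructField("age", IntegerType(), True),
#     ])
#     typed_people = parts.map(lambda p: (p[0], int(p[1].strip())))
#     typed_df = spark.createDataFrame(typed_people, typed_schema)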
if __name__ == "__main__":
# $example on:init_session$
spark = SparkSession \
.builder \
.appName("PythonSQL") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
# $example off:init_session$
basic_df_example(spark)
schema_inference_example(spark)
programmatic_schema_example(spark)
spark.stop()
| 28.897436
| 97
| 0.614552
|
be779c8e4e305bea52eb1ac32f9f8245589b473f
| 1,290
|
py
|
Python
|
cnn/notebooks/sweeper.py
|
Wentong-DST/mdnet
|
13810d6f8785483b4d72a6c6459faca70b89750d
|
[
"MIT"
] | 1,367
|
2016-05-24T01:10:10.000Z
|
2022-03-27T08:04:23.000Z
|
cnn/notebooks/sweeper.py
|
Wentong-DST/mdnet
|
13810d6f8785483b4d72a6c6459faca70b89750d
|
[
"MIT"
] | 55
|
2016-05-24T06:12:06.000Z
|
2021-08-12T06:42:43.000Z
|
cnn/notebooks/sweeper.py
|
Wentong-DST/mdnet
|
13810d6f8785483b4d72a6c6459faca70b89750d
|
[
"MIT"
] | 340
|
2016-05-24T02:15:26.000Z
|
2022-03-23T22:55:08.000Z
|
import json
import numpy as np
def loadLog(filename):
s = []
for line in open(filename):
r = line.find('json_stats')
if r > -1:
s.append(json.loads(line[r+12:]))
return s
def findSweepParams(frames):
def findConstants(frame):
keys = dict()
for key in frame.keys():
v = np.asarray(frame[key])
u = np.copy(v)
u.fill(v[0])
if np.array_equal(v, u):
keys[key] = v[0]
return keys
changing = dict()
for frame in frames:
for k, v in findConstants(frame).items():
if isinstance(v, list):
v = json.dumps(v)
if k not in changing:
changing[k] = {v}
else:
changing[k].add(v)
all_keys = []
for k, v in changing.items():
if len(v) > 1:
all_keys.append(k)
return sorted(all_keys)
def generateLegend(frame, sweeps):
s = ''
for key in sweeps:
if key not in frame:
s = s + key + '=not present, '
else:
s = s + key + '=' + str(frame[key][0]) + ', '
return s
def generateLegends(frames):
params = findSweepParams(frames)
return [generateLegend(frame, params) for frame in frames]
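# Example usage (a sketch; the log file name is illustrative). Each "frame" passed to
# findSweepParams()/generateLegends() is expected to map a stat name to the list of
# values it took over the run, so the per-line dicts returned by loadLog() are first
# collated by key:
#
#     stats = loadLog('sweep_run_0.log')
#     frame = {k: [s[k] for s in stats] for k in stats[0]}
#     print(generateLegends([frame]))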
| 24.807692
| 62
| 0.507752
|
3010f5d8bfc821b373dca81d0a64e12eff8d2eab
| 2,011
|
py
|
Python
|
QR_code_module.py
|
RohiBaner/QR-Code-Detection-in-Video
|
cbc4d100553679b63835b35203e382eb70723507
|
[
"Apache-2.0"
] | null | null | null |
QR_code_module.py
|
RohiBaner/QR-Code-Detection-in-Video
|
cbc4d100553679b63835b35203e382eb70723507
|
[
"Apache-2.0"
] | null | null | null |
QR_code_module.py
|
RohiBaner/QR-Code-Detection-in-Video
|
cbc4d100553679b63835b35203e382eb70723507
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 20 11:38:24 2019
@author: Rohini
"""
from __future__ import print_function
import pyzbar.pyzbar as pyzbar
import numpy as np
import cv2
def decode(im) :
# Find barcodes and QR codes in the image.
# If detection is poor, threshold the image to black on white (or invert it) and
# tune the BGR bounds passed to cv2.inRange to get the best result.
qr_data = []
decodedObjects = pyzbar.decode(im)
# Print results
for obj in decodedObjects:
if obj.data not in qr_data:
qr_data.append(obj.data)
# print('Data : ', obj.data,'\n')
# print('Type : ', obj.type)
# print('Data : ', obj.data,'\n')
return decodedObjects, qr_data
# Display barcode and QR code location
def display(im, decodedObjects):
# Loop over all decoded objects
for decodedObject in decodedObjects:
points = decodedObject.polygon
# If the points do not form a quad, find convex hull
if len(points) > 4 :
hull = cv2.convexHull(np.array([point for point in points], dtype=np.float32))
hull = list(map(tuple, np.squeeze(hull)))
else :
hull = points
# # Number of points in the convex hull
# n = len(hull)
# # Draw the convext hull
# for j in range(0,n):
# cv2.line(im, hull[j], hull[ (j+1) % n], (255,0,0), 3)
# draw text
x = decodedObject.rect.left
y = decodedObject.rect.top
w = decodedObject.rect.width
h = decodedObject.rect.height
# draw bounding box
cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,255),2)
barCode = str(decodedObject.data)
cv2.putText(im, barCode, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 1, cv2.LINE_AA)
# Main
if __name__ == '__main__':
# Read image
im = cv2.imread('QR_codes_4.PNG') # This image contains both bar codes and QR codes, but the output detects only the QR codes
decodedObjects, qr_data = decode(im)
display(im, decodedObjects)
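# Per-frame usage on a video stream (a sketch, not part of the original example; the
# capture source index 0 is illustrative):
#
#     cap = cv2.VideoCapture(0)
#     while cap.isOpened():
#         ok, frame = cap.read()
#         if not ok:
#             break
#         decoded, qr_data = decode(frame)
#         display(frame, decoded)
#         cv2.imshow('QR detection', frame)
#         if cv2.waitKey(1) & 0xFF == ord('q'):
#             break
#     cap.release()
#     cv2.destroyAllWindows()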
| 29.144928
| 136
| 0.623073
|
7bfcef6982a1fbcbabbd2808340a0ee7b8a09bf8
| 499
|
py
|
Python
|
analyzer/windows/modules/packages/msi.py
|
Yuanmessi/Bold-Falcon
|
00fcaba0b3d9c462b9d20ecb256ff85db5d119e2
|
[
"BSD-3-Clause"
] | 71
|
2016-11-13T03:26:45.000Z
|
2022-02-22T08:13:04.000Z
|
data/analyzer/windows/modules/packages/msi.py
|
iswenhao/Panda-Sandbox
|
a04069d404cb4326ff459e703f14625dc45759ed
|
[
"MIT"
] | 3
|
2021-07-01T08:09:05.000Z
|
2022-01-28T03:38:36.000Z
|
data/analyzer/windows/modules/packages/msi.py
|
iswenhao/Panda-Sandbox
|
a04069d404cb4326ff459e703f14625dc45759ed
|
[
"MIT"
] | 36
|
2016-12-13T11:37:56.000Z
|
2021-11-11T12:20:10.000Z
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.common.abstracts import Package
class Msi(Package):
"""MSI analysis package."""
PATHS = [
("System32", "msiexec.exe"),
]
def start(self, path):
msi_path = self.get_path("msiexec.exe")
return self.execute(msi_path, args=["/I", path])
| 27.722222
| 68
| 0.665331
|
94da7fbe2c9ea0aa22f59e521a9c4c8083bd989f
| 7,032
|
py
|
Python
|
py/server/deephaven/dtypes.py
|
jjbrosnan/deephaven-core
|
ff5f17c1a835756bab827d7cc6caee599738da69
|
[
"MIT"
] | null | null | null |
py/server/deephaven/dtypes.py
|
jjbrosnan/deephaven-core
|
ff5f17c1a835756bab827d7cc6caee599738da69
|
[
"MIT"
] | null | null | null |
py/server/deephaven/dtypes.py
|
jjbrosnan/deephaven-core
|
ff5f17c1a835756bab827d7cc6caee599738da69
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
""" This module defines the data types supported by the Deephaven engine.
Each data type is represented by a DType class which supports creating arrays of the same type and more.
"""
from __future__ import annotations
from typing import Any, Sequence, Callable, Dict, Type
import jpy
import numpy as np
from deephaven import DHError
_JQstType = jpy.get_type("io.deephaven.qst.type.Type")
_JTableTools = jpy.get_type("io.deephaven.engine.util.TableTools")
_j_name_type_map: Dict[str, DType] = {}
def _qst_custom_type(cls_name: str):
return _JQstType.find(_JTableTools.typeFromName(cls_name))
class DType:
""" A class representing a data type in Deephaven."""
def __init__(self, j_name: str, j_type: Type = None, qst_type: jpy.JType = None, is_primitive: bool = False,
np_type: Any = np.object_):
"""
Args:
j_name (str): the full qualified name of the Java class
j_type (Type): the mapped Python class created by JPY
qst_type (JType): the JPY wrapped object for an instance of QST Type
is_primitive (bool): whether this instance represents a primitive Java type
np_type (Any): an instance of a numpy dtype (e.g. np.dtype("int64")) or a numpy class (e.g. np.int16);
defaults to np.object_
"""
self.j_name = j_name
self.j_type = j_type if j_type else jpy.get_type(j_name)
self.qst_type = qst_type if qst_type else _qst_custom_type(j_name)
self.is_primitive = is_primitive
self.np_type = np_type
_j_name_type_map[j_name] = self
def __repr__(self):
return self.j_name
def __call__(self, *args, **kwargs):
if self.is_primitive:
raise DHError(message=f"primitive type {self.j_name} is not callable.")
try:
return self.j_type(*args, **kwargs)
except Exception as e:
raise DHError(e, f"failed to create an instance of {self.j_name}") from e
bool_ = DType(j_name="java.lang.Boolean", qst_type=_JQstType.booleanType(), np_type=np.bool_)
"""Boolean type"""
byte = DType(j_name="byte", qst_type=_JQstType.byteType(), is_primitive=True, np_type=np.int8)
"""Signed byte integer type"""
int8 = byte
"""Signed byte integer type"""
short = DType(j_name="short", qst_type=_JQstType.shortType(), is_primitive=True, np_type=np.int16)
"""Signed short integer type"""
int16 = short
"""Signed short integer type"""
char = DType(j_name="char", qst_type=_JQstType.charType(), is_primitive=True, np_type=np.dtype('uint16'))
"""Character type"""
int32 = DType(j_name="int", qst_type=_JQstType.intType(), is_primitive=True, np_type=np.int32)
"""Signed 32bit integer type"""
long = DType(j_name="long", qst_type=_JQstType.longType(), is_primitive=True, np_type=np.int64)
"""Signed 64bit integer type"""
int64 = long
"""Signed 64bit integer type"""
int_ = long
"""Signed 64bit integer type"""
float32 = DType(j_name="float", qst_type=_JQstType.floatType(), is_primitive=True, np_type=np.float32)
"""Single-precision floating-point number type"""
single = float32
"""Single-precision floating-point number type"""
float64 = DType(j_name="double", qst_type=_JQstType.doubleType(), is_primitive=True, np_type=np.float64)
"""Double-precision floating-point number type"""
double = float64
"""Double-precision floating-point number type"""
float_ = float64
"""Double-precision floating-point number type"""
string = DType(j_name="java.lang.String", qst_type=_JQstType.stringType())
"""String type"""
BigDecimal = DType(j_name="java.math.BigDecimal")
"""Java BigDecimal type"""
StringSet = DType(j_name="io.deephaven.stringset.StringSet")
"""Deephaven StringSet type"""
DateTime = DType(j_name="io.deephaven.time.DateTime", np_type=np.dtype("datetime64[ns]"))
"""Deephaven DateTime type"""
Period = DType(j_name="io.deephaven.time.Period")
"""Deephaven time period type"""
PyObject = DType(j_name="org.jpy.PyObject")
"""Python object type"""
JObject = DType(j_name="java.lang.Object")
"""Java Object type"""
byte_array = DType(j_name='[B')
"""Byte array type"""
int8_array = byte_array
"""Byte array type"""
short_array = DType(j_name='[S')
"""Short array type"""
int16_array = short_array
"""Short array type"""
int32_array = DType(j_name='[I')
"""32bit integer array type"""
long_array = DType(j_name='[J')
"""64bit integer array type"""
int64_array = long_array
"""64bit integer array type"""
int_array = long_array
"""64bit integer array type"""
single_array = DType(j_name='[F')
"""Single-precision floating-point array type"""
float32_array = single_array
"""Single-precision floating-point array type"""
double_array = DType(j_name='[D')
"""Double-precision floating-point array type"""
float64_array = double_array
"""Double-precision floating-point array type"""
float_array = double_array
"""Double-precision floating-point array type"""
string_array = DType(j_name='[Ljava.lang.String;')
"""Java String array type"""
datetime_array = DType(j_name='[Lio.deephaven.time.DateTime;')
"""Deephaven DateTime array type"""
def array(dtype: DType, seq: Sequence, remap: Callable[[Any], Any] = None) -> jpy.JType:
""" Creates a Java array of the specified data type populated with values from a sequence.
Note:
        this method does unsafe casting, meaning precision and values might be lost in a down cast
Args:
dtype (DType): the component type of the array
seq (Sequence): a sequence of compatible data, e.g. list, tuple, numpy array, Pandas series, etc.
remap (optional): a callable that takes one value and maps it to another, for handling the translation of
special DH values such as NULL_INT, NAN_INT between Python and the DH engine
Returns:
a Java array
Raises:
DHError
"""
try:
if remap:
if not callable(remap):
raise ValueError("Not a callable")
seq = [remap(v) for v in seq]
else:
if isinstance(seq, str) and dtype == char:
return array(char, seq, remap=ord)
return jpy.array(dtype.j_type, seq)
except Exception as e:
raise DHError(e, f"failed to create a Java {dtype.j_name} array.") from e
def from_jtype(j_class: Any) -> DType:
""" look up a DType that matches the java type, if not found, create a DType for it. """
if not j_class:
return None
j_name = j_class.getName()
dtype = _j_name_type_map.get(j_name)
if not dtype:
return DType(j_name=j_name, j_type=j_class, np_type=np.object_)
else:
return dtype
def from_np_dtype(np_dtype: np.dtype) -> DType:
""" Look up a DType that matches the numpy.dtype, if not found, return PyObject. """
if np_dtype.kind in {'U', 'S'}:
return string
for _, dtype in _j_name_type_map.items():
if np.dtype(dtype.np_type) == np_dtype and dtype.np_type != np.object_:
return dtype
return PyObject
| 36.247423
| 113
| 0.689278
|
a3e29d506a52cce4d97bb9bf2cc8e514a0215c09
| 5,415
|
py
|
Python
|
quantize.py
|
Tiamat-Tech/ZAQ-code
|
e7e9f55791e36c6784d58c356d3ced76a7583369
|
[
"MIT"
] | 55
|
2021-03-30T01:30:46.000Z
|
2022-03-30T03:05:25.000Z
|
quantize.py
|
Tiamat-Tech/ZAQ-code
|
e7e9f55791e36c6784d58c356d3ced76a7583369
|
[
"MIT"
] | 8
|
2021-04-23T07:59:20.000Z
|
2021-06-04T14:28:24.000Z
|
quantize.py
|
Tiamat-Tech/ZAQ-code
|
e7e9f55791e36c6784d58c356d3ced76a7583369
|
[
"MIT"
] | 13
|
2021-04-08T03:15:47.000Z
|
2022-03-18T08:39:12.000Z
|
import os
import copy
import random
import numpy as np
import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import network
from dataloader import get_dataloader
from collections import OrderedDict
from quantization import quantize_model
def train(args, model, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(args.device), target.to(args.device)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if args.verbose and batch_idx % args.log_interval == 0:
print('Train Epoch: [{}] [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, test_loader, cur_epoch):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(args.device), target.to(args.device)
output = model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nEpoch [{}] Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
cur_epoch, test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct/len(test_loader.dataset)
def main():
parser = argparse.ArgumentParser(description='PyTorch Quantization')
    parser.add_argument('--model', type=str, default='resnet20', help='model name (default: resnet20)')
parser.add_argument('--num_classes', type=int, default=10)
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'],
help='dataset name (default: cifar10)')
parser.add_argument('--data_root', required=True, default=None, help='data path')
    parser.add_argument('--ckpt', default='', help='the path of pre-trained parameters')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of epochs to train (default: 200)')
parser.add_argument('--seed', type=int, default=6786, metavar='S', help='random seed (default: 6786)')
parser.add_argument('--scheduler', action='store_true', default=False)
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--step_size', type=int, default=80, help='step size')
parser.add_argument('--batch_size', type=int, default=256, help='input batch size for training')
parser.add_argument('--device', default="0", help='device to use')
parser.add_argument('--verbose', action='store_true', default=False)
parser.add_argument('--log_interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--test_only', action='store_true', default=False)
parser.add_argument('--download', action='store_true', default=False)
# quantization
parser.add_argument('--weight_bit', type=int, default=6, help='bit-width for parameters')
parser.add_argument('--activation_bit', type=int, default=8, help='bit-width for act')
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.device
args.device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
os.makedirs('checkpoint/q_model/', exist_ok=True)
model = network.get_model(args)
model.load_state_dict(torch.load(args.ckpt))
model.to(args.device)
train_loader, test_loader = get_dataloader(args)
best_acc = test(args, model, test_loader, 0)
q_model = quantize_model(model, args)
quant_acc = test(args, q_model, test_loader, 0)
print("Quant Acc=%.6f"%quant_acc)
print("Best Acc=%.6f"%best_acc)
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
retrain_acc = 0
scheduler = optim.lr_scheduler.StepLR(optimizer, args.step_size, 0.1)
if args.test_only:
return
for epoch in range(1, args.epochs + 1):
train(args, q_model, train_loader, optimizer, epoch)
acc = test(args, q_model, test_loader, epoch)
scheduler.step()
if acc > retrain_acc:
retrain_acc = acc
print('Saving a best checkpoint ...')
torch.save(model.state_dict(),"checkpoint/q_model/%s-%s-Q.pt"%(args.dataset, args.model))
print("Retrain Acc=%.6f" % retrain_acc)
print("Quant Acc=%.6f" % quant_acc)
print("Best Acc=%.6f" % best_acc)
if __name__ == "__main__":
main()
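# A typical invocation sketched from the argparse options defined above; the dataset
# root and checkpoint path are placeholders, not paths taken from the repository:
#   python quantize.py --dataset cifar10 --data_root ./data --model resnet20 \
#       --ckpt checkpoint/resnet20.pt --weight_bit 6 --activation_bit 8 --device 0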
| 43.669355
| 113
| 0.65891
|
88d2643caa60c1e7fe8dbed52e46304c19920fad
| 17,333
|
py
|
Python
|
raiden/messages/transfers.py
|
ExchangeUnion/raiden
|
2217bcb698fcfce3499dc1f41ad919ed82e8e45f
|
[
"MIT"
] | null | null | null |
raiden/messages/transfers.py
|
ExchangeUnion/raiden
|
2217bcb698fcfce3499dc1f41ad919ed82e8e45f
|
[
"MIT"
] | 12
|
2019-08-09T19:12:17.000Z
|
2019-12-05T15:49:29.000Z
|
raiden/messages/transfers.py
|
ExchangeUnion/raiden
|
2217bcb698fcfce3499dc1f41ad919ed82e8e45f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from hashlib import sha256
from typing import overload
from raiden.constants import EMPTY_SIGNATURE, UINT64_MAX, UINT256_MAX
from raiden.messages.abstract import SignedRetrieableMessage
from raiden.messages.cmdid import CmdId
from raiden.messages.metadata import Metadata, RouteMetadata
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.mediated_transfer.events import SendLockedTransfer, SendRefundTransfer
from raiden.transfer.utils import hash_balance_data
from raiden.utils import ishash, sha3
from raiden.utils.packing import pack_balance_proof
from raiden.utils.signing import pack_data
from raiden.utils.typing import (
Address,
BlockExpiration,
ChainID,
ChannelID,
ClassVar,
InitiatorAddress,
Locksroot,
Nonce,
PaymentAmount,
PaymentID,
PaymentWithFeeAmount,
Secret,
SecretHash,
TargetAddress,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
)
def assert_envelope_values(
nonce: int,
channel_identifier: ChannelID,
transferred_amount: TokenAmount,
locked_amount: TokenAmount,
locksroot: Locksroot,
):
if nonce <= 0:
raise ValueError("nonce cannot be zero or negative")
if nonce > UINT64_MAX:
raise ValueError("nonce is too large")
if channel_identifier <= 0:
raise ValueError("channel id cannot be zero or negative")
if channel_identifier > UINT256_MAX:
raise ValueError("channel id is too large")
if transferred_amount < 0:
raise ValueError("transferred_amount cannot be negative")
if transferred_amount > UINT256_MAX:
raise ValueError("transferred_amount is too large")
if locked_amount < 0:
raise ValueError("locked_amount cannot be negative")
if locked_amount > UINT256_MAX:
raise ValueError("locked_amount is too large")
if len(locksroot) != 32:
raise ValueError("locksroot must have length 32")
def assert_transfer_values(payment_identifier, token, recipient):
if payment_identifier < 0:
raise ValueError("payment_identifier cannot be negative")
if payment_identifier > UINT64_MAX:
raise ValueError("payment_identifier is too large")
if len(token) != 20:
raise ValueError("token is an invalid address")
if len(recipient) != 20:
raise ValueError("recipient is an invalid address")
@dataclass(repr=False, eq=False)
class Lock:
""" The lock datastructure.
Args:
amount: Amount of the token being transferred.
expiration: Highest block_number until which the transfer can be settled
secrethash: Hashed secret `sha256(secret).digest()` used to register the transfer,
the real `secret` is necessary to release the locked amount.
"""
# Lock is not a message, it is a serializable structure that is reused in
# some messages
amount: PaymentWithFeeAmount
expiration: BlockExpiration
secrethash: SecretHash
def __post_init__(self):
# guarantee that `amount` can be serialized using the available bytes
# in the fixed length format
if self.amount < 0:
raise ValueError(f"amount {self.amount} needs to be positive")
if self.amount > UINT256_MAX:
raise ValueError(f"amount {self.amount} is too large")
if self.expiration < 0:
raise ValueError(f"expiration {self.expiration} needs to be positive")
if self.expiration > UINT256_MAX:
raise ValueError(f"expiration {self.expiration} is too large")
if not ishash(self.secrethash):
raise ValueError("secrethash {self.secrethash} is not a valid hash")
@property
def as_bytes(self):
return pack_data(
(self.expiration, "uint256"), (self.amount, "uint256"), (self.secrethash, "bytes32")
)
@property
def lockhash(self):
return sha3(self.as_bytes)
@classmethod
def from_bytes(cls, serialized):
return cls(
expiration=int.from_bytes(serialized[:32], byteorder="big"),
amount=int.from_bytes(serialized[32:64], byteorder="big"),
secrethash=serialized[64:],
)
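# A small round-trip sketch of the lock encoding above: as_bytes packs expiration,
# amount and secrethash into 32 bytes each (96 bytes total, matching the slicing in
# from_bytes). The literal values are illustrative and assume ishash() accepts any
# 32-byte value.
def _example_lock_roundtrip() -> "Lock":
    lock = Lock(amount=10, expiration=50, secrethash=bytes(32))
    assert len(lock.as_bytes) == 96
    return Lock.from_bytes(lock.as_bytes)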
@dataclass(repr=False, eq=False)
class EnvelopeMessage(SignedRetrieableMessage):
""" Contains an on-chain message and shares its signature.
For performance reasons envelope messages share the signature with the
blockchain message. The same signature is used for authenticating for both
the client and the smart contract.
"""
chain_id: ChainID
nonce: Nonce
transferred_amount: TokenAmount
locked_amount: TokenAmount
locksroot: Locksroot
channel_identifier: ChannelID
token_network_address: TokenNetworkAddress
def __post_init__(self):
assert_envelope_values(
self.nonce,
self.channel_identifier,
self.transferred_amount,
self.locked_amount,
self.locksroot,
)
@property
def message_hash(self):
raise NotImplementedError
def _data_to_sign(self) -> bytes:
balance_hash = hash_balance_data(
self.transferred_amount, self.locked_amount, self.locksroot
)
balance_proof_packed = pack_balance_proof(
nonce=self.nonce,
balance_hash=balance_hash,
additional_hash=self.message_hash,
canonical_identifier=CanonicalIdentifier(
chain_identifier=self.chain_id,
token_network_address=self.token_network_address,
channel_identifier=self.channel_identifier,
),
)
return balance_proof_packed
@dataclass(repr=False, eq=False)
class SecretRequest(SignedRetrieableMessage):
""" Requests the secret/preimage which unlocks a lock. """
cmdid: ClassVar[CmdId] = CmdId.SECRETREQUEST
payment_identifier: PaymentID
secrethash: SecretHash
amount: PaymentAmount
expiration: BlockExpiration
@classmethod
def from_event(cls, event):
# pylint: disable=unexpected-keyword-arg
return cls(
message_identifier=event.message_identifier,
payment_identifier=event.payment_identifier,
secrethash=event.secrethash,
amount=event.amount,
expiration=event.expiration,
signature=EMPTY_SIGNATURE,
)
def _data_to_sign(self) -> bytes:
return pack_data(
(self.cmdid.value, "uint8"),
(b"\x00" * 3, "bytes"), # padding
(self.message_identifier, "uint64"),
(self.payment_identifier, "uint64"),
(self.secrethash, "bytes32"),
(self.amount, "uint256"),
(self.expiration, "uint256"),
)
@dataclass(repr=False, eq=False)
class Unlock(EnvelopeMessage):
""" Message used to succesfully unlock a lock.
For this message to be valid the balance proof has to be updated to:
- Remove the succesfull lock from the pending locks and decrement the
locked_amount by the lock's amount, otherwise the sender will pay twice.
- Increase the transferred_amount, otherwise the recepient will reject it
because it is not being paid.
This message is needed to unlock off-chain transfers for channels that used
less frequently then the pending locks' expiration, otherwise the receiving
end would have to go on-chain to register the secret.
This message is needed in addition to the RevealSecret to fix
synchronization problems. The recipient can not preemptively update its
channel state because there may other messages in-flight. Consider the
following case:
1. Node A sends a LockedTransfer to B.
2. Node B forwards and eventually receives the secret
3. Node A sends a second LockedTransfer to B.
At point 3, node A had no knowledge about the first payment having its
    secret revealed, therefore the pending locks from the message at step 3 will
include both locks. If B were to preemptively remove the lock it would
reject the message.
"""
cmdid: ClassVar[CmdId] = CmdId.UNLOCK
payment_identifier: PaymentID
secret: Secret = field(repr=False)
def __post_init__(self):
super().__post_init__()
if self.payment_identifier < 0:
raise ValueError("payment_identifier cannot be negative")
if self.payment_identifier > UINT64_MAX:
raise ValueError("payment_identifier is too large")
if len(self.secret) != 32:
raise ValueError("secret must have 32 bytes")
@property
def secrethash(self):
return sha256(self.secret).digest()
@classmethod
def from_event(cls, event):
balance_proof = event.balance_proof
# pylint: disable=unexpected-keyword-arg
return cls(
chain_id=balance_proof.chain_id,
message_identifier=event.message_identifier,
payment_identifier=event.payment_identifier,
nonce=balance_proof.nonce,
token_network_address=balance_proof.token_network_address,
channel_identifier=balance_proof.channel_identifier,
transferred_amount=balance_proof.transferred_amount,
locked_amount=balance_proof.locked_amount,
locksroot=balance_proof.locksroot,
secret=event.secret,
signature=EMPTY_SIGNATURE,
)
@property
def message_hash(self) -> bytes:
return sha3(
pack_data(
(self.cmdid.value, "uint8"),
(self.message_identifier, "uint64"),
(self.payment_identifier, "uint64"),
(self.secret, "bytes32"),
)
)
@dataclass(repr=False, eq=False)
class RevealSecret(SignedRetrieableMessage):
"""Reveal the lock's secret.
    This message is not sufficient to unlock a lock; refer to the Unlock message.
"""
cmdid: ClassVar[CmdId] = CmdId.REVEALSECRET
secret: Secret = field(repr=False)
@property
def secrethash(self):
return sha256(self.secret).digest()
@classmethod
def from_event(cls, event):
# pylint: disable=unexpected-keyword-arg
return cls(
message_identifier=event.message_identifier,
secret=event.secret,
signature=EMPTY_SIGNATURE,
)
def _data_to_sign(self) -> bytes:
return pack_data(
(self.cmdid.value, "uint8"),
(b"\x00" * 3, "bytes"), # padding
(self.message_identifier, "uint64"),
(self.secret, "bytes32"),
)
@dataclass(repr=False, eq=False)
class LockedTransferBase(EnvelopeMessage):
""" A transfer which signs that the partner can claim `locked_amount` if
she knows the secret to `secrethash`.
"""
payment_identifier: PaymentID
token: TokenAddress
recipient: Address
lock: Lock
target: TargetAddress
initiator: InitiatorAddress
fee: int
metadata: Metadata
def __post_init__(self):
super().__post_init__()
assert_transfer_values(self.payment_identifier, self.token, self.recipient)
if len(self.target) != 20:
raise ValueError("target is an invalid address")
if len(self.initiator) != 20:
raise ValueError("initiator is an invalid address")
if self.fee > UINT256_MAX:
raise ValueError("fee is too large")
@overload
@classmethod
def from_event(cls, event: SendLockedTransfer) -> "LockedTransfer":
# pylint: disable=unused-argument
...
@overload # noqa: F811
@classmethod
def from_event(cls, event: SendRefundTransfer) -> "RefundTransfer":
# pylint: disable=unused-argument
...
@classmethod # noqa: F811
def from_event(cls, event):
transfer = event.transfer
balance_proof = transfer.balance_proof
lock = Lock(
amount=transfer.lock.amount,
expiration=transfer.lock.expiration,
secrethash=transfer.lock.secrethash,
)
fee = 0
# pylint: disable=unexpected-keyword-arg
return cls(
chain_id=balance_proof.chain_id,
message_identifier=event.message_identifier,
payment_identifier=transfer.payment_identifier,
nonce=balance_proof.nonce,
token_network_address=balance_proof.token_network_address,
token=transfer.token,
channel_identifier=balance_proof.channel_identifier,
transferred_amount=balance_proof.transferred_amount,
locked_amount=balance_proof.locked_amount,
recipient=event.recipient,
locksroot=balance_proof.locksroot,
lock=lock,
target=transfer.target,
initiator=transfer.initiator,
fee=fee,
signature=EMPTY_SIGNATURE,
metadata=Metadata(
routes=[RouteMetadata(route=r.route) for r in transfer.route_states]
),
)
def _packed_data(self):
return pack_data(
(self.cmdid.value, "uint8"),
(self.message_identifier, "uint64"),
(self.payment_identifier, "uint64"),
(self.lock.expiration, "uint256"),
(self.token, "address"),
(self.recipient, "address"),
(self.target, "address"),
(self.initiator, "address"),
(self.lock.secrethash, "bytes32"),
(self.lock.amount, "uint256"),
(self.fee, "uint256"),
)
@dataclass(repr=False, eq=False)
class LockedTransfer(LockedTransferBase):
""" Message used to reserve tokens for a new mediated transfer.
For this message to be valid, the sender must:
    - Use a lock.amount smaller than its current capacity. If the amount is
higher, then the recipient will reject it, as it means spending money it
does not own.
- Have the new lock represented in locksroot.
- Increase the locked_amount by exactly `lock.amount` otherwise the message
would be rejected by the recipient. If the locked_amount is increased by
more, then funds may get locked in the channel. If the locked_amount is
increased by less, then the recipient will reject the message as it may
mean it received the funds with an on-chain unlock.
The initiator will estimate the fees based on the available routes and
incorporate it in the lock's amount. Note that with permissive routing it
is not possible to predetermine the exact fee amount, as the initiator does
not know which nodes are available, thus an estimated value is used.
"""
cmdid: ClassVar[CmdId] = CmdId.LOCKEDTRANSFER
@property
def message_hash(self) -> bytes:
metadata_hash = (self.metadata and self.metadata.hash) or b""
return sha3(self._packed_data() + metadata_hash)
@dataclass(repr=False, eq=False)
class RefundTransfer(LockedTransferBase):
""" A message used when a payee does not have any available routes to
forward the transfer.
This message is used by the payee to refund the payer when no route is
available. This transfer refunds the payer, allowing him to try a new path
to complete the transfer.
"""
cmdid: ClassVar[CmdId] = CmdId.REFUNDTRANSFER
@property
def message_hash(self) -> bytes:
return sha3(self._packed_data())
@dataclass(repr=False, eq=False)
class LockExpired(EnvelopeMessage):
""" Message used when a lock expires.
    This will complete an unsuccessful transfer off-chain.
For this message to be valid the balance proof has to be updated to:
- Remove the expired lock from the pending locks and reflect it in the
locksroot.
    - Decrease the locked_amount by exactly lock.amount. If fewer tokens are
      decreased the sender may get tokens locked. If more tokens are decreased
      the recipient will reject the message as on-chain unlocks may fail.
This message is necessary for synchronization since other messages may be
in-flight, vide Unlock for examples.
"""
cmdid: ClassVar[CmdId] = CmdId.LOCKEXPIRED
recipient: Address
secrethash: SecretHash
@classmethod
def from_event(cls, event):
balance_proof = event.balance_proof
# pylint: disable=unexpected-keyword-arg
return cls(
chain_id=balance_proof.chain_id,
nonce=balance_proof.nonce,
token_network_address=balance_proof.token_network_address,
channel_identifier=balance_proof.channel_identifier,
transferred_amount=balance_proof.transferred_amount,
locked_amount=balance_proof.locked_amount,
locksroot=balance_proof.locksroot,
message_identifier=event.message_identifier,
recipient=event.recipient,
secrethash=event.secrethash,
signature=EMPTY_SIGNATURE,
)
@property
def message_hash(self) -> bytes:
return sha3(
pack_data(
(self.cmdid.value, "uint8"),
(self.message_identifier, "uint64"),
(self.recipient, "address"),
(self.secrethash, "bytes32"),
)
)
| 33.141491
| 96
| 0.666301
|
e922b3e17962a270acc1cd9793355adaea33e56e
| 11,995
|
py
|
Python
|
srcs/model.py
|
huy-ha/PIXOR
|
9ad2b451470aa30c7dd40be23b526f6fbedb6bc1
|
[
"MIT"
] | 259
|
2019-02-19T07:38:55.000Z
|
2022-03-28T05:58:42.000Z
|
srcs/model.py
|
huy-ha/PIXOR
|
9ad2b451470aa30c7dd40be23b526f6fbedb6bc1
|
[
"MIT"
] | 30
|
2019-02-18T08:43:27.000Z
|
2021-02-05T15:51:36.000Z
|
srcs/model.py
|
huy-ha/PIXOR
|
9ad2b451470aa30c7dd40be23b526f6fbedb6bc1
|
[
"MIT"
] | 59
|
2018-10-28T17:42:03.000Z
|
2022-02-15T11:59:32.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from utils import maskFOV_on_BEV
def conv3x3(in_planes, out_planes, stride=1, bias=False):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=bias)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
#out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
#out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, downsample=None, use_bn=True):
super(Bottleneck, self).__init__()
bias = not use_bn
self.use_bn = use_bn
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=bias)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.downsample = downsample
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out = self.conv1(x)
if self.use_bn:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.use_bn:
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.use_bn:
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.relu(residual + out)
return out
class BackBone(nn.Module):
def __init__(self, block, num_block, geom, use_bn=True):
super(BackBone, self).__init__()
self.use_bn = use_bn
# Block 1
self.conv1 = conv3x3(36, 32)
self.conv2 = conv3x3(32, 32)
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
# Block 2-5
self.in_planes = 32
self.block2 = self._make_layer(block, 24, num_blocks=num_block[0])
self.block3 = self._make_layer(block, 48, num_blocks=num_block[1])
self.block4 = self._make_layer(block, 64, num_blocks=num_block[2])
self.block5 = self._make_layer(block, 96, num_blocks=num_block[3])
# Lateral layers
self.latlayer1 = nn.Conv2d(384, 196, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(192, 96, kernel_size=1, stride=1, padding=0)
# Top-down layers
self.deconv1 = nn.ConvTranspose2d(196, 128, kernel_size=3, stride=2, padding=1, output_padding=1)
p = 0 if geom['label_shape'][1] == 175 else 1
self.deconv2 = nn.ConvTranspose2d(128, 96, kernel_size=3, stride=2, padding=1, output_padding=(1, p))
def forward(self, x):
x = self.conv1(x)
if self.use_bn:
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
if self.use_bn:
x = self.bn2(x)
c1 = self.relu(x)
# bottom up layers
c2 = self.block2(c1)
c3 = self.block3(c2)
c4 = self.block4(c3)
c5 = self.block5(c4)
l5 = self.latlayer1(c5)
l4 = self.latlayer2(c4)
p5 = l4 + self.deconv1(l5)
l3 = self.latlayer3(c3)
p4 = l3 + self.deconv2(p5)
return p4
def _make_layer(self, block, planes, num_blocks):
if self.use_bn:
downsample = nn.Sequential(
nn.Conv2d(self.in_planes, planes * block.expansion,
kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
else:
downsample = nn.Conv2d(self.in_planes, planes * block.expansion,
kernel_size=1, stride=2, bias=True)
layers = []
layers.append(block(self.in_planes, planes, stride=2, downsample=downsample))
self.in_planes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(block(self.in_planes, planes, stride=1))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
        may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.upsample(x, size=(H, W), mode='bilinear') + y
class Header(nn.Module):
def __init__(self, use_bn=True):
super(Header, self).__init__()
self.use_bn = use_bn
bias = not use_bn
self.conv1 = conv3x3(96, 96, bias=bias)
self.bn1 = nn.BatchNorm2d(96)
self.conv2 = conv3x3(96, 96, bias=bias)
self.bn2 = nn.BatchNorm2d(96)
self.conv3 = conv3x3(96, 96, bias=bias)
self.bn3 = nn.BatchNorm2d(96)
self.conv4 = conv3x3(96, 96, bias=bias)
self.bn4 = nn.BatchNorm2d(96)
self.clshead = conv3x3(96, 1, bias=True)
self.reghead = conv3x3(96, 6, bias=True)
def forward(self, x):
x = self.conv1(x)
if self.use_bn:
x = self.bn1(x)
x = self.conv2(x)
if self.use_bn:
x = self.bn2(x)
x = self.conv3(x)
if self.use_bn:
x = self.bn3(x)
x = self.conv4(x)
if self.use_bn:
x = self.bn4(x)
cls = torch.sigmoid(self.clshead(x))
reg = self.reghead(x)
return cls, reg
class Decoder(nn.Module):
def __init__(self, geom):
super(Decoder, self).__init__()
self.geometry = [geom["L1"], geom["L2"], geom["W1"], geom["W2"]]
self.grid_size = 0.4
self.target_mean = [0.008, 0.001, 0.202, 0.2, 0.43, 1.368]
self.target_std_dev = [0.866, 0.5, 0.954, 0.668, 0.09, 0.111]
def forward(self, x):
'''
:param x: Tensor 6-channel geometry
6 channel map of [cos(yaw), sin(yaw), log(x), log(y), w, l]
Shape of x: (B, C=6, H=200, W=175)
:return: Concatenated Tensor of 8 channel geometry map of bounding box corners
8 channel are [rear_left_x, rear_left_y,
rear_right_x, rear_right_y,
front_right_x, front_right_y,
front_left_x, front_left_y]
Return tensor has a shape (B, C=8, H=200, W=175), and is located on the same device as x
'''
# Tensor in (B, C, H, W)
device = torch.device('cpu')
if x.is_cuda:
device = x.get_device()
for i in range(6):
x[:, i, :, :] = x[:, i, :, :] * self.target_std_dev[i] + self.target_mean[i]
cos_t, sin_t, dx, dy, log_w, log_l = torch.chunk(x, 6, dim=1)
theta = torch.atan2(sin_t, cos_t)
cos_t = torch.cos(theta)
sin_t = torch.sin(theta)
x = torch.arange(self.geometry[2], self.geometry[3], self.grid_size, dtype=torch.float32, device=device)
y = torch.arange(self.geometry[0], self.geometry[1], self.grid_size, dtype=torch.float32, device=device)
yy, xx = torch.meshgrid([y, x])
centre_y = yy + dy
centre_x = xx + dx
l = log_l.exp()
w = log_w.exp()
rear_left_x = centre_x - l/2 * cos_t - w/2 * sin_t
rear_left_y = centre_y - l/2 * sin_t + w/2 * cos_t
rear_right_x = centre_x - l/2 * cos_t + w/2 * sin_t
rear_right_y = centre_y - l/2 * sin_t - w/2 * cos_t
front_right_x = centre_x + l/2 * cos_t + w/2 * sin_t
front_right_y = centre_y + l/2 * sin_t - w/2 * cos_t
front_left_x = centre_x + l/2 * cos_t - w/2 * sin_t
front_left_y = centre_y + l/2 * sin_t + w/2 * cos_t
decoded_reg = torch.cat([rear_left_x, rear_left_y, rear_right_x, rear_right_y,
front_right_x, front_right_y, front_left_x, front_left_y], dim=1)
return decoded_reg
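    # Worked example for a single cell, ignoring the target mean/std de-normalisation:
    # with cos(yaw)=1, sin(yaw)=0, dx=dy=0, w=2, l=4 at grid centre (x, y) = (10, 0),
    # the corners come out axis-aligned: rear_left = (8, 1), rear_right = (8, -1),
    # front_right = (12, -1), front_left = (12, 1). (Values chosen only to illustrate
    # the eight-channel corner layout returned above.)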
class PIXOR(nn.Module):
'''
    The input of the PIXOR nn module is a tensor of [batch_size, height, width, channel]
    The output of the PIXOR nn module is also a tensor of [batch_size, height/4, width/4, channel]
    Note that we convert the dimensions to [C, H, W] for PyTorch's nn.Conv2d functions
'''
def __init__(self, geom, use_bn=True, decode=False):
super(PIXOR, self).__init__()
self.backbone = BackBone(Bottleneck, [3, 6, 6, 3], geom, use_bn)
self.header = Header(use_bn)
self.corner_decoder = Decoder(geom)
self.use_decode = decode
self.cam_fov_mask = maskFOV_on_BEV(geom['label_shape'])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
prior = 0.01
self.header.clshead.weight.data.fill_(-math.log((1.0-prior)/prior))
self.header.clshead.bias.data.fill_(0)
self.header.reghead.weight.data.fill_(0)
self.header.reghead.bias.data.fill_(0)
def set_decode(self, decode):
self.use_decode = decode
def forward(self, x):
device = torch.device('cpu')
if x.is_cuda:
device = x.get_device()
# x = x.permute(0, 3, 1, 2)
# Torch Takes Tensor of shape (Batch_size, channels, height, width)
features = self.backbone(x)
cls, reg = self.header(features)
self.cam_fov_mask = self.cam_fov_mask.to(device)
cls = cls * self.cam_fov_mask
if self.use_decode:
decoded = self.corner_decoder(reg)
# Return tensor(Batch_size, height, width, channels)
#decoded = decoded.permute(0, 2, 3, 1)
#cls = cls.permute(0, 2, 3, 1)
#reg = reg.permute(0, 2, 3, 1)
pred = torch.cat([cls, reg, decoded], dim=1)
else:
pred = torch.cat([cls, reg], dim=1)
return pred
def test_decoder(decode = True):
geom = {
"L1": -40.0,
"L2": 40.0,
"W1": 0.0,
"W2": 70.0,
"H1": -2.5,
"H2": 1.0,
"input_shape": [800, 700, 36],
"label_shape": [200, 175, 7]
}
print("Testing PIXOR decoder")
net = PIXOR(geom, use_bn=False)
net.set_decode(decode)
preds = net(torch.autograd.Variable(torch.randn(2, 800, 700, 36)))
print("Predictions output size", preds.size())
if __name__ == "__main__":
test_decoder()
| 33.788732
| 112
| 0.573739
|
6f5ccf793074b0879934c853fa8a5aa0bcb4666b
| 7,253
|
py
|
Python
|
negative_binomial/core.py
|
zsusswein/negative_binomial
|
1e99e31c743354d5a002f97dcaf24f31a5d731cc
|
[
"MIT"
] | null | null | null |
negative_binomial/core.py
|
zsusswein/negative_binomial
|
1e99e31c743354d5a002f97dcaf24f31a5d731cc
|
[
"MIT"
] | null | null | null |
negative_binomial/core.py
|
zsusswein/negative_binomial
|
1e99e31c743354d5a002f97dcaf24f31a5d731cc
|
[
"MIT"
] | null | null | null |
''' This module contains functions necessary to fit a negative binomial
using the maximum likelihood estimator and some numerical analysis
@author: Zachary Susswein (based on original code by Peter Xenopoulos)
'''
## Libraries
import numpy as np
from scipy.optimize import minimize
from scipy.stats import nbinom
import matplotlib.pyplot as plt
## Functions
def nu_sum(vec_element, k):
'''
This function efficiently computes the gamma function term of the NB log lik
by expanding the sum into a grid. Treats the gamma function as a logged
factorial because the data must be integer values.
@param vec_element: an element of the data vector
@param k: the value of the dispersion parameter
'''
nu = np.arange(0, vec_element, 1)
return np.sum(np.log(1 + nu / k))
def neg_log_lik(k, y_bar, vec, n):
'''
This function computes the negative log likelihood of the NB dist. using the
MLE estimate of the mean, y_bar, and a set version of the dispersion parameter.
This approach produces a biased estimate because it does not account for
the use of the unbiased estimator of the sample mean (y_bar) in the place
of the population mean.
@param k: the dispersion parameter
@param y_bar: the sample mean, an unbiased estimator of the population mean
@param vec: the data vector
@param n: the number of observations
'''
x = 0
for i in range(n):
x += nu_sum(vec[i], k)
log_lik = (x / n) + y_bar * np.log(y_bar) - (y_bar + k) * np.log(1 + y_bar / k)
return -log_lik
def plot_pmf(k_hat, y_bar, vec):
'''
plot the estimated pmf over the data
@param k_hat: the estimated value of the NB dispersion parameter
@param y_bar: the estimated value of the NB mean
'''
p_hat = (y_bar**2 / k_hat) / (y_bar + (y_bar**2 / k_hat))
n_hat = y_bar**2 / (y_bar**2 / k_hat)
x = np.arange(min(vec), max(vec + 1), 1)
y_tilde = nbinom(n = n_hat,
p = p_hat)
plt.hist(vec, alpha = .2)
plt.plot(y_tilde.pmf(x) * len(vec), color = 'blue')
return None
def neg_bin_fit(vec, init = 1, plot = False):
'''
The workhorse function to fit negative binomial dist. to data. Assumes that underdispersion
does not occur, which guarantees the score has at least one root in the positive reals.
Uses the mean and dispersion parameterization of the pmf common in ecology.
@param vec: The data vector used to fit the negative binomial distribution
@param init: The initial estimate for k, the dispersion parameter
@param plot: whether to plot the fitted distribution over the data
'''
#####
## Type and data checking
# Check the input is properly specified
if not isinstance(vec, np.ndarray):
raise TypeError("Argument 'vec' must be a numpy.ndarray")
if len(vec.shape) != 1:
raise TypeError("Argument 'vec' must be a vector with shape (n,)")
if (not np.issubdtype(vec.dtype, np.integer)):
raise ValueError("Numpy array elements must be of type int")
if type(plot) is not bool:
raise TypeError('Argument `plot` must be a boolean')
if (type(init) is not float) & (type(init) is not int):
raise TypeError('Argument `init` must be of type float or type int')
if init <= 0:
raise ValueError('Argument `init` must be greater than zero')
# Check the data
if np.sum(vec < 0) > 0:
raise ValueError("Data must all be greater than or equal to zero, negative number provided")
if np.mean(vec) > np.var(vec):
raise ValueError("Data are underdispersed; fitting method does not allow for underdispersion")
#####
## Fit the NB dist. to the vector
# MLE of the mean
y_bar = np.mean(vec)
# MLE of k
fit = minimize(fun = neg_log_lik,
x0 = 1,
args = (np.mean(vec), vec, len(vec),),
method = 'L-BFGS-B',
bounds = ((0.00001, None),))
mean = np.array([y_bar, fit.x[0]])
se = np.array([np.std(vec) / len(vec), np.sqrt(1 / fit.hess_inv.todense()[0, 0])])
if plot:
plot_pmf(mean[1], mean[0], vec)
return mean, se
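# A minimal usage sketch under the module's assumptions (integer, overdispersed data);
# the simulated counts are placeholders and default_rng assumes NumPy >= 1.17.
def _example_fit():
    rng = np.random.default_rng(0)
    counts = rng.negative_binomial(n=2, p=0.3, size=500)  # overdispersed by construction
    (mu_hat, k_hat), (se_mu, se_k) = neg_bin_fit(counts, init=1, plot=False)
    return mu_hat, k_hat, se_mu, se_k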
def get_sample(vec, max_sample_try):
'''
This function generates a single bootstrap sample from the data. It rejects
sample values that are underdispersed.
'''
success = 0
n_sample_try = 0
while (success == 0) & (n_sample_try < max_sample_try):
sample = np.random.choice(a = vec, size = vec.shape[0], replace = True)
n_sample_try += 1
if np.mean(sample) < np.var(sample):
success = 1
if success == 0:
raise Exception("Unable to generate samples without underdispersion")
return sample
def bootstrap_CI(vec, init = 1, alpha = .05, n_samples = 1000, max_sample_try = 100, plot = True):
'''
    This function uses bootstrapping to compute the 100 * (1 - alpha)% confidence
    intervals for the negative binomial mean and dispersion parameter.
@param vec: the data vector
@param init: the initial value to pass to `neg_bin_fit()`
@param alpha: the desired confidence level (by default 0.05)
@param n_samples: the number of bootstrap samples
@param max_sample_try: the maximum number of times to attempt bootstrapping
each sample before raising an exception. Passed to 'get_sample()'
@param plot: A boolean, whether to plot the results
'''
sample_results = np.empty((n_samples, 2))
for i in range(n_samples):
sample_results[i], se = neg_bin_fit(vec = get_sample(vec, max_sample_try),
init = init,
plot = False)
boot_upper = np.quantile(sample_results, 1 - alpha, axis = 0)
boot_lower = np.quantile(sample_results, alpha, axis = 0)
boot_mean = np.mean(sample_results, axis = 0)
mean, se = neg_bin_fit(vec = vec,
init = init,
plot = False)
z_upper = mean + 1.96 * se
z_lower = mean - 1.96 * se
if plot:
x = np.array([0, .5])
fig, ax = plt.subplots()
ax.plot(x, mean, 'o', color = 'blue', label = 'Bootstrap')
ax.plot((x, x), (boot_upper, boot_lower), color = 'blue')
#ax.errorbar(x = x, y = mean, yerr = boot_upper - boot_lower, fmt = 'o', color = 'blue')
ax.set_xticks(x)
ax.set_xticklabels(['Bootstrap: mu_hat', 'Bootstrap: k_hat'])
ax.set_title('Confidence intervals for means')
ax.plot(x+.1, mean, 'o', color = 'green', label = 'Normal approx')
ax.plot((x+.1, x+.1), (z_upper, z_lower), color = 'green')
#ax.errorbar(x = x+.1, y = mean, yerr = z_upper - z_lower, fmt = 'o', color = 'green')
ax.set_xticks(x)
ax.set_xticklabels(['mu_hat', 'k_hat'])
ax.legend()
plt.show()
return np.array([boot_upper, boot_mean, boot_lower]), np.array([z_upper, mean, z_lower])
| 32.235556
| 102
| 0.607059
|
8f74e0a21cbfb0171884ca0833ab2d9e8fa68034
| 9,832
|
py
|
Python
|
guillotina/contrib/dyncontent/subscriber.py
|
vangheem/guillotina
|
fb678dd81807fd4c63aa1ef3dae4b9a151e3f274
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/contrib/dyncontent/subscriber.py
|
vangheem/guillotina
|
fb678dd81807fd4c63aa1ef3dae4b9a151e3f274
|
[
"BSD-2-Clause"
] | 1
|
2021-01-16T01:02:36.000Z
|
2021-01-29T17:07:58.000Z
|
guillotina/contrib/dyncontent/subscriber.py
|
vangheem/guillotina
|
fb678dd81807fd4c63aa1ef3dae4b9a151e3f274
|
[
"BSD-2-Clause"
] | null | null | null |
from guillotina import app_settings
from guillotina import BEHAVIOR_CACHE
from guillotina import configure
from guillotina import FACTORY_CACHE
from guillotina.component import get_global_components
from guillotina.component import get_utility
from guillotina.component import query_utility
from guillotina.content import get_cached_factory
from guillotina.content import load_cached_schema
from guillotina.contrib.dyncontent import behaviors
from guillotina.contrib.dyncontent import contents
from guillotina.contrib.dyncontent.vocabularies import AppSettingSource
from guillotina.directives import index_field
from guillotina.directives import metadata
from guillotina.directives import read_permission
from guillotina.directives import write_permission
from guillotina.interfaces import IApplication
from guillotina.interfaces import IApplicationInitializedEvent
from guillotina.interfaces import IBehavior
from guillotina.interfaces import IResourceFactory
from guillotina.schema.vocabulary import SimpleVocabulary
from guillotina.utils import import_class
from zope.interface import Interface
from zope.interface.interface import InterfaceClass
import json
import logging
import typing
SUPPORTED_DIRECTIVES = {
"index": index_field,
"read_permission": read_permission,
"write_permission": write_permission,
"metadata": metadata,
}
logger = logging.getLogger("guillotina.contrib.dyncontent")
def get_vocabulary(prop, params):
# Vocabulary option
if "vocabulary" in prop:
if isinstance(prop["vocabulary"], dict):
params["vocabulary"] = SimpleVocabulary.fromItems([x for x in prop["vocabulary"].items()])
elif prop["vocabulary"].startswith("appsettings:"):
params["source"] = AppSettingSource(prop["vocabulary"].replace("appsettings:", ""))
else:
params["vocabulary"] = prop["vocabulary"]
def get_fields(*, properties: typing.Dict[str, typing.Dict]):
fields: typing.Dict[str, typing.Any] = {}
tags: typing.Dict[str, typing.Any] = {}
for prop_id, prop in properties.items():
params: typing.Dict[str, typing.Any] = {}
field_class = typing.cast(typing.Callable, import_class(typing.cast(str, prop.get("type"))))
# Vocabulary
get_vocabulary(prop, params)
# Required
params["required"] = prop.get("required", False)
# Title
params["title"] = prop.get("title")
widget = prop.get("widget", None)
if widget:
params["widget"] = widget
# Schema
schema = prop.get("schema", None)
if schema:
params["schema"] = json.dumps(schema)
# Value type
value_type = prop.get("value_type", None)
if value_type:
value_class = typing.cast(typing.Callable, import_class(value_type))
params["value_type"] = value_class(required=False, title=params["title"] + " value")
# Default
if prop.get("default", None) is not None:
params["default"] = prop.get("default")
# Index
index = prop.get("index", None)
if index:
tags.setdefault(prop_id, {})["index"] = index
write_permission = prop.get("write_permission", None)
if write_permission:
tags.setdefault(prop_id, {})["write_permission"] = write_permission
metadata = prop.get("metadata", None)
if metadata:
tags.setdefault(prop_id, {})["metadata"] = None
read_permission = prop.get("read_permission", None)
if read_permission:
tags.setdefault(prop_id, {})["read_permission"] = read_permission
fields[prop_id] = field_class(**params) # noqa
#
return fields, tags
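# A sketch of the kind of `properties` mapping get_fields() expects; the field names
# and guillotina.schema paths below are assumptions for illustration, not a definition
# taken from the package's documentation.
_EXAMPLE_PROPERTIES = {
    "title": {"type": "guillotina.schema.TextLine", "title": "Title", "required": True},
    "count": {"type": "guillotina.schema.Int", "title": "Count", "index": {"type": "int"}},
}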
def create_content_factory(proto_name, proto_definition):
parent_interface = import_class(
proto_definition.get("inherited_interface", "guillotina.interfaces.content.IFolder")
)
parent_class = import_class(proto_definition.get("inherited_class", "guillotina.content.Folder"))
schema_fields, tags = get_fields(properties=proto_definition.get("properties"))
class_interface = InterfaceClass(
"I" + proto_name,
(parent_interface,),
schema_fields,
__module__="guillotina.contrib.dyncontent.interfaces",
)
for field_id, tag in tags.items():
for tag_id, tag_metadata in tag.items():
if tag_id in SUPPORTED_DIRECTIVES:
if tag_metadata is None:
SUPPORTED_DIRECTIVES[tag_id].apply(class_interface, field_id)
elif isinstance(tag_metadata, dict):
SUPPORTED_DIRECTIVES[tag_id].apply(class_interface, field_id, **tag_metadata)
elif isinstance(tag_metadata, list):
SUPPORTED_DIRECTIVES[tag_id].apply(class_interface, field_id, *tag_metadata)
elif tag_id == "fieldset":
SUPPORTED_DIRECTIVES[tag_id].apply(class_interface, field_id, tag_metadata)
elif isinstance(tag_metadata, str):
SUPPORTED_DIRECTIVES[tag_id].apply(class_interface, **{field_id: tag_metadata})
klass = type(proto_name, (parent_class,), {})
klass.__module__ = "guillotina.contrib.dyncontent.contents"
setattr(contents, proto_name, klass)
behaviors = []
for bhr in proto_definition.get("behaviors", []):
if bhr in BEHAVIOR_CACHE:
behaviors.append(BEHAVIOR_CACHE[bhr])
else:
raise Exception(f"Behavior not found {bhr}")
contenttype = {
"schema": class_interface,
"type_name": proto_name,
"allowed_types": proto_definition.get("allowed_types", []),
"add_permission": proto_definition.get("add_permission", "guillotina.AddContent"),
"behaviors": behaviors,
}
utility = query_utility(IResourceFactory, name=proto_name)
if utility is not None:
sm = get_global_components()
sm.unregisterUtility(utility, IResourceFactory, proto_name)
configure.register_configuration(klass, contenttype, "contenttype")
def create_behaviors_factory(proto_name, proto_definition):
if proto_definition.get("for", None) is None:
raise Exception("We need a for interface")
else:
for_ = import_class(proto_definition.get("for"))
if for_ is None:
raise Exception("Wrong for interface")
parent_class = import_class(
proto_definition.get("inherited_class", "guillotina.behaviors.instance.AnnotationBehavior")
)
schema_fields, tags = get_fields(properties=proto_definition.get("properties"))
base_interface = proto_definition.get("base_interface", None)
if base_interface is None:
base_interface = Interface
class_interface = InterfaceClass(
"I" + proto_name,
(base_interface,),
schema_fields,
__module__="guillotina.contrib.dyncontent.interfaces",
)
for field_id, tag in tags.items():
for tag_id, tag_metadata in tag.items():
if tag_id in SUPPORTED_DIRECTIVES:
SUPPORTED_DIRECTIVES[tag_id].apply(class_interface, field_id, tag_metadata)
klass = type(proto_name, (parent_class,), {})
klass.__module__ = "guillotina.contrib.dyncontent.behaviors"
setattr(behaviors, proto_name, klass)
behavior = {
"for_": for_,
"provides": class_interface,
"data_key": proto_definition.get("data_key", "default"),
"auto_serialize": proto_definition.get("auto_serialize", True),
"name": proto_name,
"name_only": proto_definition.get("name_only", False),
"title": proto_definition.get("title", ""),
"marker": proto_definition.get("marker", None),
"description": proto_definition.get("description", ""),
}
configure.register_configuration(klass, behavior, "behavior")
def reload_behavior_configuration():
root = get_utility(IApplication, name="root")
configure.load_configuration(root.app.config, "guillotina.contrib.dyncontent.behaviors", "behavior")
root.app.config.execute_actions()
load_cached_schema()
def reload_content_configuration():
root = get_utility(IApplication, name="root")
configure.load_configuration(root.app.config, "guillotina.contrib.dyncontent.contents", "contenttype")
root.app.config.execute_actions()
load_cached_schema()
@configure.subscriber(for_=IApplicationInitializedEvent)
async def add_initialized(event):
type_names = []
behaviors = []
for type_name, definition in app_settings.get("behaviors", {}).items():
create_behaviors_factory(type_name, definition)
behaviors.append(type_name)
reload_behavior_configuration()
for type_name, definition in app_settings.get("contents", {}).items():
create_content_factory(type_name, definition)
type_names.append(type_name)
reload_content_configuration()
for type_name in type_names:
# Verify its created
if type_name in FACTORY_CACHE:
del FACTORY_CACHE[type_name]
get_cached_factory(type_name)
for proto_name in behaviors:
# Verify its created
interface_name = "guillotina.contrib.dyncontent.interfaces.I" + proto_name
utility = get_utility(IBehavior, name=interface_name)
class_interface = import_class(interface_name)
assert BEHAVIOR_CACHE[interface_name].__identifier__ == interface_name
utility.interface == class_interface
@configure.subscriber(for_=IApplicationInitializedEvent)
async def clean_up(event):
for type_name, _ in app_settings.get("behaviors", {}).items():
configure.clear_behavior_by_name(type_name)
for type_name, _ in app_settings.get("contents", {}).items():
configure.clear_contenttype_by_name(type_name)
| 36.014652
| 106
| 0.691721
|
31eb14277707e5887fc4027906ee43cc89da2a46
| 131
|
py
|
Python
|
source/constant.py
|
gilbertHuang/CG-diskusage
|
be448bb76419b43fb43c790836f9182a7773f8ff
|
[
"MIT"
] | null | null | null |
source/constant.py
|
gilbertHuang/CG-diskusage
|
be448bb76419b43fb43c790836f9182a7773f8ff
|
[
"MIT"
] | null | null | null |
source/constant.py
|
gilbertHuang/CG-diskusage
|
be448bb76419b43fb43c790836f9182a7773f8ff
|
[
"MIT"
] | null | null | null |
size_unit = ['b', 'kb', 'Mb', 'Gb', 'Tb']
size_diff = 1000
size_round = 2
sort_size_name = 'size'
sort_children_name = 'children'
| 18.714286
| 41
| 0.656489
|
38671f73696e5b05a2ce9d8c3567ee9e0b840436
| 1,811
|
py
|
Python
|
mpisppy/tests/test_sc.py
|
vishalbelsare/mpi-sppy
|
019fa1c04396a5bcadf758a31bc96217c17b43c9
|
[
"BSD-3-Clause"
] | 2
|
2020-06-05T14:31:46.000Z
|
2020-09-29T20:08:05.000Z
|
mpisppy/tests/test_sc.py
|
vishalbelsare/mpi-sppy
|
019fa1c04396a5bcadf758a31bc96217c17b43c9
|
[
"BSD-3-Clause"
] | 22
|
2020-06-06T19:30:33.000Z
|
2020-10-30T23:00:58.000Z
|
mpisppy/tests/test_sc.py
|
vishalbelsare/mpi-sppy
|
019fa1c04396a5bcadf758a31bc96217c17b43c9
|
[
"BSD-3-Clause"
] | 6
|
2020-06-06T17:57:38.000Z
|
2020-09-18T22:38:19.000Z
|
import unittest
import sys
import os
import parapint
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
class TestSC(unittest.TestCase):
def setUp(self):
self.original_path = sys.path
example_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'examples', 'farmer')
sys.path.append(example_dir)
def tearDown(self):
sys.path = self.original_path
def test_farmer_example(self):
import schur_complement as sc_example
linear_solver = parapint.linalg.MPISchurComplementLinearSolver(subproblem_solvers={ndx: parapint.linalg.ScipyInterface(compute_inertia=True) for ndx in range(3)},
schur_complement_solver=parapint.linalg.ScipyInterface(compute_inertia=True))
sc_opt = sc_example.solve_with_sc(scen_count=3, linear_solver=linear_solver)
sc_sol = sc_opt.gather_var_values_to_rank0()
if rank == 0:
self.assertAlmostEqual(sc_sol[('Scenario0', 'DevotedAcreage[CORN0]')], 80)
self.assertAlmostEqual(sc_sol[('Scenario0', 'DevotedAcreage[SUGAR_BEETS0]')], 250)
self.assertAlmostEqual(sc_sol[('Scenario0', 'DevotedAcreage[WHEAT0]')], 170)
self.assertAlmostEqual(sc_sol[('Scenario1', 'DevotedAcreage[CORN0]')], 80)
self.assertAlmostEqual(sc_sol[('Scenario1', 'DevotedAcreage[SUGAR_BEETS0]')], 250)
self.assertAlmostEqual(sc_sol[('Scenario1', 'DevotedAcreage[WHEAT0]')], 170)
self.assertAlmostEqual(sc_sol[('Scenario2', 'DevotedAcreage[CORN0]')], 80)
self.assertAlmostEqual(sc_sol[('Scenario2', 'DevotedAcreage[SUGAR_BEETS0]')], 250)
self.assertAlmostEqual(sc_sol[('Scenario2', 'DevotedAcreage[WHEAT0]')], 170)
| 46.435897
| 170
| 0.6709
|
36c4e2cd935842a9a731c1e7ecd49b493035ece7
| 324
|
py
|
Python
|
lecciones/44/interfaces_3.py
|
ImAlexisSaez/curso-python-desde-0
|
c4a84dae0804adefe4ee6024b411d8ed288da759
|
[
"MIT"
] | 2
|
2020-08-31T02:17:36.000Z
|
2022-01-29T15:25:27.000Z
|
lecciones/44/interfaces_3.py
|
ImAlexisSaez/curso-python-desde-0
|
c4a84dae0804adefe4ee6024b411d8ed288da759
|
[
"MIT"
] | null | null | null |
lecciones/44/interfaces_3.py
|
ImAlexisSaez/curso-python-desde-0
|
c4a84dae0804adefe4ee6024b411d8ed288da759
|
[
"MIT"
] | null | null | null |
from tkinter import Tk, Frame, Label
root = Tk()
root.title("Probando el widget Label")
root.resizable(width=True, height=True)
root.iconbitmap("icon.ico")
root.config(bg="lightblue")
frame = Frame(root, width=500, height=400)
frame.pack()
Label(frame, text="Mi primera etiqueta.").place(x=100, y=200)
root.mainloop()
| 19.058824
| 61
| 0.725309
|
67b78701f39a171a2d9ecb8bb5b7b93652fc0346
| 3,784
|
py
|
Python
|
chap_02/exe_048_date_to_zodiac_sign.py
|
aleattene/python-workbook
|
bf26ba716c957316d1463fb25488384e319d5b91
|
[
"MIT"
] | null | null | null |
chap_02/exe_048_date_to_zodiac_sign.py
|
aleattene/python-workbook
|
bf26ba716c957316d1463fb25488384e319d5b91
|
[
"MIT"
] | null | null | null |
chap_02/exe_048_date_to_zodiac_sign.py
|
aleattene/python-workbook
|
bf26ba716c957316d1463fb25488384e319d5b91
|
[
"MIT"
] | null | null | null |
"""
The horoscopes commonly reported in newspapers
use the position of the sun at the time of one’s birth to try and predict the future.
This system of astrology divides the year into twelve zodiac signs,
as outlined in the table below:
Zodiac Sign Date Range
Capricorn December 22 to January 19
Aquarius January 20 to February 18
Pisces February 19 to March 20
Aries March 21 to April 19
Taurus April 20 to May 20
Gemini May 21 to June 20
Cancer June 21 to July 22
Leo July 23 to August 22
Virgo August 23 to September 22
Libra September 23 to October 22
Scorpio October 23 to November 21
Sagittarius November 22 to December 21
Write a program that asks the user to enter his or her month and day of birth.
Then your program should report the user’s zodiac sign as part of an appropriate output message.
"""
# START Definition of FUNCTIONS
def validaMonth(stringInput):
month = stringInput.upper()
if (month == "JANUARY") or (month == "FEBRUARY") or (month == "MARCH") or \
(month == "APRIL") or (month == "MAY") or (month == "JUNE") or \
(month == "JULY") or (month == "AUGUST") or (month == "SEPTEMBER") or \
(month == "OCTOBER") or (month == "NOVEMBER") or (month == "DECEMBER"):
return True
else:
return False
def valutaIntValido(numero):
if numero.isdigit():
if 0 < int(numero) <= 31: # Possible evolution: 28/30/31 days
return True
return False
def dateToZodiacSign(month, day): # Possible evolution: IF NESTED (if same month)
month = month.upper()
if (month == "DECEMBER" and day >= 22) or (month == "JANUARY" and day <= 19):
return "CAPRICORN"
elif (month == "JANUARY" and day >= 20) or (month == "FEBRUARY" and day <= 18):
return "AQUARIUS"
elif (month == "FEBRUARY" and day >= 19) or (month == "MARCH" and day <= 20):
return "PISCES"
elif (month == "MARCH" and day >= 21) or (month == "APRIL" and day <= 19):
return "ARIES"
elif (month == "APRIL" and day >= 20) or (month == "MAY" and day <= 20):
return "TAURUS"
elif (month == "MAY" and day >= 21) or (month == "JUNE" and day <= 20):
return "GEMINI"
elif (month == "JUNE" and day >= 21) or (month == "JULY" and day <= 22):
return "CANCER"
elif (month == "JULY" and day >= 23) or (month == "AUGUST" and day <= 22):
return "LEO"
elif (month == "AUGUST" and day >= 23) or (month == "SEPTEMBER" and day <= 22):
return "VIRGO"
elif (month == "SEPTEMBER" and day >= 23) or (month == "OCTOBER" and day <= 22):
return "LIBRA"
elif (month == "OCTOBER" and day >= 23) or (month == "NOVEMBER" and day <= 21):
return "SCORPIO"
else:
return "SAGITTARIUS" # 22 November -> 21 December
# END Definition of FUNCTIONS
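# Illustrative sanity check (not part of the original exercise): per the table
# in the docstring, dateToZodiacSign("January", 19) returns "CAPRICORN" while
# dateToZodiacSign("January", 20) returns "AQUARIUS", since the 19/20 January
# boundary separates the two signs.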
# Acquisition and Control of the DATA entered by the USER
print("Enter your MONTH and DAY of BIRTH.")
month = input("MONTH: ")
day = input("DAY: ")
monthValidated = validaMonth(month)
dayIntValidated = valutaIntValido(day)
while not(monthValidated and dayIntValidated):
print("Incorrect entry. Try again.")
print("Enter your MONTH and DAY of BIRTH.")
month = input("MONTH: ")
day = input("DAY: ")
monthValidated = validaMonth(month)
dayIntValidated = valutaIntValido(day)
# Conversion STR -> INT
day = int(day)
# Evaluation DATE -> ZODIAC SIGN
zodiacSign = dateToZodiacSign(month, day)
# Displaying the RESULTS
print("Your ZODIAC SIGN is " + zodiacSign +
" (date of birth " + month.upper() + " " + str(day) + ").")
| 36.384615
| 96
| 0.600423
|
d50898d5ae082b05129d3a437ed8a6eaffde2aac
| 17,797
|
py
|
Python
|
gs_quant/base.py
|
S-Manglik/gs-quant
|
af22aa8574571db45ddc2a9627d25a26bd00e09b
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/base.py
|
S-Manglik/gs-quant
|
af22aa8574571db45ddc2a9627d25a26bd00e09b
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/base.py
|
S-Manglik/gs-quant
|
af22aa8574571db45ddc2a9627d25a26bd00e09b
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import builtins
import copy
import datetime as dt
import logging
from abc import ABC, ABCMeta, abstractmethod
from collections import namedtuple
from dataclasses import Field, InitVar, MISSING, dataclass, field, fields, replace
from enum import EnumMeta
from functools import update_wrapper
from typing import Iterable, Mapping, Optional, Union
import numpy as np
from dataclasses_json import config, global_config
from dataclasses_json.core import _decode_generic, _is_supported_generic
from gs_quant.context_base import ContextBase, ContextMeta
from gs_quant.json_convertors import encode_date_or_str, decode_date_or_str, decode_optional_date, encode_datetime, \
decode_datetime, decode_float_or_str, decode_instrument, encode_dictable
from inflection import camelize, underscore
_logger = logging.getLogger(__name__)
__builtins = set(dir(builtins))
__getattribute__ = object.__getattribute__
__setattr__ = object.__setattr__
_rename_cache = {}
def exclude_none(o):
return o is None
def exlude_always(_o):
return True
def is_iterable(o, t):
return isinstance(o, Iterable) and all(isinstance(it, t) for it in o)
def is_instance_or_iterable(o, t):
return isinstance(o, t) or is_iterable(o, t)
def _get_underscore(arg):
if arg not in _rename_cache:
_rename_cache[arg] = underscore(arg)
return _rename_cache[arg]
def handle_camel_case_args(cls):
init = cls.__init__
def wrapper(self, *args, **kwargs):
normalised_kwargs = {}
for arg, value in kwargs.items():
if not arg.isupper():
snake_case_arg = _get_underscore(arg)
if snake_case_arg != arg and snake_case_arg in kwargs:
raise ValueError('{} and {} both specified'.format(arg, snake_case_arg))
arg = snake_case_arg
arg = cls._field_mappings().get(arg, arg)
normalised_kwargs[arg] = value
return init(self, *args, **normalised_kwargs)
cls.__init__ = update_wrapper(wrapper=wrapper, wrapped=init)
return cls
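# Illustrative note (hypothetical class name, for explanation only): a dataclass
# decorated with @handle_camel_case_args accepts legacy camelCase keyword
# arguments, so SomeInstrument(fixedRate=0.01) is normalised and forwarded to
# __init__ as fixed_rate=0.01 (subject to any _field_mappings overrides).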
field_metadata = config(exclude=exclude_none)
name_metadata = config(exclude=exlude_always)
class RiskKey(namedtuple('RiskKey', ('provider', 'date', 'market', 'params', 'scenario', 'risk_measure'))):
@property
def ex_measure(self):
return RiskKey(self.provider, self.date, self.market, self.params, self.scenario, None)
@property
def fields(self):
return self._fields
class EnumBase:
@classmethod
def _missing_(cls: EnumMeta, key):
if not isinstance(key, str):
key = str(key)
return next((m for m in cls.__members__.values() if m.value.lower() == key.lower()), None)
def __reduce_ex__(self, protocol):
return self.__class__, (self.value,)
def __lt__(self: EnumMeta, other):
return self.value < other.value
def __repr__(self):
return self.value
class HashableDict(dict):
def __hash__(self):
return hash(tuple(self.items()))
class DictBase(HashableDict):
_PROPERTIES = set()
def __init__(self, *args, **kwargs):
if self._PROPERTIES:
invalid_arg = next((k for k in kwargs.keys() if k not in self._PROPERTIES), None)
if invalid_arg is not None:
raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{invalid_arg}'")
super().__init__(*args, **{camelize(k, uppercase_first_letter=False): v for k, v in kwargs.items()
if v is not None})
def __getitem__(self, item):
return super().__getitem__(camelize(item, uppercase_first_letter=False))
def __setitem__(self, key, value):
if value is not None:
return super().__setitem__(camelize(key, uppercase_first_letter=False), value)
def __getattr__(self, item):
if self._PROPERTIES:
if _get_underscore(item) in self._PROPERTIES:
return self.get(item)
elif item in self:
return self[item]
raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{item}'")
def __setattr__(self, key, value):
if key in dir(self):
return super().__setattr__(key, value)
elif self._PROPERTIES and _get_underscore(key) not in self._PROPERTIES:
raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{key}'")
self[key] = value
@classmethod
def properties(cls) -> set:
return cls._PROPERTIES
class Base(ABC):
"""The base class for all generated classes"""
__fields_by_name = None
__field_mappings = None
def __getattr__(self, item):
fields_by_name = __getattribute__(self, '_fields_by_name')()
if item.startswith('_') or item in fields_by_name:
return __getattribute__(self, item)
# Handle setting via camelCase names (legacy behaviour) and field mappings from disallowed names
snake_case_item = _get_underscore(item)
field_mappings = __getattribute__(self, '_field_mappings')()
snake_case_item = field_mappings.get(snake_case_item, snake_case_item)
try:
return __getattribute__(self, snake_case_item)
except AttributeError:
return __getattribute__(self, item)
def __setattr__(self, key, value):
# Handle setting via camelCase names (legacy behaviour)
snake_case_key = _get_underscore(key)
snake_case_key = self._field_mappings().get(snake_case_key, snake_case_key)
fld = self._fields_by_name().get(snake_case_key)
if fld:
if not fld.init:
raise ValueError(f'{key} cannot be set')
key = snake_case_key
value = self.__coerce_value(fld.type, value)
__setattr__(self, key, value)
def __repr__(self):
if self.name is not None:
return f'{self.name} ({self.__class__.__name__})'
return super().__repr__()
@classmethod
def __coerce_value(cls, typ: type, value):
if isinstance(value, np.generic):
# Handle numpy types
return value.item()
elif hasattr(value, 'tolist'):
# tolist converts scalar or array to native python type if not already native.
return value.tolist()
elif typ in (DictBase, Optional[DictBase]) and isinstance(value, Base):
return value.to_dict()
if _is_supported_generic(typ):
return _decode_generic(typ, value, False)
else:
return value
@classmethod
def _fields_by_name(cls) -> Mapping[str, Field]:
if cls is Base:
return {}
if cls.__fields_by_name is None:
cls.__fields_by_name = {f.name: f for f in fields(cls)}
return cls.__fields_by_name
@classmethod
def _field_mappings(cls) -> Mapping[str, str]:
if cls is Base:
return {}
if cls.__field_mappings is None:
field_mappings = {}
for fld in fields(cls):
config_fn = fld.metadata.get('dataclasses_json', {}).get('letter_case')
if config_fn:
mapped_name = config_fn('field_name')
if mapped_name:
field_mappings[mapped_name] = fld.name
cls.__field_mappings = field_mappings
return cls.__field_mappings
def clone(self, **kwargs):
"""
Clone this object, overriding specified values
:param kwargs: property names and values, e.g. swap.clone(fixed_rate=0.01)
**Examples**
To create a modified copy of an instrument, overriding one of its properties:
>>> from gs_quant.instrument import IRCap
>>> cap = IRCap('5y', 'GBP')
>>>
>>> new_cap = cap.clone(cap_rate=0.01)
"""
return replace(self, **kwargs)
@classmethod
def properties(cls) -> set:
"""The public property names of this class"""
return set(f[:-1] if f[-1] == '_' else f for f in cls._fields_by_name().keys())
def as_dict(self, as_camel_case: bool = False) -> dict:
"""Dictionary of the public, non-null properties and values"""
# to_dict() converts all the values to JSON type, does camel case and name mappings
# asdict() does not convert values or case of the keys or do name mappings
ret = {}
field_mappings = {v: k for k, v in self._field_mappings().items()}
for key in self._fields_by_name().keys():
value = __getattribute__(self, key)
key = field_mappings.get(key, key)
if value is not None:
if as_camel_case:
key = camelize(key, uppercase_first_letter=False)
ret[key] = value
return ret
@classmethod
def default_instance(cls):
"""
Construct a default instance of this type
"""
required = {f.name: None if f.default == MISSING else f.default for f in fields(cls) if f.init}
return cls(**required)
def from_instance(self, instance):
"""
Copy the values from an existing instance of the same type to our self
:param instance: from which to copy:
:return:
"""
if not isinstance(instance, type(self)):
raise ValueError('Can only use from_instance with an object of the same type')
for fld in fields(self.__class__):
if fld.init:
__setattr__(self, fld.name, __getattribute__(instance, fld.name))
@dataclass
class Priceable(Base):
def resolve(self, in_place: bool = True):
"""
Resolve non-supplied properties of an instrument
**Examples**
>>> from gs_quant.instrument import IRSwap
>>>
>>> swap = IRSwap('Pay', '10y', 'USD')
>>> rate = swap.fixedRate
rate is None
>>> swap.resolve()
>>> rate = swap.fixedRate
rate is now the solved fixed rate
"""
raise NotImplementedError
def dollar_price(self):
"""
Present value in USD
:return: a float or a future, depending on whether the current PricingContext is async, or has been entered
**Examples**
>>> from gs_quant.instrument import IRCap
>>>
>>> cap = IRCap('1y', 'EUR')
>>> price = cap.dollar_price()
price is the present value in USD (a float)
>>> cap_usd = IRCap('1y', 'USD')
>>> cap_eur = IRCap('1y', 'EUR')
>>>
>>> from gs_quant.markets import PricingContext
>>>
>>> with PricingContext():
>>> price_usd_f = cap_usd.dollar_price()
>>> price_eur_f = cap_eur.dollar_price()
>>>
>>> price_usd = price_usd_f.result()
>>> price_eur = price_eur_f.result()
price_usd_f and price_eur_f are futures, price_usd and price_eur are floats
"""
raise NotImplementedError
def price(self):
"""
Present value in local currency. Note that this is not yet supported on all instruments
**Examples**
>>> from gs_quant.instrument import IRSwap
>>>
>>> swap = IRSwap('Pay', '10y', 'EUR')
>>> price = swap.price()
price is the present value in EUR (a float)
"""
raise NotImplementedError
def calc(self, risk_measure, fn=None):
"""
Calculate the value of the risk_measure
:param risk_measure: the risk measure to compute, e.g. IRDelta (from gs_quant.risk)
:param fn: a function for post-processing results
:return: a float or dataframe, depending on whether the value is scalar or structured, or a future thereof
(depending on how PricingContext is being used)
**Examples**
>>> from gs_quant.instrument import IRCap
>>> from gs_quant.risk import IRDelta
>>>
>>> cap = IRCap('1y', 'USD')
>>> delta = cap.calc(IRDelta)
delta is a dataframe
>>> from gs_quant.instrument import EqOption
>>> from gs_quant.risk import EqDelta
>>>
>>> option = EqOption('.SPX', '3m', 'ATMF', 'Call', 'European')
>>> delta = option.calc(EqDelta)
delta is a float
>>> from gs_quant.markets import PricingContext
>>>
>>> cap_usd = IRCap('1y', 'USD')
>>> cap_eur = IRCap('1y', 'EUR')
>>> with PricingContext():
>>> usd_delta_f = cap_usd.calc(IRDelta)
>>> eur_delta_f = cap_eur.calc(IRDelta)
>>>
>>> usd_delta = usd_delta_f.result()
>>> eur_delta = eur_delta_f.result()
usd_delta_f and eur_delta_f are futures, usd_delta and eur_delta are dataframes
"""
raise NotImplementedError
class __ScenarioMeta(ABCMeta, ContextMeta):
pass
@dataclass
class Scenario(Base, ContextBase, ABC, metaclass=__ScenarioMeta):
def __lt__(self, other):
if self.__repr__() != other.__repr__():
return self.name < other.name
return False
def __repr__(self):
if self.name:
return self.name
else:
params = self.as_dict()
sorted_keys = sorted(params.keys(), key=lambda x: x.lower())
params = ', '.join(
[f'{k}:{params[k].__repr__() if isinstance(params[k], Base) else params[k]}' for k in sorted_keys])
return self.scenario_type + '(' + params + ')'
@dataclass
class RiskMeasureParameter(Base, ABC):
pass
@dataclass
class InstrumentBase(Base, ABC):
quantity_: InitVar[float] = field(default=1, init=False)
@property
@abstractmethod
def provider(self):
...
@property
def instrument_quantity(self) -> float:
return self.quantity_
@property
def resolution_key(self) -> Optional[RiskKey]:
try:
return self.__resolution_key
except AttributeError:
return None
@property
def unresolved(self):
try:
return self.__unresolved
except AttributeError:
return None
@property
def metadata(self):
try:
return self.__metadata
except AttributeError:
return None
@metadata.setter
def metadata(self, value):
self.__metadata = value
def from_instance(self, instance):
self.__resolution_key = None
super().from_instance(instance)
self.__unresolved = instance.__unresolved
self.__resolution_key = instance.__resolution_key
def resolved(self, values: dict, resolution_key: RiskKey):
all_values = self.as_dict(True)
all_values.update(values)
new_instrument = self.from_dict(all_values)
new_instrument.name = self.name
new_instrument.__unresolved = copy.copy(self)
new_instrument.__resolution_key = resolution_key
return new_instrument
@dataclass
class Market(ABC):
def __hash__(self):
return hash(self.market or self.location)
def __eq__(self, other):
return (self.market or self.location) == (other.market or other.location)
def __lt__(self, other):
return repr(self) < repr(other)
@property
@abstractmethod
def market(self):
...
@property
@abstractmethod
def location(self):
...
def to_dict(self):
return self.market.to_dict()
class Sentinel:
def __init__(self, name: str):
self.__name = name
def __eq__(self, other):
return self.__name == other.__name
def get_enum_value(enum_type: EnumMeta, value: Union[EnumBase, str]):
if value in (None,):
return None
if isinstance(value, enum_type):
return value
try:
enum_value = enum_type(value)
except ValueError:
_logger.warning('Setting value to {}, which is not a valid entry in {}'.format(value, enum_type))
enum_value = value
return enum_value
# Yes, I know this is a little evil ...
global_config.encoders[dt.date] = dt.date.isoformat
global_config.encoders[Optional[dt.date]] = encode_date_or_str
global_config.decoders[dt.date] = decode_optional_date
global_config.decoders[Optional[dt.date]] = decode_optional_date
global_config.encoders[Union[dt.date, str]] = encode_date_or_str
global_config.encoders[Optional[Union[dt.date, str]]] = encode_date_or_str
global_config.decoders[Union[dt.date, str]] = decode_date_or_str
global_config.decoders[Optional[Union[dt.date, str]]] = decode_date_or_str
global_config.encoders[dt.datetime] = encode_datetime
global_config.encoders[Optional[dt.datetime]] = encode_datetime
global_config.decoders[dt.datetime] = decode_datetime
global_config.decoders[Optional[dt.datetime]] = decode_datetime
global_config.decoders[Union[float, str]] = decode_float_or_str
global_config.decoders[Optional[Union[float, str]]] = decode_float_or_str
global_config.decoders[InstrumentBase] = decode_instrument
global_config.decoders[Optional[InstrumentBase]] = decode_instrument
global_config.encoders[Market] = encode_dictable
global_config.encoders[Optional[Market]] = encode_dictable
| 30.318569
| 117
| 0.638029
|
7f7e276506a59face227fc9554f8d1fc5cab4fcd
| 10,982
|
py
|
Python
|
test/functional/test_framework/test_node.py
|
trublud/Kids-Coin
|
a48e9a84fc5b5b053d39795df5ad24f1becbc296
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
trublud/Kids-Coin
|
a48e9a84fc5b5b053d39795df5ad24f1becbc296
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
trublud/Kids-Coin
|
a48e9a84fc5b5b053d39795df5ad24f1becbc296
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import time
from .authproxy import JSONRPCException
from .util import (
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
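# Illustrative example of the dispatch described above: node.getblockcount() is
# not defined on TestNode, so __getattr__ forwards it to the underlying RPC
# proxy (or to the CLI wrapper when use_cli=True).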
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("KIDSCOIND", "kidscoind")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
# Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("KIDSCOINCLI", "kidscoin-cli"), self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print("Cleaning up leftover process")
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("kidscoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "kidscoind exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to kidscoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes bitcoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
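# Usage sketch (hypothetical values, based on the methods above):
#   cli = TestNodeCLI("kidscoin-cli", datadir)
#   cli.getblockcount()                            # single RPC via the CLI
#   cli.batch([cli.getblockcount.get_request()])   # batched via get_request()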
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running kidscoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| 38.669014
| 248
| 0.629667
|
66e6be53c0c1d73a2c15d9c3416ea7dcff153c59
| 10,521
|
py
|
Python
|
tensorflow/python/autograph/pyct/templates.py
|
LucasSloan/tensorflow
|
eddf3635b41fd457b2a36cf17bba40352c4abc57
|
[
"Apache-2.0"
] | 2
|
2019-03-14T14:51:00.000Z
|
2021-06-16T05:06:22.000Z
|
tensorflow/python/autograph/pyct/templates.py
|
apeforest/tensorflow
|
07da23bfa2a9ca10cd7c1dd6bea0f85d981c013e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/autograph/pyct/templates.py
|
apeforest/tensorflow
|
07da23bfa2a9ca10cd7c1dd6bea0f85d981c013e
|
[
"Apache-2.0"
] | 1
|
2019-03-09T23:31:44.000Z
|
2019-03-09T23:31:44.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST conversion templates.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
class ReplaceTransformer(gast.NodeTransformer):
"""Replace AST nodes."""
def __init__(self, replacements):
"""Create a new ReplaceTransformer.
Args:
replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by.
"""
self.replacements = replacements
self.in_replacements = False
self.preserved_annos = {
anno.Basic.ORIGIN,
anno.Basic.SKIP_PROCESSING,
anno.Static.ORIG_DEFINITIONS,
}
def _prepare_replacement(self, replaced, key):
"""Prepares a replacement AST that's safe to swap in for a node.
Args:
replaced: ast.AST, the node being replaced
key: Hashable, the key of the replacement AST
Returns:
ast.AST, the replacement AST
"""
repl = self.replacements[key]
new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
if isinstance(new_nodes, gast.AST):
new_nodes = [new_nodes]
return new_nodes
def visit_Expr(self, node):
# When replacing a placeholder with an entire statement, the replacement
# must stand on its own and not be wrapped in an Expr.
new_value = self.visit(node.value)
if new_value is node.value:
return node
return new_value
def visit_keyword(self, node):
if node.arg not in self.replacements:
return self.generic_visit(node)
repl = self._prepare_replacement(node, node.arg)
if isinstance(repl, gast.keyword):
return repl
elif (repl and isinstance(repl, (list, tuple)) and
all(isinstance(r, gast.keyword) for r in repl)):
return repl
# TODO(mdan): We may allow replacing with a string as well.
# For example, if one wanted to replace foo with bar in foo=baz, then
# we could allow changing just node arg, so that we end up with bar=baz.
raise ValueError(
'a keyword argument may only be replaced by another keyword or a '
'non-empty list of keywords. Found: {} for keyword {}'.format(
repl, node.arg))
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
if node.name not in self.replacements:
return node
repl = self.replacements[node.name]
if not isinstance(repl, (gast.Name, ast.Name)):
raise ValueError(
'a function name can only be replaced by a Name node. Found: %s' %
repl)
node.name = repl.id
return node
def _check_has_context(self, node):
if not node.ctx:
raise ValueError('node %s is missing ctx value' % node)
# TODO(mdan): Rewrite _check and _set using a separate transformer.
def _check_inner_children_have_context(self, node):
if isinstance(node, gast.Attribute):
self._check_inner_children_have_context(node.value)
self._check_has_context(node)
elif isinstance(node, (gast.Tuple, gast.List)):
for e in node.elts:
self._check_inner_children_have_context(e)
self._check_has_context(node)
elif isinstance(node, gast.Dict):
for e in node.keys:
self._check_inner_children_have_context(e)
for e in node.values:
self._check_inner_children_have_context(e)
elif isinstance(node, gast.Index):
self._check_inner_children_have_context(node.value)
elif isinstance(node, gast.Subscript):
self._check_inner_children_have_context(node.value)
self._check_inner_children_have_context(node.slice)
elif isinstance(node, gast.Slice):
self._check_inner_children_have_context(node.lower)
if node.upper:
self._check_inner_children_have_context(node.upper)
if node.step:
self._check_inner_children_have_context(node.step)
elif isinstance(node, gast.BinOp):
self._check_inner_children_have_context(node.left)
self._check_inner_children_have_context(node.right)
elif isinstance(node, gast.UnaryOp):
self._check_inner_children_have_context(node.operand)
elif isinstance(node, gast.Name):
self._check_has_context(node)
elif isinstance(node, (gast.Str, gast.Num)):
pass
else:
raise ValueError('unexpected node type "%s"' % node)
def _set_inner_child_context(self, node, ctx):
if isinstance(node, gast.Attribute):
self._set_inner_child_context(node.value, gast.Load())
node.ctx = ctx
elif isinstance(node, (gast.Tuple, gast.List)):
for e in node.elts:
self._set_inner_child_context(e, ctx)
node.ctx = ctx
elif isinstance(node, gast.Name):
node.ctx = ctx
elif isinstance(node, gast.Call):
self._set_inner_child_context(node.func, ctx)
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
for a in node.args:
self._check_inner_children_have_context(a)
for k in node.keywords:
self._check_inner_children_have_context(k.value)
elif isinstance(node, gast.Dict):
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
for e in node.keys:
self._check_inner_children_have_context(e)
for e in node.values:
self._check_inner_children_have_context(e)
elif isinstance(node, gast.Subscript):
self._set_inner_child_context(node.value, ctx)
self._check_inner_children_have_context(node.slice)
elif isinstance(node, gast.BinOp):
self._check_inner_children_have_context(node.left)
self._check_inner_children_have_context(node.right)
elif isinstance(node, gast.UnaryOp):
self._check_inner_children_have_context(node.operand)
elif isinstance(node, (gast.Str, gast.Num)):
pass
else:
raise ValueError('unexpected node type "%s"' % node)
def visit_Attribute(self, node):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
'An attribute can only be replaced by a Name node. Found: %s' % repl)
node.attr = repl.id
return node
def visit_Name(self, node):
if node.id not in self.replacements:
return node
new_nodes = self._prepare_replacement(node, node.id)
# Preserve the target context.
for n in new_nodes:
if isinstance(n, (gast.Tuple, gast.List)):
for e in n.elts:
self._set_inner_child_context(e, node.ctx)
if isinstance(n, gast.Attribute):
# For attributes, the inner Name node receives the context, while the
# outer ones have it set to Load.
self._set_inner_child_context(n, node.ctx)
else:
n.ctx = node.ctx
if len(new_nodes) == 1:
new_nodes, = new_nodes
return new_nodes
def _convert_to_ast(n):
"""Converts from a known data type to AST."""
if isinstance(n, str):
# Note: the node will receive the ctx value from the template, see
# ReplaceTransformer.visit_Name.
return gast.Name(id=n, ctx=None, annotation=None)
if isinstance(n, qual_names.QN):
return n.ast()
if isinstance(n, list):
return [_convert_to_ast(e) for e in n]
if isinstance(n, tuple):
return tuple(_convert_to_ast(e) for e in n)
return n
def replace(template, **replacements):
"""Replaces placeholders in a Python template.
AST Name and Tuple nodes always receive the context that is inferred from
the template. However, when replacing more complex nodes (which can potentially
contain Name children), the caller is responsible for setting the
appropriate context.
Args:
template: A string representing Python code. Any symbol name that appears
in the template code can be used as a placeholder.
**replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by. String values are also
supported as a shorthand for AST Name nodes with the respective ID.
Returns:
An AST node or list of AST nodes with the replacements made. If the
template was a function, a list will be returned. If the template was a
node, the same node will be returned. If the template was a string, an
AST node will be returned (a `Module` node in the case of a multi-line
string, an `Expr` node otherwise).
Raises:
ValueError: if the arguments are incorrect.
"""
if not isinstance(template, str):
raise ValueError('Expected string template, got %s' % type(template))
tree = parser.parse_str(textwrap.dedent(template))
for k in replacements:
replacements[k] = _convert_to_ast(replacements[k])
results = ReplaceTransformer(replacements).visit(tree).body
if isinstance(results, list):
return [qual_names.resolve(r) for r in results]
return qual_names.resolve(results)
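# Minimal usage sketch (illustrative, using the string shorthand documented
# above): replace('a = b', b='my_var') parses the template, swaps the
# placeholder Name node `b` for a Name node with id 'my_var', and returns the
# resulting statement(s) as a list of AST nodes.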
def replace_as_expression(template, **replacements):
"""Variant of replace that generates expressions, instead of code blocks."""
replacement = replace(template, **replacements)
if len(replacement) != 1:
raise ValueError(
'single expression expected; for more general templates use replace')
node = replacement[0]
node = qual_names.resolve(node)
if isinstance(node, gast.Expr):
return node.value
elif isinstance(node, gast.Name):
return node
raise ValueError(
'the template is expected to generate an expression or a name node;'
' instead found %s' % node)
| 36.030822
| 80
| 0.701359
|
5cb281e38ed705ca34532ce44c2e26ced08addfe
| 735
|
py
|
Python
|
tensorpack/__init__.py
|
myelintek/tensorpack
|
fcbf5869d78cf7f3b59c46318b6c883a7ea12056
|
[
"Apache-2.0"
] | 3
|
2017-12-02T16:49:42.000Z
|
2018-11-04T16:53:44.000Z
|
tensorpack/__init__.py
|
dongzhuoyao/tensorpack
|
78bcf6053172075a761eac90ab22f0b631b272a0
|
[
"Apache-2.0"
] | 6
|
2020-01-28T23:03:24.000Z
|
2022-02-10T01:21:18.000Z
|
tensorpack/__init__.py
|
wdings/Mask-RCNN
|
8d5ae5cc2cfcf2e4e53b4d1064ac9e727f736d09
|
[
"Apache-2.0"
] | 5
|
2017-11-15T14:46:27.000Z
|
2018-11-04T16:54:06.000Z
|
# -*- coding: utf-8 -*-
# File: __init__.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
import os as _os
from tensorpack.libinfo import __version__, _HAS_TF
from tensorpack.utils import *
from tensorpack.dataflow import *
# dataflow can be used alone without installing tensorflow
if _HAS_TF:
from tensorpack.models import *
from tensorpack.callbacks import *
from tensorpack.tfutils import *
# Default to v2
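# (Illustrative: exporting TENSORPACK_TRAIN_API=v1 before importing tensorpack
# selects the legacy trainv1 package via the check below.)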
if _os.environ.get('TENSORPACK_TRAIN_API', 'v2') == 'v2':
from tensorpack.train import *
else:
from tensorpack.trainv1 import *
from tensorpack.graph_builder import InputDesc, ModelDesc, ModelDescBase
from tensorpack.input_source import *
from tensorpack.predict import *
| 27.222222
| 76
| 0.72517
|
fe6e7f3257fc05c611f5c41710975f4aabdee379
| 800
|
py
|
Python
|
tests/test_ublame.py
|
taintedkernel/ublame
|
27f3041ada145cfa15e90b0b6cd67b3d4e1dd9ad
|
[
"MIT"
] | 10
|
2020-11-11T10:41:33.000Z
|
2021-09-04T22:17:40.000Z
|
tests/test_ublame.py
|
taintedkernel/ublame
|
27f3041ada145cfa15e90b0b6cd67b3d4e1dd9ad
|
[
"MIT"
] | 4
|
2020-11-13T22:03:55.000Z
|
2022-03-17T22:18:47.000Z
|
tests/test_ublame.py
|
taintedkernel/ublame
|
27f3041ada145cfa15e90b0b6cd67b3d4e1dd9ad
|
[
"MIT"
] | 1
|
2022-03-17T18:47:23.000Z
|
2022-03-17T18:47:23.000Z
|
import os
import unittest
from ..ublame import repo_path_for, trim_diff
LOREM_IPSUM = """
Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
"""
LINES = """{}
+def trim_diff(diff, token):
{}
""".format(
LOREM_IPSUM * 3, LOREM_IPSUM * 3
)
class ConfigTest(unittest.TestCase):
def test_repo_path_for(self):
self.assertEquals(
repo_path_for(__file__),
os.path.abspath(os.path.dirname(os.path.join(__file__, "../../"))),
)
def test_trim_diff_not_found(self):
self.assertEquals(trim_diff(LINES, "foobar"), "")
def test_trim_diff_found(self):
self.assertEquals(
trim_diff(LINES, ("token",), 2), "\n".join(LINES.split("\n")[11:16])
)
| 23.529412
| 80
| 0.645
|
78068d88c6a57d7f15bbe69b52d53101eeba4c50
| 344
|
py
|
Python
|
phoenix/slackbot/management/commands/postmortem_report_notifications.py
|
kiwicom/phoenix
|
19bfbd23387e7d054780cfba9dbb385525aa7d80
|
[
"MIT"
] | 8
|
2019-03-03T22:46:06.000Z
|
2021-12-14T20:26:28.000Z
|
phoenix/slackbot/management/commands/postmortem_report_notifications.py
|
kiwicom/phoenix
|
19bfbd23387e7d054780cfba9dbb385525aa7d80
|
[
"MIT"
] | 250
|
2018-11-21T09:21:11.000Z
|
2021-09-22T17:48:07.000Z
|
phoenix/slackbot/management/commands/postmortem_report_notifications.py
|
kiwicom/phoenix
|
19bfbd23387e7d054780cfba9dbb385525aa7d80
|
[
"MIT"
] | 1
|
2021-02-16T08:01:04.000Z
|
2021-02-16T08:01:04.000Z
|
import logging
from django.core.management.base import BaseCommand
from ...tasks import postmortem_notifications
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Check for missing postmortems in announcements and send out notifications."""
def handle(self, *args, **options):
postmortem_notifications()
| 22.933333
| 84
| 0.761628
|
0c74fdc1accff1a4bafcb184077210ae8ff00c0a
| 13,433
|
py
|
Python
|
envs/doom/multiplayer/doom_multiagent_wrapper.py
|
neevparikh/hierarchical-doom
|
082f794b9c6101c4e94f15bf4f93c718ee219ea5
|
[
"MIT"
] | 1
|
2021-11-19T19:39:36.000Z
|
2021-11-19T19:39:36.000Z
|
envs/doom/multiplayer/doom_multiagent_wrapper.py
|
neevparikh/hierarchical-doom
|
082f794b9c6101c4e94f15bf4f93c718ee219ea5
|
[
"MIT"
] | null | null | null |
envs/doom/multiplayer/doom_multiagent_wrapper.py
|
neevparikh/hierarchical-doom
|
082f794b9c6101c4e94f15bf4f93c718ee219ea5
|
[
"MIT"
] | null | null | null |
import threading
import time
from enum import Enum
from multiprocessing import Process
from queue import Empty, Queue
import faster_fifo
import cv2
import filelock
import gym
from filelock import FileLock
from envs.doom.doom_gym import doom_lock_file
from envs.doom.doom_render import concat_grid, cvt_doom_obs
from envs.doom.multiplayer.doom_multiagent import find_available_port, DEFAULT_UDP_PORT
from envs.env_utils import RewardShapingInterface, get_default_reward_shaping
from utils.utils import log
from functools import wraps
from time import sleep
def retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for i in range(num_attempts):
try:
return func(*args, **kwargs)
except exception_class as e:
# This accesses the self instance variable
multiagent_wrapper_obj = args[0]
multiagent_wrapper_obj.initialized = False
multiagent_wrapper_obj.close()
# This is done to reset if it is in the step function
if should_reset:
multiagent_wrapper_obj.reset()
if i == num_attempts - 1:
raise
else:
log.error('Failed with error %r, trying again', e)
sleep(sleep_time)
return wrapper
return decorator
def safe_get(q, timeout=1e6, msg='Queue timeout'):
"""Using queue.get() with timeout is necessary, otherwise KeyboardInterrupt is not handled."""
while True:
try:
return q.get(timeout=timeout)
except Empty:
log.warning(msg)
def udp_port_num(env_config):
if env_config is None:
return DEFAULT_UDP_PORT
port_to_use = DEFAULT_UDP_PORT + 100 * env_config.worker_index + env_config.vector_index
return port_to_use
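# Worked example of the formula above (values chosen for illustration only):
# with worker_index=2 and vector_index=3 the port is
# DEFAULT_UDP_PORT + 100 * 2 + 3, i.e. DEFAULT_UDP_PORT + 203.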
class TaskType(Enum):
INIT, TERMINATE, RESET, STEP, STEP_UPDATE, INFO, SET_ATTR = range(7)
def init_multiplayer_env(make_env_func, player_id, env_config, init_info=None):
env = make_env_func(player_id=player_id)
if env_config is not None and 'worker_index' in env_config:
env.unwrapped.worker_index = env_config.worker_index
if env_config is not None and 'vector_index' in env_config:
env.unwrapped.vector_index = env_config.vector_index
if init_info is None:
port_to_use = udp_port_num(env_config)
port = find_available_port(port_to_use, increment=1000)
log.debug('Using port %d', port)
init_info = dict(port=port)
env.unwrapped.init_info = init_info
env.seed(env.unwrapped.worker_index * 1000 + env.unwrapped.vector_index * 10 + player_id)
return env
class MultiAgentEnvWorker:
def __init__(self,
player_id,
make_env_func,
env_config,
use_multiprocessing=False,
reset_on_init=True):
self.player_id = player_id
self.make_env_func = make_env_func
self.env_config = env_config
self.reset_on_init = reset_on_init
if use_multiprocessing:
self.process = Process(target=self.start, daemon=False)
self.task_queue, self.result_queue = faster_fifo.Queue(), faster_fifo.Queue()
else:
self.process = threading.Thread(target=self.start)
self.task_queue, self.result_queue = Queue(), Queue()
self.process.start()
def _init(self, init_info):
log.info('Initializing env for player %d, init_info: %r...', self.player_id, init_info)
env = init_multiplayer_env(self.make_env_func, self.player_id, self.env_config, init_info)
if self.reset_on_init:
env.reset()
return env
@staticmethod
def _terminate(env):
if env is None:
return
env.close()
@staticmethod
def _get_info(env):
"""Specific to custom VizDoom environments."""
info = {}
if hasattr(env.unwrapped, 'get_info_all'):
info = env.unwrapped.get_info_all() # info for the new episode
return info
def _set_env_attr(self, env, player_id, attr_chain, value):
"""Allows us to set an arbitrary attribute of the environment, e.g. attr_chain can be unwrapped.foo.bar"""
assert player_id == self.player_id
attrs = attr_chain.split('.')
curr_attr = env
try:
for attr_name in attrs[:-1]:
curr_attr = getattr(curr_attr, attr_name)
except AttributeError:
log.error('Env does not have an attribute %s', attr_chain)
attr_to_set = attrs[-1]
setattr(curr_attr, attr_to_set, value)
def start(self):
env = None
while True:
data, task_type = safe_get(self.task_queue)
if task_type == TaskType.INIT:
env = self._init(data)
self.result_queue.put(None) # signal we're done
continue
if task_type == TaskType.TERMINATE:
self._terminate(env)
break
results = None
if task_type == TaskType.RESET:
results = env.reset()
elif task_type == TaskType.INFO:
results = self._get_info(env)
elif task_type == TaskType.STEP or task_type == TaskType.STEP_UPDATE:
# collect obs, reward, done, and info
action = data
env.unwrapped.update_state = task_type == TaskType.STEP_UPDATE
results = env.step(action)
elif task_type == TaskType.SET_ATTR:
player_id, attr_chain, value = data
self._set_env_attr(env, player_id, attr_chain, value)
else:
raise Exception(f'Unknown task type {task_type}')
self.result_queue.put(results)
class MultiAgentEnv(gym.Env, RewardShapingInterface):
def __init__(self, num_agents, make_env_func, env_config, skip_frames):
gym.Env.__init__(self)
RewardShapingInterface.__init__(self)
self.num_agents = num_agents
log.debug('Multi agent env, num agents: %d', self.num_agents)
self.skip_frames = skip_frames # number of frames to skip (1 = no skip)
env = make_env_func(player_id=-1) # temporary env just to query observation_space and stuff
self.action_space = env.action_space
self.observation_space = env.observation_space
self.default_reward_shaping = get_default_reward_shaping(env)
env.close()
self.current_reward_shaping = [self.default_reward_shaping for _ in range(self.num_agents)]
self.make_env_func = make_env_func
self.safe_init = env_config is not None and env_config.get('safe_init', False)
if self.safe_init:
sleep_seconds = env_config.worker_index * 1.0
log.info('Sleeping %.3f seconds to avoid creating all envs at once', sleep_seconds)
time.sleep(sleep_seconds)
log.info('Done sleeping at %d', env_config.worker_index)
self.env_config = env_config
self.workers = None
# only needed when rendering
self.enable_rendering = False
self.last_obs = None
self.reset_on_init = True
self.initialized = False
def get_default_reward_shaping(self):
return self.default_reward_shaping
def get_current_reward_shaping(self, agent_idx: int):
return self.current_reward_shaping[agent_idx]
def set_reward_shaping(self, reward_shaping: dict, agent_idx: int):
self.current_reward_shaping[agent_idx] = reward_shaping
self.set_env_attr(
agent_idx,
'unwrapped.reward_shaping_interface.reward_shaping_scheme',
reward_shaping,
)
def await_tasks(self, data, task_type, timeout=None):
"""
Task result is always a tuple of lists, e.g.:
(
[0th_agent_obs, 1st_agent_obs, ... ],
[0th_agent_reward, 1st_agent_reward, ... ],
...
)
If your "task" returns only one result per agent (e.g. reset() returns only the observation),
the result will be a tuple of length 1. It is a responsibility of the caller to index appropriately.
"""
if data is None:
data = [None] * self.num_agents
assert len(data) == self.num_agents
for i, worker in enumerate(self.workers):
worker.task_queue.put((data[i], task_type))
result_lists = None
for i, worker in enumerate(self.workers):
results = safe_get(
worker.result_queue,
timeout=0.2 if timeout is None else timeout,
msg=f'Takes a surprisingly long time to process task {task_type}, retry...',
)
if not isinstance(results, (tuple, list)):
results = [results]
if result_lists is None:
result_lists = tuple([] for _ in results)
for j, r in enumerate(results):
result_lists[j].append(r)
return result_lists
def _ensure_initialized(self):
if self.initialized:
return
self.workers = [
MultiAgentEnvWorker(i,
self.make_env_func,
self.env_config,
reset_on_init=self.reset_on_init) for i in range(self.num_agents)
]
init_attempt = 0
while True:
init_attempt += 1
try:
port_to_use = udp_port_num(self.env_config)
port = find_available_port(port_to_use, increment=1000)
log.debug('Using port %d', port)
init_info = dict(port=port)
lock_file = doom_lock_file(max_parallel=20)
lock = FileLock(lock_file)
with lock.acquire(timeout=10):
for i, worker in enumerate(self.workers):
worker.task_queue.put((init_info, TaskType.INIT))
if self.safe_init:
time.sleep(1.0) # just in case
else:
time.sleep(0.05)
for i, worker in enumerate(self.workers):
worker.result_queue.get(timeout=20)
except filelock.Timeout:
continue
except Exception:
raise RuntimeError('Critical error: worker stuck on initialization. Abort!')
else:
break
log.debug('%d agent workers initialized for env %d!',
len(self.workers),
self.env_config.worker_index)
self.initialized = True
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False)
def info(self):
self._ensure_initialized()
info = self.await_tasks(None, TaskType.INFO)[0]
return info
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=False)
def reset(self):
self._ensure_initialized()
observation = self.await_tasks(None, TaskType.RESET, timeout=2.0)[0]
return observation
@retry_dm(exception_class=Exception, num_attempts=3, sleep_time=1, should_reset=True)
def step(self, actions):
self._ensure_initialized()
for frame in range(self.skip_frames - 1):
self.await_tasks(actions, TaskType.STEP)
obs, rew, dones, infos = self.await_tasks(actions, TaskType.STEP_UPDATE)
for info in infos:
info['num_frames'] = self.skip_frames
if all(dones):
obs = self.await_tasks(None, TaskType.RESET, timeout=2.0)[0]
if self.enable_rendering:
self.last_obs = obs
return obs, rew, dones, infos
# noinspection PyUnusedLocal
def render(self, *args, **kwargs):
self.enable_rendering = True
if self.last_obs is None:
return
render_multiagent = True
if render_multiagent:
obs_display = [o['obs'] for o in self.last_obs]
obs_grid = concat_grid(obs_display)
cv2.imshow('vizdoom', obs_grid)
else:
obs_display = self.last_obs[0]['obs']
cv2.imshow('vizdoom', cvt_doom_obs(obs_display))
cv2.waitKey(1)
def close(self):
if self.workers is not None:
# log.info('Stopping multiagent env %d...', self.env_config.worker_index)
for worker in self.workers:
worker.task_queue.put((None, TaskType.TERMINATE))
time.sleep(0.1)
for worker in self.workers:
worker.process.join()
def seed(self, seed=None):
"""Does not really make sense for the wrapper. Individual envs will be uniquely seeded on init."""
pass
def set_env_attr(self, agent_idx, attr_chain, value):
data = (agent_idx, attr_chain, value)
worker = self.workers[agent_idx]
worker.task_queue.put((data, TaskType.SET_ATTR))
result = safe_get(worker.result_queue, timeout=0.1)
assert result is None
| 34.710594
| 114
| 0.607385
|
659315da2586a8a11cc3bd6fe56111c0d791b434
| 253
|
py
|
Python
|
karanja_me/manage.py
|
denisKaranja/django-dive-in
|
451742ac065136cb0f9ac7b042d5913bbc2a36d0
|
[
"MIT"
] | null | null | null |
karanja_me/manage.py
|
denisKaranja/django-dive-in
|
451742ac065136cb0f9ac7b042d5913bbc2a36d0
|
[
"MIT"
] | null | null | null |
karanja_me/manage.py
|
denisKaranja/django-dive-in
|
451742ac065136cb0f9ac7b042d5913bbc2a36d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "karanja_me.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23
| 74
| 0.774704
|
337c59aff63d5fed88f593f1c2fc399ae66d94c2
| 6,512
|
py
|
Python
|
influxdb_client/domain/pkg_summary_diff_buckets.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | 1
|
2021-06-06T10:39:47.000Z
|
2021-06-06T10:39:47.000Z
|
influxdb_client/domain/pkg_summary_diff_buckets.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/pkg_summary_diff_buckets.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PkgSummaryDiffBuckets(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'name': 'str',
'old_description': 'str',
'new_description': 'str',
'old_rp': 'str',
'new_rp': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'old_description': 'oldDescription',
'new_description': 'newDescription',
'old_rp': 'oldRP',
'new_rp': 'newRP'
}
def __init__(self, id=None, name=None, old_description=None, new_description=None, old_rp=None, new_rp=None): # noqa: E501
"""PkgSummaryDiffBuckets - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._name = None
self._old_description = None
self._new_description = None
self._old_rp = None
self._new_rp = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if old_description is not None:
self.old_description = old_description
if new_description is not None:
self.new_description = new_description
if old_rp is not None:
self.old_rp = old_rp
if new_rp is not None:
self.new_rp = new_rp
@property
def id(self):
"""Gets the id of this PkgSummaryDiffBuckets. # noqa: E501
:return: The id of this PkgSummaryDiffBuckets. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PkgSummaryDiffBuckets.
:param id: The id of this PkgSummaryDiffBuckets. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this PkgSummaryDiffBuckets. # noqa: E501
:return: The name of this PkgSummaryDiffBuckets. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PkgSummaryDiffBuckets.
:param name: The name of this PkgSummaryDiffBuckets. # noqa: E501
:type: str
"""
self._name = name
@property
def old_description(self):
"""Gets the old_description of this PkgSummaryDiffBuckets. # noqa: E501
:return: The old_description of this PkgSummaryDiffBuckets. # noqa: E501
:rtype: str
"""
return self._old_description
@old_description.setter
def old_description(self, old_description):
"""Sets the old_description of this PkgSummaryDiffBuckets.
:param old_description: The old_description of this PkgSummaryDiffBuckets. # noqa: E501
:type: str
"""
self._old_description = old_description
@property
def new_description(self):
"""Gets the new_description of this PkgSummaryDiffBuckets. # noqa: E501
:return: The new_description of this PkgSummaryDiffBuckets. # noqa: E501
:rtype: str
"""
return self._new_description
@new_description.setter
def new_description(self, new_description):
"""Sets the new_description of this PkgSummaryDiffBuckets.
:param new_description: The new_description of this PkgSummaryDiffBuckets. # noqa: E501
:type: str
"""
self._new_description = new_description
@property
def old_rp(self):
"""Gets the old_rp of this PkgSummaryDiffBuckets. # noqa: E501
:return: The old_rp of this PkgSummaryDiffBuckets. # noqa: E501
:rtype: str
"""
return self._old_rp
@old_rp.setter
def old_rp(self, old_rp):
"""Sets the old_rp of this PkgSummaryDiffBuckets.
:param old_rp: The old_rp of this PkgSummaryDiffBuckets. # noqa: E501
:type: str
"""
self._old_rp = old_rp
@property
def new_rp(self):
"""Gets the new_rp of this PkgSummaryDiffBuckets. # noqa: E501
:return: The new_rp of this PkgSummaryDiffBuckets. # noqa: E501
:rtype: str
"""
return self._new_rp
@new_rp.setter
def new_rp(self, new_rp):
"""Sets the new_rp of this PkgSummaryDiffBuckets.
:param new_rp: The new_rp of this PkgSummaryDiffBuckets. # noqa: E501
:type: str
"""
self._new_rp = new_rp
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PkgSummaryDiffBuckets):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.798354 | 127 | 0.582924 |
c887339f08f310fa9213b9b956dbc1adb7cbbe62 | 5,853 | py | Python | main.py | qarchli/dqn-on-space-invaders | 148f1a7b65b2f47dab736b08cc7d6b7de1725a00 | ["MIT"] | 1 | 2020-06-05T07:05:17.000Z | 2020-06-05T07:05:17.000Z | main.py | qarchli/dqn-on-space-invaders | 148f1a7b65b2f47dab736b08cc7d6b7de1725a00 | ["MIT"] | null | null | null | main.py | qarchli/dqn-on-space-invaders | 148f1a7b65b2f47dab736b08cc7d6b7de1725a00 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pickle
from collections import deque
import gym
from gym import wrappers
import torch
from agent import DQNAgent
def train(n_episodes=100,
max_t=10000,
eps_start=1.0,
eps_end=0.01,
eps_decay=0.996):
"""
Training a Deep Q-Learning agent to play Space Invaders
---
Params
======
        n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
Returns
======
scores: list of the scores obtained in each episode
"""
# to store the score of each episode
scores = []
# list containing the timestep per episode at which the game is over
done_timesteps = []
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start
for i_episode in range(1, n_episodes + 1):
state = env.reset()
score = 0
for timestep in range(max_t):
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
            # agent.step() either stores the experience tuple in the replay
            # buffer or, once the buffer holds at least one full batch,
            # samples a minibatch and trains the local Q-network
            # (the actor / local_qnetwork).
state = next_state
score += reward
if done:
print('\tEpisode {} done in {} timesteps.'.format(
i_episode, timestep))
done_timesteps.append(timestep)
break
scores_window.append(score) # save the most recent score
scores.append(score) # save the most recent score
eps = max(eps * eps_decay, eps_end) # decrease the epsilon
        if i_episode % SAVE_EVERY == 0:
print('\rEpisode {}\tTimestep {}\tAverage Score {:.2f}'.format(
i_episode, timestep, np.mean(scores_window)), end="")
            # periodically checkpoint the network weights
torch.save(agent.qnetwork_local.state_dict(),
SAVE_DIR + 'model.pth')
            # periodically save the scores so far
with open(SAVE_DIR + 'scores', 'wb') as fp:
pickle.dump(scores, fp)
# save the done timesteps
with open(SAVE_DIR + 'dones', 'wb') as fp:
pickle.dump(done_timesteps, fp)
# save the final network
torch.save(agent.qnetwork_local.state_dict(), SAVE_DIR + 'model.pth')
# save the final scores
with open(SAVE_DIR + 'scores', 'wb') as fp:
pickle.dump(scores, fp)
# save the done timesteps
with open(SAVE_DIR + 'dones', 'wb') as fp:
pickle.dump(done_timesteps, fp)
return scores
def test(env, trained_agent, n_games=5, n_steps_per_game=10000):
for game in range(n_games):
env = wrappers.Monitor(env,
"./test/game-{}".format(game),
force=True)
observation = env.reset()
score = 0
for step in range(n_steps_per_game):
action = trained_agent.act(observation)
observation, reward, done, info = env.step(action)
score += reward
if done:
print('GAME-{} OVER! score={}'.format(game, score))
break
env.close()
# Agent was trained on GPU in colab.
# The files in the train folder are the ones produced by that Colab run.
# TODO
# - Encapsulate the training data into a trainloader to avoid GPU runtime error
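# A minimal sketch of the TODO above (not part of the original project): wrap
# already-collected transition tensors in a torch DataLoader so that learning
# batches are assembled on the CPU and moved to the GPU one batch at a time.
# The helper name and its argument layout are assumptions for illustration.
def make_trainloader(states, actions, rewards, next_states, dones, batch_size=64):
    """Wrap pre-collected transition tensors in a shuffling DataLoader."""
    from torch.utils.data import DataLoader, TensorDataset
    dataset = TensorDataset(states, actions, rewards, next_states, dones)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)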
if __name__ == '__main__':
TRAIN = True # train or test
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 100 # how often to update the target network
SAVE_EVERY = 100 # how often to save the network to disk
MAX_TIMESTEPS = 10
N_EPISODES = 10
SAVE_DIR = "./train/"
env = gym.make('SpaceInvaders-v0')
if TRAIN:
# init agent
agent = DQNAgent(state_size=4,
action_size=env.action_space.n,
seed=0,
lr=LR,
gamma=GAMMA,
tau=TAU,
buffer_size=BUFFER_SIZE,
batch_size=BATCH_SIZE,
update_every=UPDATE_EVERY)
# train and get the scores
scores = train(n_episodes=N_EPISODES, max_t=MAX_TIMESTEPS)
# plot the running mean of scores
N = 100 # running mean window
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(
np.convolve(np.array(scores), np.ones((N, )) / N, mode='valid'))
plt.ylabel('Score')
plt.xlabel('Timestep #')
plt.show()
else:
N_GAMES = 5
N_STEPS_PER_GAME = 10000
# init a new agent
trained_agent = DQNAgent(state_size=4,
action_size=env.action_space.n,
seed=0)
# replace the weights with the trained weights
trained_agent.qnetwork_local.load_state_dict(
torch.load(SAVE_DIR + 'model.pth'))
# enable inference mode
trained_agent.qnetwork_local.eval()
# test and save results to disk
        test(env, trained_agent, n_games=N_GAMES, n_steps_per_game=N_STEPS_PER_GAME)
| 34.429412 | 89 | 0.571673 |
f78a49e89b79ea1c013dedb5cb776bddbe080ff5 | 3,134 | py | Python | launch.py | U039b/frida-scripts | f16b6bd631b52ca9444723d24d6f21550f6ed266 | ["MIT"] | 3 | 2021-11-21T09:38:08.000Z | 2022-01-19T22:06:00.000Z | launch.py | U039b/frida-scripts | f16b6bd631b52ca9444723d24d6f21550f6ed266 | ["MIT"] | null | null | null | launch.py | U039b/frida-scripts | f16b6bd631b52ca9444723d24d6f21550f6ed266 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import os
import frida
import glob
import time
import json
from frida_tools.application import ConsoleApplication
PWD = os.path.dirname(os.path.abspath(__file__))
class Application(ConsoleApplication):
SESSION_ID_LENGTH = 32
MASTER_KEY_LENGTH = 48
def _add_options(self, parser):
parser.add_option('-o', '--output', help='The output directory')
def _initialize(self, parser, options, args):
if not os.path.exists(options.output):
os.makedirs(options.output)
self._output_dir = options.output
def _usage(self):
return 'usage: %prog [options] target'
def _needs_target(self):
return True
@staticmethod
def _agent():
js_files = glob.glob(f'{PWD}/scripts/*.js', recursive=True)
js_script = ''
for js_file in js_files:
with open(js_file, mode='r') as f:
js_script += f.read()
with open(f'{PWD}/script.txt', mode='w') as f:
f.write(js_script)
return js_script
def _start(self):
self._output_files = {}
self._update_status('Attached')
def on_message(message, data):
self._reactor.schedule(lambda: self._on_message(message, data))
self._session_cache = set()
self._script = self._session.create_script(self._agent())
self._script.on('message', on_message)
self._update_status('Loading script...')
self._script.load()
self._update_status('Loaded script')
api = self._script.exports
api.log_ssl_keys()
api.log_aes_info()
self._update_status('Loaded script')
self._resume()
time.sleep(1)
# api.log_device_info()
def _on_child_added(self, child):
print('⚡ child_added: {}'.format(child))
self._instrument(child.pid)
def _on_child_removed(self, child):
print('⚡ child_removed: {}'.format(child))
def _save_data(self):
for filename, elt in self._output_files.items():
if len(elt) == 0:
continue
data_type = elt[0].get('data_type')
with open(f'{self._output_dir}/{filename}', mode='w') as out:
if data_type == 'json':
json.dump(elt, out, indent=2)
else:
for record in elt:
data = record.get('data')
out.write(f'{data}\n')
def _acc_data(self, data):
output_file = data.get('dump')
if output_file not in self._output_files:
self._output_files[output_file] = []
self._output_files[output_file].append(data)
def _on_message(self, message, data):
if message['type'] == 'send':
if message.get('payload'):
self._acc_data(message.get('payload'))
return
def main():
    app = None
    try:
        app = Application()
        app.run()
    except KeyboardInterrupt:
        # Nothing extra to handle on Ctrl-C; fall through and save collected data
        pass
    finally:
        if app is not None:
            app._save_data()
if __name__ == '__main__':
main()
| 27.491228 | 75 | 0.580728 |
f7a6088182132a5aabda0e2b1fa218231e2dcb82 | 11,276 | py | Python | napalm_getters/napalm_getters.py | jwbensley/NAPALM_Examples | 7095592dc3832b366056125e952038a88e195f6b | ["MIT"] | 2 | 2019-03-09T19:19:04.000Z | 2022-02-21T03:06:55.000Z | napalm_getters/napalm_getters.py | jwbensley/NAPALM_Examples | 7095592dc3832b366056125e952038a88e195f6b | ["MIT"] | null | null | null | napalm_getters/napalm_getters.py | jwbensley/NAPALM_Examples | 7095592dc3832b366056125e952038a88e195f6b | ["MIT"] | null | null | null |
#!/usr/bin/python3
"""
Loop over a list of devices in a YAML inventory file and run all of the built-in
NAPALM getters against each device. Log the output to a per-device file as
structured YAML data.
sudo -H pip3 install napalm
example inventory.yml:
---
# required: hostname, os
# optional: username, password, timeout, optional_args
R1:
hostname: 192.168.223.2
os: ios
username: admin
password: admin
timeout: 15 # Default is 60 seconds
optional_args:
secret: enable
transport: telnet # Default is SSH
port: 23 # Default is 22
verbose: True # Default is False
R2:
hostname: 192.168.188.2
os: junos
optional_args:
config_lock: True
"""
import argparse
from datetime import datetime
from getpass import getpass
from jnpr.junos.exception import ConnectAuthError as JuniperConnectAuthError
from jnpr.junos.exception import ConnectRefusedError as JuniperConnectRefusedError
import napalm
from napalm._SUPPORTED_DRIVERS import SUPPORTED_DRIVERS
from napalm.base.exceptions import ConnectionException
from napalm.base.exceptions import LockError
from napalm.base.exceptions import MergeConfigException
from napalm.base.exceptions import ReplaceConfigException
from napalm.base.exceptions import UnlockError
from netmiko.utilities import get_structured_data
from netmiko.ssh_exception import NetMikoAuthenticationException
import os
from paramiko.ssh_exception import SSHException
import pprint
from socket import error as SocketError
from socket import timeout as SocketTimeout
import sys
import yaml
def check_log_path_exists(log_dir):
if not os.path.isdir(log_dir):
print("Path to output logging directory doesn't exist: {}".
format(log_dir))
try:
os.mkdir(log_dir)
print("Created directory: {}".format(log_dir))
return True
except Exception:
print("Couldn't create directory: {}".format(log_dir))
return False
else:
return True
def get_port(device):
port = "unknown"
try:
if device.netmiko_optional_args['port']:
port = device.netmiko_optional_args['port']
except (AttributeError, KeyError):
pass
try:
if device.port:
port = device.port
except AttributeError:
pass
return port
def get_transport(device):
transport = "unknown"
try:
if device.transport:
transport = device.transport
except (AttributeError):
pass
return transport
def load_inv(filename, type=None):
try:
inventory_file = open(filename)
except Exception:
print("Couldnt open inventory file {}".format(filename))
sys.exit(1)
try:
        inventory = yaml.safe_load(inventory_file)
except Exception as e:
print("Couldn't load YAML file {}: {}".format(filename, e))
sys.exit(1)
inventory_file.close()
# Filter the inventory down to the specified type if one is supplied
if type:
filtered_inventory = {}
for dev, opt in inventory.items():
if opt['os'] == type:
filtered_inventory[dev] = opt
else:
filtered_inventory = inventory
return filtered_inventory
def parse_cli_args():
parser = argparse.ArgumentParser(
        description='Loop over a list of devices in an inventory file and log '
'the structured output of every NAPALM getter to a file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'-i', '--inventory-file',
help='Input YAML inventory file',
type=str,
default='inventory.yml',
)
parser.add_argument(
'-l', '--log-dir',
help='Path to the output logging directory',
type=str,
default='./logs',
)
parser.add_argument(
'-t', '--type',
help='Only process devices with the specific OS type e.g. ios or junos',
type=str,
default=None,
)
parser.add_argument(
'-u', '--username',
help='Default username for device access',
type=str,
default=None,
)
return vars(parser.parse_args())
def set_dev_opts(args, opt):
if 'username' not in opt:
if not args['username']:
print ("No username specified")
return False
else:
opt['username'] = args['username']
if 'password' not in opt:
opt['password'] = args['password']
if opt['password'] == "":
print("No password specified")
return False
if 'optional_args' not in opt:
opt['optional_args'] = None
return True
def main():
args = parse_cli_args()
args['password'] = getpass("Default password:")
inventory = load_inv(args['inventory_file'], args['type'])
if not check_log_path_exists(args['log_dir']):
sys.exit(1)
for dev, opt in inventory.items():
print("Trying {}...".format(dev))
if opt['os'] not in SUPPORTED_DRIVERS:
print("{} has an unsupported device OS type: {}".format(dev, opt['os']))
continue
timestamp = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
output_file = args['log_dir']+'/'+dev+'_'+timestamp+'.yml'
try:
output_log = open(output_file, 'w')
except Exception:
print("Couldn't open output log file {}".format(output_log))
continue
if not set_dev_opts(args, opt):
continue
driver = napalm.get_network_driver(opt['os'])
# If Kwargs doesn't have exactly the keys required (no extras)
# driver() will throw an exception
opt.pop('os')
device = driver(**opt)
# Try to get the transport port number and type for debug messages
port = get_port(device)
transport = get_transport(device)
# Connect to the device
try:
device.open()
except (JuniperConnectAuthError, NetMikoAuthenticationException):
print("Unable to authenticate to {} as {}".
format(opt['hostname'], opt['username']))
continue
except (ConnectionException, JuniperConnectRefusedError, SocketError,
SocketTimeout, SSHException):
print("Unable to connect to: {} using {} on port {}".
format(opt['hostname'], transport, port))
continue
structured_output = {}
try:
bgp_neighbours = device.get_bgp_neighbors()
structured_output['get_bgp_neighbors'] = bgp_neighbours
except Exception:
print("Couldn't get BGP neighbours from {}".
format(opt['hostname']))
'''
        # IOS bugs when the neighbour is UP?!
# Also only supports IPv4/IPv6 unicast AFI/SAFI
structured_output['get_bgp_neighbors_detail'] = {}
# table will be a tuple,
# entry 0 is the routerID
# entry 1 is the dict of peers
for table in bgp_neighbours.items():
# Each 'peer' will be a dict,
# key is the BGP peer IP and val is a defaultdict,
# with a single entry which is also default dict,
# which contains all the BGP peer details
if table[1]['peers']:
for neighbour in table[1]['peers'].keys():
try:
bgp_neighbours_detailed = device.get_bgp_neighbors_detail(neighbour)
for k1, v1 in bgp_neighbours_detailed.items():
for k2, v2 in v1.items():
structured_output['get_bgp_neighbors_detail'][neighbour] = v2[0]
except Exception as e:
print("Couldn't get detailed BGP neighbour information"
" from {} for {}".format(opt['hostname'], neighbour))
print(e)
sys.exit(1)
#continue
'''
try:
environment = device.get_environment()
structured_output['get_environment'] = environment
except Exception:
print("Couldn't get environment details from {}".
format(opt['hostname']))
try:
facts = device.get_facts()
structured_output['get_facts'] = facts
except Exception:
print("Couldn't get facts from {}".
format(opt['hostname']))
try:
interfaces = device.get_interfaces()
structured_output['get_interfaces'] = interfaces
except Exception:
print("Couldn't get interfaces from {}".
format(opt['hostname']))
try:
interface_counters = device.get_interfaces_counters()
structured_output['get_interfaces_counters'] = interface_counters
except Exception:
print("Couldn't get interface counters from {}".
format(opt['hostname']))
try:
interface_ips = device.get_interfaces_ip()
structured_output['get_interfaces_ip'] = interface_ips
except Exception:
print("Couldn't get interface IPs from {}".
format(opt['hostname']))
try:
vrfs = device.get_network_instances()
structured_output['get_network_instances'] = vrfs
except Exception:
print("Couldn't get VRFs from {}".
format(opt['hostname']))
try:
optics = device.get_optics()
structured_output['get_optics'] = optics
except Exception:
print("Couldn't get optics from {}".
format(opt['hostname']))
try:
snmp = device.get_snmp_information()
structured_output['get_snmp_information'] = snmp
except Exception:
print("Couldn't get optics from {}".
format(opt['hostname']))
try:
ntp_servers = device.get_ntp_servers()
structured_output['get_ntp_servers'] = ntp_servers
except Exception:
print("Couldn't get optics from {}".
format(opt['hostname']))
try:
ntp_stats = device.get_ntp_stats()
structured_output['get_ntp_stats'] = ntp_stats
except Exception:
print("Couldn't get optics from {}".
format(opt['hostname']))
try:
yaml.dump(structured_output, output_log, default_flow_style=False)
except Exception:
print("Couldn't serialise CLI output to YAML")
output_log.close()
device.close()
print("{} done".format(dev))
return
if __name__ == '__main__':
sys.exit(main())
| 30.724796 | 97 | 0.572189 |
69cb68da05f4e747f80b6e5e35160cef1b6d99ea | 969 | py | Python | aws_eden_core/validators.py | baikonur-oss/aws-eden-core | bee5bd1d281421df7392222b41e3d32a72106e50 | ["MIT"] | 1 | 2020-05-26T08:44:07.000Z | 2020-05-26T08:44:07.000Z | aws_eden_core/validators.py | baikonur-oss/aws-eden-core | bee5bd1d281421df7392222b41e3d32a72106e50 | ["MIT"] | 71 | 2019-08-22T03:04:47.000Z | 2020-10-09T19:50:09.000Z | aws_eden_core/validators.py | baikonur-oss/aws-eden-core | bee5bd1d281421df7392222b41e3d32a72106e50 | ["MIT"] | null | null | null |
import logging
import re
import boto3
logger = logging.getLogger()
ecr = boto3.client('ecr')
def is_string(value):
if type(value) == str:
return True
return False
def check_image_uri(image_uri: str):
logger.info(f"Checking if image {image_uri} exists")
    groups = re.match(r'([0-9]+)\.dkr\.ecr\.[^\.]+\.amazonaws\.com/([^:]+):(.+)', image_uri).groups()
registry_id = groups[0]
repository_name = groups[1]
image_tag = groups[2]
response = ecr.describe_images(
registryId=registry_id,
repositoryName=repository_name,
imageIds=[
{
'imageTag': image_tag
}
]
)
logger.debug(f"Response from ECR: {response}")
if len(response['imageDetails']) == 1:
logger.info("Image exists")
return True
else:
logger.info("Image not found")
raise ValueError(f"Image {image_uri} not found in registry/account {registry_id}")
| 23.071429 | 100 | 0.602683 |
03b418036627cf9ec72cf4228c761f6f683ff71e | 4,281 | py | Python | TTS/tts/tf/utils/convert_torch_to_tf_utils.py | mightmay/Mien-TTS | 8a22ff0a79558b3cf4981ce1b63f4d1485ea6338 | ["MIT"] | null | null | null | TTS/tts/tf/utils/convert_torch_to_tf_utils.py | mightmay/Mien-TTS | 8a22ff0a79558b3cf4981ce1b63f4d1485ea6338 | ["MIT"] | null | null | null | TTS/tts/tf/utils/convert_torch_to_tf_utils.py | mightmay/Mien-TTS | 8a22ff0a79558b3cf4981ce1b63f4d1485ea6338 | ["MIT"] | 1 | 2021-04-28T17:30:03.000Z | 2021-04-28T17:30:03.000Z |
import numpy as np
import tensorflow as tf
# NOTE: linter has a problem with the current TF release
#pylint: disable=no-value-for-parameter
#pylint: disable=unexpected-keyword-arg
def tf_create_dummy_inputs():
""" Create dummy inputs for TF Tacotron2 model """
batch_size = 4
max_input_length = 32
max_mel_length = 128
pad = 1
n_chars = 24
input_ids = tf.random.uniform([batch_size, max_input_length + pad], maxval=n_chars, dtype=tf.int32)
input_lengths = np.random.randint(0, high=max_input_length+1 + pad, size=[batch_size])
input_lengths[-1] = max_input_length
input_lengths = tf.convert_to_tensor(input_lengths, dtype=tf.int32)
mel_outputs = tf.random.uniform(shape=[batch_size, max_mel_length + pad, 80])
mel_lengths = np.random.randint(0, high=max_mel_length+1 + pad, size=[batch_size])
mel_lengths[-1] = max_mel_length
mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)
return input_ids, input_lengths, mel_outputs, mel_lengths
def compare_torch_tf(torch_tensor, tf_tensor):
""" Compute the average absolute difference b/w torch and tf tensors """
return abs(torch_tensor.detach().numpy() - tf_tensor.numpy()).mean()
def convert_tf_name(tf_name):
""" Convert certain patterns in TF layer names to Torch patterns """
tf_name_tmp = tf_name
tf_name_tmp = tf_name_tmp.replace(':0', '')
tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_1/recurrent_kernel', '/weight_hh_l0')
tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_2/kernel', '/weight_ih_l1')
tf_name_tmp = tf_name_tmp.replace('/recurrent_kernel', '/weight_hh')
tf_name_tmp = tf_name_tmp.replace('/kernel', '/weight')
tf_name_tmp = tf_name_tmp.replace('/gamma', '/weight')
tf_name_tmp = tf_name_tmp.replace('/beta', '/bias')
tf_name_tmp = tf_name_tmp.replace('/', '.')
return tf_name_tmp
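# For example (the layer name is illustrative): a TF variable named
# 'decoder/lstm/recurrent_kernel:0' is converted to the Torch-style key
# 'decoder.lstm.weight_hh'.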
def transfer_weights_torch_to_tf(tf_vars, var_map_dict, state_dict):
""" Transfer weigths from torch state_dict to TF variables """
print(" > Passing weights from Torch to TF ...")
for tf_var in tf_vars:
torch_var_name = var_map_dict[tf_var.name]
print(f' | > {tf_var.name} <-- {torch_var_name}')
# if tuple, it is a bias variable
if not isinstance(torch_var_name, tuple):
torch_layer_name = '.'.join(torch_var_name.split('.')[-2:])
torch_weight = state_dict[torch_var_name]
if 'convolution1d/kernel' in tf_var.name or 'conv1d/kernel' in tf_var.name:
# out_dim, in_dim, filter -> filter, in_dim, out_dim
numpy_weight = torch_weight.permute([2, 1, 0]).detach().cpu().numpy()
elif 'lstm_cell' in tf_var.name and 'kernel' in tf_var.name:
numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()
# if variable is for bidirectional lstm and it is a bias vector there
# needs to be pre-defined two matching torch bias vectors
elif '_lstm/lstm_cell_' in tf_var.name and 'bias' in tf_var.name:
bias_vectors = [value for key, value in state_dict.items() if key in torch_var_name]
assert len(bias_vectors) == 2
numpy_weight = bias_vectors[0] + bias_vectors[1]
elif 'rnn' in tf_var.name and 'kernel' in tf_var.name:
numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()
elif 'rnn' in tf_var.name and 'bias' in tf_var.name:
bias_vectors = [value for key, value in state_dict.items() if torch_var_name[:-2] in key]
assert len(bias_vectors) == 2
numpy_weight = bias_vectors[0] + bias_vectors[1]
elif 'linear_layer' in torch_layer_name and 'weight' in torch_var_name:
numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()
else:
numpy_weight = torch_weight.detach().cpu().numpy()
            assert np.all(tf_var.shape == numpy_weight.shape), f" [!] weight shapes do not match: {tf_var.name} vs {torch_var_name} --> {tf_var.shape} vs {numpy_weight.shape}"
tf.keras.backend.set_value(tf_var, numpy_weight)
return tf_vars
def load_tf_vars(model_tf, tf_vars):
for tf_var in tf_vars:
model_tf.get_layer(tf_var.name).set_weights(tf_var)
return model_tf
| 50.364706 | 173 | 0.687223 |
129d7fac54a972374147d6109a01e63f0a9384f4 | 537 | py | Python | application/userproject/forms.py | sebazai/tsoha-tyoaikaseuranta | a2c9671d5ad937362bfb90e2150924120f2d2233 | ["MIT"] | null | null | null | application/userproject/forms.py | sebazai/tsoha-tyoaikaseuranta | a2c9671d5ad937362bfb90e2150924120f2d2233 | ["MIT"] | 2 | 2019-02-15T19:20:17.000Z | 2019-02-25T08:42:45.000Z | application/userproject/forms.py | sebazai/tsoha-tyoajanseuranta | a2c9671d5ad937362bfb90e2150924120f2d2233 | ["MIT"] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import validators
from wtforms import StringField, IntegerField, SelectField, BooleanField
from application import db
from sqlalchemy.sql import text
from application.userproject import models
from application.project import models
class UserProjectForm(FlaskForm):
    project = SelectField('Projekti')  # label: "Project"
    users = SelectField('Käyttäjä')  # label: "User"
    asiakas = BooleanField('Onko projektin asiakas?')  # label: "Is this the project's client?"
    paaprojekti = BooleanField('Pääasiallinen projekti?')  # label: "Primary project?"
class Meta:
csrf = False
| 29.833333 | 72 | 0.782123 |