Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | range / values |
|---|---|---|
| hexsha | stringlengths | 40-40 |
| size | int64 | 4-1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4-209 |
| max_stars_repo_name | stringlengths | 5-121 |
| max_stars_repo_head_hexsha | stringlengths | 40-40 |
| max_stars_repo_licenses | listlengths | 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24 ⌀ |
| max_issues_repo_path | stringlengths | 4-209 |
| max_issues_repo_name | stringlengths | 5-121 |
| max_issues_repo_head_hexsha | stringlengths | 40-40 |
| max_issues_repo_licenses | listlengths | 1-10 |
| max_issues_count | int64 | 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24 ⌀ |
| max_forks_repo_path | stringlengths | 4-209 |
| max_forks_repo_name | stringlengths | 5-121 |
| max_forks_repo_head_hexsha | stringlengths | 40-40 |
| max_forks_repo_licenses | listlengths | 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24 ⌀ |
| content | stringlengths | 4-1.02M |
| avg_line_length | float64 | 1.07-66.1k |
| max_line_length | int64 | 4-266k |
| alphanum_fraction | float64 | 0.01-1 |
hexsha: a3d2243d082790614459a110978ae66628dc23bf | size: 5,978 | ext: py | lang: Python
max_stars: tests/models/validators/v1_3_0/jsd_f5a269c44f2a95fa.py @ daxm/dnacentersdk (head 5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b), licenses ["MIT"], stars count: null, stars event datetimes: null
max_issues: tests/models/validators/v1_3_0/jsd_f5a269c44f2a95fa.py @ daxm/dnacentersdk (head 5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b), licenses ["MIT"], issues count: null, issues event datetimes: null
max_forks: tests/models/validators/v1_3_0/jsd_f5a269c44f2a95fa.py @ daxm/dnacentersdk (head 5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b), licenses ["MIT"], forks count: null, forks event datetimes: null
content:
# -*- coding: utf-8 -*-
"""DNA Center Get task tree data model.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF5A269C44F2A95Fa(object):
"""Get task tree request schema definition."""
def __init__(self):
super(JSONSchemaValidatorF5A269C44F2A95Fa, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"response": {
"description":
"",
"items": {
"properties": {
"additionalStatusURL": {
"description":
"",
"type": [
"string",
"null"
]
},
"data": {
"description":
"",
"type": [
"string",
"null"
]
},
"endTime": {
"description":
"",
"type": [
"string",
"null",
"number"
]
},
"errorCode": {
"description":
"",
"type": [
"string",
"null"
]
},
"errorKey": {
"description":
"",
"type": [
"string",
"null"
]
},
"failureReason": {
"description":
"",
"type": [
"string",
"null"
]
},
"id": {
"description":
"",
"type": [
"string",
"null"
]
},
"instanceTenantId": {
"description":
"",
"type": [
"string",
"null"
]
},
"isError": {
"type": [
"boolean",
"null"
]
},
"lastUpdate": {
"description":
"",
"type": [
"string",
"null",
"number"
]
},
"operationIdList": {},
"parentId": {
"description":
"",
"type": [
"string",
"null"
]
},
"progress": {
"description":
"",
"type": [
"string",
"null"
]
},
"rootId": {
"description":
"",
"type": [
"string",
"null"
]
},
"serviceType": {
"description":
"",
"type": [
"string",
"null"
]
},
"startTime": {
"description":
"",
"type": [
"string",
"null",
"number"
]
},
"username": {
"description":
"",
"type": [
"string",
"null"
]
},
"version": {
"type": [
"number",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null",
"object"
]
},
"version": {
"description":
"",
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
avg_line_length: 27.172727 | max_line_length: 78 | alphanum_fraction: 0.357143
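# --- Usage sketch (not part of the original file above) ---
# A minimal illustration of how a generated validator like the one above is typically
# exercised: instantiate it and call validate() on a candidate payload. The import path
# and the sample payload are assumptions built only from the file shown above; validate()
# raises dnacentersdk.exceptions.MalformedRequest when the payload violates the schema.
from tests.models.validators.v1_3_0.jsd_f5a269c44f2a95fa import (
    JSONSchemaValidatorF5A269C44F2A95Fa,
)

validator = JSONSchemaValidatorF5A269C44F2A95Fa()
sample_payload = {
    "response": [
        {"id": "task-1", "isError": False, "progress": "Init", "version": 1}
    ],
    "version": "1.0",
}
validator.validate(sample_payload)  # no exception means the payload matches the schema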
hexsha: d7c71789c6af3a42b83b5b8f9f9d17a81a1437f6 | size: 1,848 | ext: py | lang: Python
max_stars: src/csci_utils/io/io.py @ kuangzheng800/2021sp-csci-utils-kuangzheng800-copy (head 5b269a280c1ce826330d11cee43fb16442910a36), licenses ["MIT"], stars count: null, stars event datetimes: null
max_issues: src/csci_utils/io/io.py @ kuangzheng800/2021sp-csci-utils-kuangzheng800-copy (head 5b269a280c1ce826330d11cee43fb16442910a36), licenses ["MIT"], issues count: null, issues event datetimes: null
max_forks: src/csci_utils/io/io.py @ kuangzheng800/2021sp-csci-utils-kuangzheng800-copy (head 5b269a280c1ce826330d11cee43fb16442910a36), licenses ["MIT"], forks count: null, forks event datetimes: null
content:
import os
from contextlib import contextmanager
from typing import ContextManager, Union
from datetime import datetime as dt
from builtins import FileExistsError
import warnings
@contextmanager
def atomic_write(
file: Union[str, os.PathLike], mode: str = "w", as_file: bool = True, **kwargs
) -> ContextManager:
"""Write a file atomically
:param file: str or :class:`os.PathLike` target to write
:param mode: the mode in which the file is opened, defaults to "w" (writing in text mode)
:param bool as_file: if True, the yielded object is a :class:File.
(eg, what you get with `open(...)`). Otherwise, it will be the
temporary file path string
:param kwargs: anything else needed to open the file
:raises: FileExistsError if target exists
Example::
with atomic_write("hello.txt") as f:
f.write("world!")
"""
dir_path = os.path.dirname(os.path.realpath(file))
file_name = os.path.basename(file)
temp_file_name = os.path.join(dir_path, '.temp' + str(dt.now()).encode('ascii').hex()[:6] + file_name)
    if os.path.exists(file):
        # honour the documented contract: fail loudly instead of yielding None
        raise FileExistsError(
            'Writing unsuccessful, target file already exists: {}'.format(file)
        )
    temp_file = open(temp_file_name, mode, **kwargs)
    try:
        if as_file:
            yield temp_file
        else:
            yield temp_file_name
    except Exception as e:
        warnings.warn('Writing unsuccessful, aborted due to {}'.format(str(e)))
        temp_file.close()
        os.remove(temp_file_name)
        raise  # propagate the failure; the target file is left untouched
    else:  # instead of using finally, else is only called when no exception is encountered
        temp_file.close()
        os.rename(temp_file_name, file)
if __name__ == '__main__':  # pragma: no cover
    with atomic_write("hello.txt", as_file=False) as fname:
        with open(fname, "w") as f:
            f.write("world!")
avg_line_length: 30.295082 | max_line_length: 106 | alphanum_fraction: 0.645022
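# --- Usage sketch (not part of the original file above) ---
# A small, hypothetical demonstration of both modes of atomic_write: with as_file=True
# (the default) the context manager yields an open file handle, with as_file=False it
# yields the temporary path so the caller can open it however it likes. The import path
# assumes the installed package layout listed above; the file names are made up.
from csci_utils.io.io import atomic_write

with atomic_write("report.txt") as f:
    f.write("all done\n")  # written to a temp file, renamed to report.txt on success

with atomic_write("report.csv", as_file=False) as tmp_path:
    with open(tmp_path, "w") as f:
        f.write("a,b\n1,2\n")

# If report.txt already existed, atomic_write would raise FileExistsError instead.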
hexsha: 21b46be984f52712f65ae39c4346dd6ff4bc4828 | size: 6,728 | ext: py | lang: Python
max_stars: python/dask_cudf/dask_cudf/io/parquet.py @ sriramch/cudf (head db4adb284e0a16e807cb70ff01f0be537051c60a), licenses ["Apache-2.0"], stars count: null, stars event datetimes: null
max_issues: python/dask_cudf/dask_cudf/io/parquet.py @ sriramch/cudf (head db4adb284e0a16e807cb70ff01f0be537051c60a), licenses ["Apache-2.0"], issues count: null, issues event datetimes: null
max_forks: python/dask_cudf/dask_cudf/io/parquet.py @ sriramch/cudf (head db4adb284e0a16e807cb70ff01f0be537051c60a), licenses ["Apache-2.0"], forks count: null, forks event datetimes: null
content:
import warnings
from functools import partial
import pyarrow.parquet as pq
import dask.dataframe as dd
from dask.dataframe.io.parquet.arrow import ArrowEngine
import cudf
from cudf.core.column import build_categorical_column
from cudf.io import write_to_dataset
class CudfEngine(ArrowEngine):
@staticmethod
def read_metadata(*args, **kwargs):
meta, stats, parts = ArrowEngine.read_metadata(*args, **kwargs)
# If `strings_to_categorical==True`, convert objects to int32
strings_to_cats = kwargs.get("strings_to_categorical", False)
dtypes = {}
for col in meta.columns:
if meta[col].dtype == "O":
dtypes[col] = "int32" if strings_to_cats else "object"
meta = cudf.DataFrame.from_pandas(meta)
for col, dtype in dtypes.items():
meta[col] = meta[col].astype(dtype)
return (meta, stats, parts)
@staticmethod
def read_partition(
fs, piece, columns, index, categories=(), partitions=(), **kwargs
):
if columns is not None:
columns = [c for c in columns]
if isinstance(index, list):
columns += index
if isinstance(piece, str):
# `piece` is a file-path string
piece = pq.ParquetDatasetPiece(
piece, open_file_func=partial(fs.open, mode="rb")
)
else:
# `piece` = (path, row_group, partition_keys)
(path, row_group, partition_keys) = piece
piece = pq.ParquetDatasetPiece(
path,
row_group=row_group,
partition_keys=partition_keys,
open_file_func=partial(fs.open, mode="rb"),
)
strings_to_cats = kwargs.get("strings_to_categorical", False)
if cudf.utils.ioutils._is_local_filesystem(fs):
df = cudf.read_parquet(
piece.path,
engine="cudf",
columns=columns,
row_group=piece.row_group,
strings_to_categorical=strings_to_cats,
**kwargs.get("read", {}),
)
else:
with fs.open(piece.path, mode="rb") as f:
df = cudf.read_parquet(
f,
engine="cudf",
columns=columns,
row_group=piece.row_group,
strings_to_categorical=strings_to_cats,
**kwargs.get("read", {}),
)
if index and index[0] in df.columns:
df = df.set_index(index[0])
if len(piece.partition_keys) > 0:
if partitions is None:
raise ValueError("Must pass partition sets")
for i, (name, index2) in enumerate(piece.partition_keys):
categories = [
val.as_py() for val in partitions.levels[i].dictionary
]
sr = cudf.Series(index2).astype(type(index2)).repeat(len(df))
df[name] = build_categorical_column(
categories=categories, codes=sr._column, ordered=False
)
return df
@staticmethod
def write_partition(
df,
path,
fs,
filename,
partition_on,
return_metadata,
fmd=None,
compression=None,
index_cols=None,
**kwargs,
):
preserve_index = False
# Must use arrow engine if return_metadata=True
# (cudf does not collect/return metadata on write)
if return_metadata:
if index_cols:
df = df.set_index(index_cols)
preserve_index = True
md_list = []
t = df.to_arrow(preserve_index=preserve_index)
if partition_on:
pq.write_to_dataset(
t,
path,
partition_cols=partition_on,
filesystem=fs,
metadata_collector=md_list,
**kwargs,
)
else:
with fs.open(fs.sep.join([path, filename]), "wb") as fil:
pq.write_table(
t,
fil,
compression=compression,
metadata_collector=md_list,
**kwargs,
)
if md_list:
md_list[0].set_file_path(filename)
else:
md_list = [None]
if partition_on:
write_to_dataset(
df,
path,
partition_cols=partition_on,
fs=fs,
preserve_index=preserve_index,
**kwargs,
)
else:
df.to_parquet(
fs.sep.join([path, filename]),
compression=compression,
**kwargs,
)
# Return the schema needed to write the metadata
if return_metadata:
return [{"schema": t.schema, "meta": md_list[0]}]
else:
return []
def read_parquet(
path,
columns=None,
chunksize=None,
split_row_groups=True,
gather_statistics=None,
**kwargs,
):
""" Read parquet files into a Dask DataFrame
    Calls ``dask.dataframe.read_parquet`` to coordinate the execution of
``cudf.read_parquet``, and ultimately read multiple partitions into a
single Dask dataframe. The Dask version must supply an ``ArrowEngine``
class to support full functionality.
See ``cudf.read_parquet`` and Dask documentation for further details.
Examples
--------
>>> import dask_cudf
>>> df = dask_cudf.read_parquet("/path/to/dataset/") # doctest: +SKIP
See Also
--------
cudf.read_parquet
"""
if isinstance(columns, str):
columns = [columns]
if chunksize and gather_statistics is False:
warnings.warn(
"Setting chunksize parameter with gather_statistics=False. "
"Use gather_statistics=True to enable row-group aggregation."
)
if chunksize and split_row_groups is False:
warnings.warn(
"Setting chunksize parameter with split_row_groups=False. "
"Use split_row_groups=True to enable row-group aggregation."
)
return dd.read_parquet(
path,
columns=columns,
chunksize=chunksize,
split_row_groups=split_row_groups,
gather_statistics=gather_statistics,
engine=CudfEngine,
**kwargs,
)
to_parquet = partial(dd.to_parquet, engine=CudfEngine)
avg_line_length: 31.886256 | max_line_length: 77 | alphanum_fraction: 0.53805
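# --- Usage sketch (not part of the original file above) ---
# A hypothetical round trip with the wrappers defined above: dask_cudf.read_parquet
# forwards to dask.dataframe.read_parquet with engine=CudfEngine, and to_parquet is the
# matching partial. The paths and column names are made up for illustration.
import dask_cudf

ddf = dask_cudf.read_parquet(
    "/data/events/",                 # directory of parquet files (hypothetical)
    columns=["user_id", "amount"],
    split_row_groups=True,           # roughly one partition per row group
)
filtered = ddf[ddf["amount"] > 0]
dask_cudf.to_parquet(filtered, "/data/events_filtered/", write_index=False)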
hexsha: 634650e04d0d4968f0d6a3b0fb1b8eaa3cb543e7 | size: 4,985 | ext: py | lang: Python
max_stars: demos/kitchen_sink/main.py @ Jonypr-code/KivyMD (head 3ac5ba86430d9002baef678b47f0d7873b66b7bd), licenses ["MIT"], stars count: 1,111, stars events: 2015-07-15T02:31:09.000Z to 2022-03-29T17:22:02.000Z
max_issues: demos/kitchen_sink/main.py @ AllSafeCybercurity/kivyMD (head 85c51f3e7a26ca170d639e73899df5d465ee8941), licenses ["MIT"], issues count: 706, issues events: 2015-06-10T22:24:13.000Z to 2022-03-31T16:22:39.000Z
max_forks: demos/kitchen_sink/main.py @ AllSafeCybercurity/kivyMD (head 85c51f3e7a26ca170d639e73899df5d465ee8941), licenses ["MIT"], forks count: 561, forks events: 2015-07-15T04:57:23.000Z to 2022-03-31T17:14:31.000Z
content:
import ast
import os
import sys
from pathlib import Path
from kivy.core.window import Window
from kivy.factory import Factory # NOQA: F401
from kivy.lang import Builder
from kivy.loader import Loader
from libs.baseclass.dialog_change_theme import (
KitchenSinkDialogChangeTheme,
KitchenSinkUsageCode,
)
from libs.baseclass.list_items import ( # NOQA: F401
KitchenSinkOneLineLeftIconItem,
)
from kivymd import images_path
from kivymd.app import MDApp
os.environ["KIVY_PROFILE_LANG"] = "1"
if getattr(sys, "frozen", False): # bundle mode with PyInstaller
os.environ["KITCHEN_SINK_ROOT"] = sys._MEIPASS
else:
sys.path.append(os.path.abspath(__file__).split("demos")[0])
os.environ["KITCHEN_SINK_ROOT"] = str(Path(__file__).parent)
# os.environ["KITCHEN_SINK_ROOT"] = os.path.dirname(os.path.abspath(__file__))
os.environ["KITCHEN_SINK_ASSETS"] = os.path.join(
os.environ["KITCHEN_SINK_ROOT"], f"assets{os.sep}"
)
Window.softinput_mode = "below_target"
class KitchenSinkApp(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.theme_cls.primary_palette = "Teal"
self.dialog_change_theme = None
self.toolbar = None
self.data_screens = {}
Loader.loading_image = f"{images_path}transparent.png"
def build(self):
Builder.load_file(
os.path.join(
os.environ["KITCHEN_SINK_ROOT"], "libs", "kv", "list_items.kv"
)
)
return Builder.load_file(
os.path.join(
os.environ["KITCHEN_SINK_ROOT"], "libs", "kv", "start_screen.kv"
)
)
def show_dialog_change_theme(self):
if not self.dialog_change_theme:
self.dialog_change_theme = KitchenSinkDialogChangeTheme()
self.dialog_change_theme.set_list_colors_themes()
self.dialog_change_theme.open()
def on_start(self):
"""Creates a list of items with examples on start screen."""
Builder.load_file(
os.path.join(
os.environ["KITCHEN_SINK_ROOT"],
"libs",
"kv",
"dialog_change_theme.kv",
)
)
with open(
os.path.join(os.environ["KITCHEN_SINK_ROOT"], "screens_data.json")
) as read_file:
self.data_screens = ast.literal_eval(read_file.read())
data_screens = list(self.data_screens.keys())
data_screens.sort()
for name_item_example in data_screens:
self.root.ids.backdrop_front_layer.data.append(
{
"viewclass": "KitchenSinkOneLineLeftIconItem",
"text": name_item_example,
"icon": self.data_screens[name_item_example]["icon"],
"on_release": lambda x=name_item_example: self.set_example_screen(
x
),
}
)
def set_example_screen(self, name_screen):
manager = self.root.ids.screen_manager
if not manager.has_screen(
self.data_screens[name_screen]["name_screen"]
):
name_kv_file = self.data_screens[name_screen]["kv_string"]
Builder.load_file(
os.path.join(
os.environ["KITCHEN_SINK_ROOT"],
"libs",
"kv",
f"{name_kv_file}.kv",
)
)
if "Import" in self.data_screens[name_screen]:
exec(self.data_screens[name_screen]["Import"])
screen_object = eval(self.data_screens[name_screen]["Factory"])
self.data_screens[name_screen]["object"] = screen_object
if "toolbar" in screen_object.ids:
screen_object.ids.toolbar.title = name_screen
manager.add_widget(screen_object)
code_file = os.path.join(
os.environ["KITCHEN_SINK_ROOT"],
"assets",
"usage",
self.data_screens[name_screen]["source_code"],
)
with open(code_file, "r") as f:
self.sample_code = f.read()
self.screen_name = name_screen
self.website = self.data_screens[name_screen]["more_info"]
manager.current = self.data_screens[name_screen]["name_screen"]
def back_to_home_screen(self):
self.root.ids.screen_manager.current = "home"
def switch_theme_style(self):
self.theme_cls.theme_style = (
"Light" if self.theme_cls.theme_style == "Dark" else "Dark"
)
self.root.ids.backdrop.ids._front_layer.md_bg_color = [0, 0, 0, 0]
def show_code(self):
if self.theme_cls.device_orientation == "landscape":
code = KitchenSinkUsageCode(
code=self.sample_code,
title=self.screen_name,
website=self.website,
)
code.open()
KitchenSinkApp().run()
avg_line_length: 34.37931 | max_line_length: 86 | alphanum_fraction: 0.59659
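# --- Illustrative note (not part of the original file above) ---
# on_start() parses screens_data.json with ast.literal_eval, and set_example_screen()
# reads a fixed set of keys per entry. The dict below is a hypothetical sketch of what a
# single entry plausibly looks like, reconstructed only from the keys the code above
# actually accesses; every value is invented for illustration.
example_screens_data_entry = {
    "Button": {
        "icon": "gesture-tap-button",              # icon shown in the backdrop list
        "kv_string": "button",                     # loads libs/kv/button.kv
        "Import": "from libs.baseclass.button import KitchenSinkButton",
        "Factory": "Factory.KitchenSinkButton()",  # evaluated to build the screen object
        "name_screen": "button",                   # ScreenManager screen name
        "source_code": "button.py",                # file under assets/usage shown by show_code()
        "more_info": "https://kivymd.readthedocs.io/",
    }
}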
hexsha: a65c63123ea5527f4b927e3bf3d497473bc91bb5 | size: 17,395 | ext: py | lang: Python
max_stars: examples/system/ota/native_ota_example/example_test.py @ tidbyt/esp-idf (head 3a19016f84ac10fc9e3865cbc42c6c5593d2d481), licenses ["Apache-2.0"], stars count: null, stars event datetimes: null
max_issues: examples/system/ota/native_ota_example/example_test.py @ tidbyt/esp-idf (head 3a19016f84ac10fc9e3865cbc42c6c5593d2d481), licenses ["Apache-2.0"], issues count: null, issues event datetimes: null
max_forks: examples/system/ota/native_ota_example/example_test.py @ tidbyt/esp-idf (head 3a19016f84ac10fc9e3865cbc42c6c5593d2d481), licenses ["Apache-2.0"], forks count: null, forks event datetimes: null
content:
import re
import os
import socket
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
import subprocess
try:
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
import http.server as BaseHTTPServer
from http.server import SimpleHTTPRequestHandler
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, "w+") as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, "server_key.pem")
create_file(key_file, server_key)
return server_file, key_file
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(["openssl", "s_server", "-WWW", "-key", key_file, "-cert", server_file, "-port", str(server_port)])
return chunked_server
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
    # Number of iterations for which the application's operation is validated
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # the HTTPS server runs in a daemon thread; threading.Thread has no close(), so no explicit cleanup here
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
"""
    This test validates the OTA behaviour when the binary file is truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
    # Size of the truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
    f = open(binary_file, "rb")
    fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "wb")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("native_ota_example: Image validation failed, image is corrupted", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
"""
    This test validates the OTA behaviour when the headers of the binary file are truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
    # Size of the truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
    f = open(binary_file, "rb")
    fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "wb")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("native_ota_example: received package is not fit len", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_random(env, extra_data):
"""
    This test validates the OTA behaviour when the binary file consists of random data.
    Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Random binary file to be generated
random_bin_name = "random.bin"
    # Size of the random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
    # The first byte of the binary file is always set to zero. If the first byte were generated
    # randomly, it could be 0xE9 in some cases, which would cause the test case to fail.
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file over a chunked
    HTTPS connection served by openssl s_server.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8070/" + bin_name))
dut1.write("https://" + host_ip + ":8070/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, "server_cert.pem"))
os.remove(os.path.join(dut1.app.binary_path, "server_key.pem"))
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_chunked()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
avg_line_length: 47.92011 | max_line_length: 137 | alphanum_fraction: 0.708939
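# --- Usage sketch (not part of the original file above) ---
# The helper functions above are reusable outside the test bodies: get_my_ip() finds the
# host address the DUT should reach, start_https_server() serves a directory over TLS,
# and get_server_status() reports whether the port is already bound. The build directory
# and port below are hypothetical.
import time
from threading import Thread

host_ip = get_my_ip()
bin_dir = "/tmp/native_ota_example/build"   # hypothetical build output directory
port = 8002
if not get_server_status(host_ip, port):
    server_thread = Thread(target=start_https_server, args=(bin_dir, host_ip, port))
    server_thread.daemon = True
    server_thread.start()
    while not get_server_status(host_ip, port):   # wait for the socket to bind
        time.sleep(0.5)
print("OTA URL: https://{}:{}/native_ota.bin".format(host_ip, port))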
hexsha: ec03260c617faab793a29c96b2f047454e964a62 | size: 376 | ext: py | lang: Python
max_stars: codeforces/math数学/1100/955A喂猫.py @ yofn/pyacm (head e573f8fdeea77513711f00c42f128795cbba65a6), licenses ["Apache-2.0"], stars count: null, stars event datetimes: null
max_issues: codeforces/math数学/1100/955A喂猫.py @ yofn/pyacm (head e573f8fdeea77513711f00c42f128795cbba65a6), licenses ["Apache-2.0"], issues count: null, issues event datetimes: null
max_forks: codeforces/math数学/1100/955A喂猫.py @ yofn/pyacm (head e573f8fdeea77513711f00c42f128795cbba65a6), licenses ["Apache-2.0"], forks count: null, forks event datetimes: null
content:
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/955/A
# There are only two choices: buy immediately, or wait until 20:00
def f(l1,l2):
hh,mm = l1
h,d,c,n = l2
m = hh*60+mm
if m>1200:
return 0.8*c*((h+n-1)//n)
h2 = h + (1200-m)*d
return min(c*((h+n-1)//n), 0.8*c*((h2+n-1)//n))
l1 = list(map(int,input().split()))
l2 = list(map(int,input().split()))
print(f(l1,l2))
avg_line_length: 20.888889 | max_line_length: 51 | alphanum_fraction: 0.534574
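# --- Worked example (not part of the original file above) ---
# f() compares paying full price now against waiting for the 20:00 (minute 1200) discount:
# hunger h grows by d per minute while waiting, each portion costs c and reduces hunger by n,
# and portions bought at or after 20:00 cost 0.8*c. With the hypothetical input "19 00" and
# "255 1 100 1": m = 1140, h2 = 255 + 60*1 = 315, so the answer is
# min(100*255, 0.8*100*315) = min(25500, 25200.0) = 25200.0.
assert f([19, 0], [255, 1, 100, 1]) == 25200.0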
hexsha: ce153079a9d1434a4fb4fbee7436998ee5d48595 | size: 809 | ext: py | lang: Python
max_stars: Lib/corpuscrawler/crawl_agr.py @ cash/corpuscrawler (head 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d), licenses ["Apache-2.0"], stars count: 95, stars events: 2019-06-13T23:34:21.000Z to 2022-03-12T05:22:49.000Z
max_issues: Lib/corpuscrawler/crawl_agr.py @ sahwar/corpuscrawler (head 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d), licenses ["Apache-2.0"], issues count: 31, issues events: 2019-06-02T18:56:53.000Z to 2021-08-10T20:16:02.000Z
max_forks: Lib/corpuscrawler/crawl_agr.py @ sahwar/corpuscrawler (head 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d), licenses ["Apache-2.0"], forks count: 35, forks events: 2019-06-18T08:26:24.000Z to 2022-01-11T13:59:40.000Z
content:
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='agr')
crawl_bibleis(crawler, out, bible='AGRTBL')
avg_line_length: 36.772727 | max_line_length: 74 | alphanum_fraction: 0.770087
hexsha: 12f6975cfcf21450cf6c4db3994ec84445b92391 | size: 64,593 | ext: py | lang: Python
max_stars: saleor/graphql/order/tests/test_fulfillment.py @ redsquaresoftware/saleor (head 54dd7446d250b4f292e34f9928692652ccea5326), licenses ["CC-BY-4.0"], stars count: null, stars event datetimes: null
max_issues: saleor/graphql/order/tests/test_fulfillment.py @ redsquaresoftware/saleor (head 54dd7446d250b4f292e34f9928692652ccea5326), licenses ["CC-BY-4.0"], issues count: null, issues event datetimes: null
max_forks: saleor/graphql/order/tests/test_fulfillment.py @ redsquaresoftware/saleor (head 54dd7446d250b4f292e34f9928692652ccea5326), licenses ["CC-BY-4.0"], forks count: null, forks event datetimes: null
content:
from unittest.mock import ANY, patch
import graphene
import pytest
from ....core.exceptions import InsufficientStock, InsufficientStockData
from ....giftcard import GiftCardEvents
from ....giftcard.models import GiftCard, GiftCardEvent
from ....order import OrderLineData, OrderStatus
from ....order.actions import fulfill_order_lines
from ....order.error_codes import OrderErrorCode
from ....order.events import OrderEvents
from ....order.models import Fulfillment, FulfillmentLine, FulfillmentStatus
from ....plugins.manager import get_plugins_manager
from ....warehouse.models import Allocation, Stock
from ...tests.utils import assert_no_permission, get_graphql_content
ORDER_FULFILL_QUERY = """
mutation fulfillOrder(
$order: ID, $input: OrderFulfillInput!
) {
orderFulfill(
order: $order,
input: $input
) {
errors {
field
code
message
warehouse
orderLines
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_variant_out_of_stock")
def test_order_fulfill_with_out_of_stock_webhook(
product_variant_out_of_stock_webhooks,
staff_api_client,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
_, order_line2 = order.lines.all()
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 2, "warehouse": warehouse_id}],
},
],
},
}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
stock = order_line2.variant.stocks.filter(warehouse=warehouse).first()
product_variant_out_of_stock_webhooks.assert_called_once_with(stock)
@pytest.mark.parametrize("fulfillment_auto_approve", [True, False])
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill(
mock_create_fulfillments,
fulfillment_auto_approve,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
site_settings,
):
site_settings.fulfillment_auto_approve = fulfillment_auto_approve
site_settings.save(update_fields=["fulfillment_auto_approve"])
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = order.lines.all()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 2, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [
{"order_line": order_line, "quantity": 3},
{"order_line": order_line2, "quantity": 2},
]
}
mock_create_fulfillments.assert_called_once_with(
staff_user,
None,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=fulfillment_auto_approve,
)
def test_order_fulfill_with_stock_exceeded_with_flag_disabled(
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = order.lines.all()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
# set stocks to out of quantity and assert
Stock.objects.filter(warehouse=warehouse).update(quantity=0)
# make first stock quantity < 0
stock = Stock.objects.filter(warehouse=warehouse).first()
stock.quantity = -99
stock.save()
for stock in Stock.objects.filter(warehouse=warehouse):
assert stock.quantity <= 0
variables = {
"order": order_id,
"input": {
"notifyCustomer": False,
"allowStockToBeExceeded": False,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 2, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
errors = data["errors"]
assert errors[0]["code"] == "INSUFFICIENT_STOCK"
assert errors[0]["message"] == "Insufficient product stock: SKU_AA"
assert errors[1]["code"] == "INSUFFICIENT_STOCK"
assert errors[1]["message"] == "Insufficient product stock: SKU_B"
def test_order_fulfill_with_stock_exceeded_with_flag_enabled(
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = order.lines.all()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
# set stocks to out of quantity and assert
Stock.objects.filter(warehouse=warehouse).update(quantity=0)
for stock in Stock.objects.filter(warehouse=warehouse):
assert stock.quantity == 0
variables = {
"order": order_id,
"input": {
"notifyCustomer": False,
"allowStockToBeExceeded": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 2, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
order.refresh_from_db()
assert order.status == OrderStatus.FULFILLED
order_lines = order.lines.all()
assert order_lines[0].quantity_fulfilled == 3
assert order_lines[0].quantity_unfulfilled == 0
assert order_lines[1].quantity_fulfilled == 2
assert order_lines[1].quantity_unfulfilled == 0
# check if stocks quantity are < 0 after fulfillments
for stock in Stock.objects.filter(warehouse=warehouse):
assert stock.quantity < 0
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_above_available_quantity(
mock_create_fulfillments,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = order.lines.all()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
fulfillment = Fulfillment.objects.create(
order=order, status=FulfillmentStatus.WAITING_FOR_APPROVAL
)
FulfillmentLine.objects.create(
order_line=order_line,
quantity=1,
stock=warehouse.stock_set.first(),
fulfillment_id=fulfillment.pk,
)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 4, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 2, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
error = data["errors"][0]
assert error["field"] == "orderLineId"
assert error["code"] == OrderErrorCode.FULFILL_ORDER_LINE.name
mock_create_fulfillments.assert_not_called()
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_as_app(
mock_create_fulfillments,
app_api_client,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = order.lines.all()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 2, "warehouse": warehouse_id}],
},
],
},
}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [
{"order_line": order_line, "quantity": 3},
{"order_line": order_line2, "quantity": 2},
]
}
mock_create_fulfillments.assert_called_once_with(
None,
app_api_client.app,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=True,
)
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_many_warehouses(
mock_create_fulfillments,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouses,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
warehouse1, warehouse2 = warehouses
order_line1, order_line2 = order.lines.all()
order_id = graphene.Node.to_global_id("Order", order.id)
order_line1_id = graphene.Node.to_global_id("OrderLine", order_line1.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse1_id = graphene.Node.to_global_id("Warehouse", warehouse1.pk)
warehouse2_id = graphene.Node.to_global_id("Warehouse", warehouse2.pk)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line1_id,
"stocks": [{"quantity": 3, "warehouse": warehouse1_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [
{"quantity": 1, "warehouse": warehouse1_id},
{"quantity": 1, "warehouse": warehouse2_id},
],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
fulfillment_lines_for_warehouses = {
str(warehouse1.pk): [
{"order_line": order_line1, "quantity": 3},
{"order_line": order_line2, "quantity": 1},
],
str(warehouse2.pk): [{"order_line": order_line2, "quantity": 1}],
}
mock_create_fulfillments.assert_called_once_with(
staff_user,
None,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=True,
)
@patch("saleor.giftcard.utils.send_gift_card_notification")
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_with_gift_cards(
mock_create_fulfillments,
mock_send_notification,
staff_api_client,
staff_user,
order,
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
permission_manage_orders,
warehouse,
):
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = (
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
)
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
gift_cards = GiftCard.objects.all()
assert gift_cards.count() == 2
non_shippable_gift_card = gift_cards.get(
product_id=gift_card_non_shippable_order_line.variant.product_id
)
shippable_gift_card = gift_cards.get(
product_id=gift_card_shippable_order_line.variant.product_id
)
assert non_shippable_gift_card.initial_balance.amount == round(
gift_card_non_shippable_order_line.unit_price_gross.amount, 2
)
assert non_shippable_gift_card.current_balance.amount == round(
gift_card_non_shippable_order_line.unit_price_gross.amount, 2
)
assert shippable_gift_card.initial_balance.amount == round(
gift_card_shippable_order_line.unit_price_gross.amount, 2
)
assert shippable_gift_card.current_balance.amount == round(
gift_card_shippable_order_line.unit_price_gross.amount, 2
)
assert GiftCardEvent.objects.filter(
gift_card=shippable_gift_card, type=GiftCardEvents.BOUGHT
)
assert GiftCardEvent.objects.filter(
gift_card=non_shippable_gift_card, type=GiftCardEvents.BOUGHT
)
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [
{"order_line": order_line, "quantity": 1},
{"order_line": order_line2, "quantity": 1},
]
}
mock_create_fulfillments.assert_called_once_with(
staff_user,
None,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=True,
)
mock_send_notification.assert_called_once_with(
staff_user,
None,
order.user,
order.user_email,
non_shippable_gift_card,
ANY,
order.channel.slug,
resending=False,
)
@patch("saleor.giftcard.utils.send_gift_card_notification")
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_with_gift_card_lines_waiting_for_approval(
mock_create_fulfillments,
mock_send_notification,
staff_api_client,
staff_user,
order,
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
permission_manage_orders,
warehouse,
site_settings,
):
query = ORDER_FULFILL_QUERY
site_settings.fulfillment_auto_approve = False
site_settings.save(update_fields=["fulfillment_auto_approve"])
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = (
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
)
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line2_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
assert GiftCard.objects.count() == 0
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [
{"order_line": order_line, "quantity": 1},
{"order_line": order_line2, "quantity": 1},
]
}
mock_create_fulfillments.assert_called_once_with(
staff_user,
None,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
approved=False,
allow_stock_to_be_exceeded=False,
)
mock_send_notification.assert_not_called()
@patch("saleor.giftcard.utils.send_gift_card_notification")
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_with_gift_cards_by_app(
mock_create_fulfillments,
mock_send_notification,
app_api_client,
order,
gift_card_shippable_order_line,
permission_manage_orders,
warehouse,
):
query = ORDER_FULFILL_QUERY
app = app_api_client.app
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = gift_card_shippable_order_line
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
quantity = 2
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": quantity, "warehouse": warehouse_id}],
},
],
},
}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
gift_cards = GiftCard.objects.all()
assert gift_cards.count() == quantity
for card in gift_cards:
assert card.initial_balance.amount == round(
gift_card_shippable_order_line.unit_price_gross.amount, 2
)
assert card.current_balance.amount == round(
gift_card_shippable_order_line.unit_price_gross.amount, 2
)
assert GiftCardEvent.objects.filter(
gift_card=card,
type=GiftCardEvents.BOUGHT,
user=None,
app=app_api_client.app,
)
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [
{"order_line": order_line, "quantity": 2},
]
}
mock_create_fulfillments.assert_called_once_with(
None,
app,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=True,
)
    mock_send_notification.assert_not_called()
@patch("saleor.giftcard.utils.send_gift_card_notification")
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_with_gift_cards_multiple_warehouses(
mock_create_fulfillments,
mock_send_notification,
app_api_client,
order,
gift_card_shippable_order_line,
permission_manage_orders,
warehouses,
):
query = ORDER_FULFILL_QUERY
app = app_api_client.app
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = gift_card_shippable_order_line
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse1, warehouse2 = warehouses
warehouse1_id = graphene.Node.to_global_id("Warehouse", warehouse1.pk)
warehouse2_id = graphene.Node.to_global_id("Warehouse", warehouse2.pk)
quantity_1 = 2
quantity_2 = 1
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [
{"quantity": quantity_1, "warehouse": warehouse1_id},
{"quantity": quantity_2, "warehouse": warehouse2_id},
],
},
],
},
}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
gift_cards = GiftCard.objects.all()
assert gift_cards.count() == quantity_1 + quantity_2
for card in gift_cards:
assert card.initial_balance.amount == round(
gift_card_shippable_order_line.unit_price_gross.amount, 2
)
assert card.current_balance.amount == round(
gift_card_shippable_order_line.unit_price_gross.amount, 2
)
assert GiftCardEvent.objects.filter(
gift_card=card,
type=GiftCardEvents.BOUGHT,
user=None,
app=app_api_client.app,
)
fulfillment_lines_for_warehouses = {
str(warehouse1.pk): [
{"order_line": order_line, "quantity": quantity_1},
],
str(warehouse2.pk): [
{"order_line": order_line, "quantity": quantity_2},
],
}
mock_create_fulfillments.assert_called_once_with(
None,
app,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=True,
)
    mock_send_notification.assert_not_called()
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_without_notification(
mock_create_fulfillments,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = order.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": False,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
}
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [{"order_line": order_line, "quantity": 1}]
}
mock_create_fulfillments.assert_called_once_with(
staff_user,
None,
order,
fulfillment_lines_for_warehouses,
ANY,
False,
allow_stock_to_be_exceeded=False,
approved=True,
)
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_lines_with_empty_quantity(
mock_create_fulfillments,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
warehouse_no_shipping_zone,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line, order_line2 = order.lines.all()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
order_line2_id = graphene.Node.to_global_id("OrderLine", order_line2.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
warehouse2_id = graphene.Node.to_global_id(
"Warehouse", warehouse_no_shipping_zone.pk
)
assert not order.events.all()
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [
{"quantity": 0, "warehouse": warehouse_id},
{"quantity": 0, "warehouse": warehouse2_id},
],
},
{
"orderLineId": order_line2_id,
"stocks": [
{"quantity": 2, "warehouse": warehouse_id},
{"quantity": 0, "warehouse": warehouse2_id},
],
},
],
},
}
variables["input"]["lines"][0]["stocks"][0]["quantity"] = 0
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert not data["errors"]
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [{"order_line": order_line2, "quantity": 2}]
}
mock_create_fulfillments.assert_called_once_with(
staff_user,
None,
order,
fulfillment_lines_for_warehouses,
ANY,
True,
allow_stock_to_be_exceeded=False,
approved=True,
)
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_zero_quantity(
mock_create_fulfillments,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
):
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order_with_lines.id)
order_line = order_with_lines.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 0, "warehouse": warehouse_id}],
}
]
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
error = data["errors"][0]
assert error["field"] == "lines"
assert error["code"] == OrderErrorCode.ZERO_QUANTITY.name
assert not error["orderLines"]
assert not error["warehouse"]
mock_create_fulfillments.assert_not_called()
def test_order_fulfill_channel_without_shipping_zones(
staff_api_client,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
order.channel.shipping_zones.clear()
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = order.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert len(data["errors"]) == 1
error = data["errors"][0]
assert error["field"] == "stocks"
assert error["code"] == OrderErrorCode.INSUFFICIENT_STOCK.name
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_fulfilled_order(
mock_create_fulfillments,
staff_api_client,
staff_user,
order_with_lines,
permission_manage_orders,
warehouse,
):
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order_with_lines.id)
order_line = order_with_lines.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 100, "warehouse": warehouse_id}],
}
]
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
error = data["errors"][0]
assert error["field"] == "orderLineId"
assert error["code"] == OrderErrorCode.FULFILL_ORDER_LINE.name
assert error["orderLines"] == [order_line_id]
assert not error["warehouse"]
mock_create_fulfillments.assert_not_called()
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments")
def test_order_fulfill_unpaid_order_and_disallow_unpaid(
mock_create_fulfillments,
staff_api_client,
order_with_lines,
permission_manage_orders,
warehouse,
site_settings,
):
site_settings.fulfillment_allow_unpaid = False
site_settings.save(update_fields=["fulfillment_allow_unpaid"])
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order_with_lines.id)
order_line = order_with_lines.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 100, "warehouse": warehouse_id}],
}
]
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
error = data["errors"][0]
assert error["field"] == "order"
assert error["code"] == OrderErrorCode.CANNOT_FULFILL_UNPAID_ORDER.name
mock_create_fulfillments.assert_not_called()
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments", autospec=True)
def test_order_fulfill_warehouse_with_insufficient_stock_exception(
mock_create_fulfillments,
staff_api_client,
order_with_lines,
permission_manage_orders,
warehouse_no_shipping_zone,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = order.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id(
"Warehouse", warehouse_no_shipping_zone.pk
)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
}
]
},
}
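    # Simulate a stock shortage: the mocked create_fulfillments raises InsufficientStock.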
mock_create_fulfillments.side_effect = InsufficientStock(
[
InsufficientStockData(
variant=order_line.variant,
order_line=order_line,
warehouse_pk=warehouse_no_shipping_zone.pk,
)
]
)
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
error = data["errors"][0]
assert error["field"] == "stocks"
assert error["code"] == OrderErrorCode.INSUFFICIENT_STOCK.name
assert error["orderLines"] == [order_line_id]
assert error["warehouse"] == warehouse_id
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments", autospec=True)
def test_order_fulfill_warehouse_duplicated_warehouse_id(
mock_create_fulfillments,
staff_api_client,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = order.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [
{"quantity": 1, "warehouse": warehouse_id},
{"quantity": 2, "warehouse": warehouse_id},
],
}
]
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
error = data["errors"][0]
assert error["field"] == "warehouse"
assert error["code"] == OrderErrorCode.DUPLICATED_INPUT_ITEM.name
assert not error["orderLines"]
assert error["warehouse"] == warehouse_id
mock_create_fulfillments.assert_not_called()
@patch("saleor.graphql.order.mutations.fulfillments.create_fulfillments", autospec=True)
def test_order_fulfill_warehouse_duplicated_order_line_id(
mock_create_fulfillments,
staff_api_client,
order_with_lines,
permission_manage_orders,
warehouse,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = order.lines.first()
order_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"lines": [
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
{
"orderLineId": order_line_id,
"stocks": [{"quantity": 3, "warehouse": warehouse_id}],
},
]
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfill"]
assert data["errors"]
error = data["errors"][0]
assert error["field"] == "orderLineId"
assert error["code"] == OrderErrorCode.DUPLICATED_INPUT_ITEM.name
assert error["orderLines"] == [order_line_id]
assert not error["warehouse"]
mock_create_fulfillments.assert_not_called()
@patch("saleor.plugins.manager.PluginsManager.notify")
def test_fulfillment_update_tracking(
send_fulfillment_update_mock,
staff_api_client,
fulfillment,
permission_manage_orders,
):
query = """
mutation updateFulfillment($id: ID!, $tracking: String) {
orderFulfillmentUpdateTracking(
id: $id, input: { trackingNumber: $tracking }
) {
fulfillment {
trackingNumber
}
}
}
"""
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
tracking = "stationary tracking"
variables = {"id": fulfillment_id, "tracking": tracking}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentUpdateTracking"]["fulfillment"]
assert data["trackingNumber"] == tracking
send_fulfillment_update_mock.assert_not_called()
FULFILLMENT_UPDATE_TRACKING_WITH_SEND_NOTIFICATION_QUERY = """
mutation updateFulfillment(
$id: ID!
$tracking: String
$notifyCustomer: Boolean
) {
orderFulfillmentUpdateTracking(
id: $id
input: { trackingNumber: $tracking, notifyCustomer: $notifyCustomer }
) {
fulfillment {
trackingNumber
}
}
}
"""
@patch("saleor.graphql.order.mutations.fulfillments.send_fulfillment_update")
def test_fulfillment_update_tracking_send_notification_true(
send_fulfillment_update_mock,
staff_api_client,
fulfillment,
permission_manage_orders,
):
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
tracking = "stationary tracking"
variables = {"id": fulfillment_id, "tracking": tracking, "notifyCustomer": True}
response = staff_api_client.post_graphql(
FULFILLMENT_UPDATE_TRACKING_WITH_SEND_NOTIFICATION_QUERY,
variables,
permissions=[permission_manage_orders],
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentUpdateTracking"]["fulfillment"]
assert data["trackingNumber"] == tracking
send_fulfillment_update_mock.assert_called_once_with(
fulfillment.order, fulfillment, ANY
)
@patch("saleor.order.notifications.send_fulfillment_update")
def test_fulfillment_update_tracking_send_notification_false(
send_fulfillment_update_mock,
staff_api_client,
fulfillment,
permission_manage_orders,
):
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
tracking = "stationary tracking"
variables = {"id": fulfillment_id, "tracking": tracking, "notifyCustomer": False}
response = staff_api_client.post_graphql(
FULFILLMENT_UPDATE_TRACKING_WITH_SEND_NOTIFICATION_QUERY,
variables,
permissions=[permission_manage_orders],
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentUpdateTracking"]["fulfillment"]
assert data["trackingNumber"] == tracking
send_fulfillment_update_mock.assert_not_called()
CANCEL_FULFILLMENT_MUTATION = """
mutation cancelFulfillment($id: ID!, $warehouseId: ID) {
orderFulfillmentCancel(id: $id, input: {warehouseId: $warehouseId}) {
fulfillment {
status
}
order {
status
}
errors {
code
field
}
}
}
"""
def test_cancel_fulfillment(
staff_api_client, fulfillment, staff_user, permission_manage_orders, warehouse
):
query = CANCEL_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.id)
variables = {"id": fulfillment_id, "warehouseId": warehouse_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentCancel"]
assert data["fulfillment"]["status"] == FulfillmentStatus.CANCELED.upper()
assert data["order"]["status"] == OrderStatus.UNFULFILLED.upper()
event_cancelled, event_restocked_items = fulfillment.order.events.all()
assert event_cancelled.type == (OrderEvents.FULFILLMENT_CANCELED)
assert event_cancelled.parameters == {"composed_id": fulfillment.composed_id}
assert event_cancelled.user == staff_user
assert event_restocked_items.type == (OrderEvents.FULFILLMENT_RESTOCKED_ITEMS)
assert event_restocked_items.parameters == {
"quantity": fulfillment.get_total_quantity(),
"warehouse": str(warehouse.pk),
}
assert event_restocked_items.user == staff_user
assert Fulfillment.objects.filter(
pk=fulfillment.pk, status=FulfillmentStatus.CANCELED
).exists()
def test_cancel_fulfillment_for_order_with_gift_card_lines(
staff_api_client,
fulfillment,
gift_card_shippable_order_line,
staff_user,
permission_manage_orders,
warehouse,
):
query = CANCEL_FULFILLMENT_MUTATION
order = gift_card_shippable_order_line.order
order_fulfillment = order.fulfillments.first()
fulfillment_id = graphene.Node.to_global_id("Fulfillment", order_fulfillment.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.id)
variables = {"id": fulfillment_id, "warehouseId": warehouse_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentCancel"]
assert not data["fulfillment"]
assert len(data["errors"]) == 1
assert data["errors"][0]["code"] == OrderErrorCode.CANNOT_CANCEL_FULFILLMENT.name
assert data["errors"][0]["field"] == "fulfillment"
def test_cancel_fulfillment_no_warehouse_id(
staff_api_client, fulfillment, permission_manage_orders
):
query = """
mutation cancelFulfillment($id: ID!) {
orderFulfillmentCancel(id: $id) {
fulfillment {
status
}
order {
status
}
errors {
code
field
}
}
}
"""
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {"id": fulfillment_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
errors = content["data"]["orderFulfillmentCancel"]["errors"]
assert len(errors) == 1
error = errors[0]
assert error["field"] == "warehouseId"
assert error["code"] == OrderErrorCode.REQUIRED.name
@patch("saleor.order.actions.restock_fulfillment_lines")
def test_cancel_fulfillment_awaiting_approval(
mock_restock_lines, staff_api_client, fulfillment, permission_manage_orders
):
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
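    # Cancelling a fulfillment that is still awaiting approval deletes it and skips restocking.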
query = CANCEL_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {"id": fulfillment_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentCancel"]
assert data["fulfillment"] is None
mock_restock_lines.assert_not_called()
event_cancelled = fulfillment.order.events.get()
assert event_cancelled.type == (OrderEvents.FULFILLMENT_CANCELED)
assert event_cancelled.parameters == {}
assert event_cancelled.user == staff_api_client.user
assert not Fulfillment.objects.filter(pk=fulfillment.pk).exists()
@patch("saleor.order.actions.restock_fulfillment_lines")
def test_cancel_fulfillment_awaiting_approval_warehouse_specified(
mock_restock_lines,
staff_api_client,
fulfillment,
permission_manage_orders,
warehouse,
):
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
query = CANCEL_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.id)
variables = {"id": fulfillment_id, "warehouseId": warehouse_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentCancel"]
assert data["fulfillment"] is None
mock_restock_lines.assert_not_called()
event_cancelled = fulfillment.order.events.get()
assert event_cancelled.type == (OrderEvents.FULFILLMENT_CANCELED)
assert event_cancelled.parameters == {}
assert event_cancelled.user == staff_api_client.user
assert not Fulfillment.objects.filter(pk=fulfillment.pk).exists()
def test_cancel_fulfillment_canceled_state(
staff_api_client, fulfillment, permission_manage_orders, warehouse
):
query = CANCEL_FULFILLMENT_MUTATION
fulfillment.status = FulfillmentStatus.CANCELED
fulfillment.save(update_fields=["status"])
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.id)
variables = {"id": fulfillment_id, "warehouseId": warehouse_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
errors = content["data"]["orderFulfillmentCancel"]["errors"]
assert len(errors) == 1
error = errors[0]
assert error["field"] == "fulfillment"
assert error["code"] == OrderErrorCode.CANNOT_CANCEL_FULFILLMENT.name
def test_cancel_fulfillment_warehouse_without_stock(
order_line, warehouse, staff_api_client, permission_manage_orders, staff_user
):
query = CANCEL_FULFILLMENT_MUTATION
order = order_line.order
fulfillment = order.fulfillments.create(tracking_number="123")
fulfillment.lines.create(order_line=order_line, quantity=order_line.quantity)
order.status = OrderStatus.FULFILLED
order.save(update_fields=["status"])
assert not Stock.objects.filter(
warehouse=warehouse, product_variant=order_line.variant
)
assert not Allocation.objects.filter(order_line=order_line)
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.id)
variables = {"id": fulfillment_id, "warehouseId": warehouse_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentCancel"]["fulfillment"]
assert data["status"] == FulfillmentStatus.CANCELED.upper()
event_cancelled, event_restocked_items = fulfillment.order.events.all()
assert event_cancelled.type == (OrderEvents.FULFILLMENT_CANCELED)
assert event_cancelled.parameters == {"composed_id": fulfillment.composed_id}
assert event_cancelled.user == staff_user
assert event_restocked_items.type == (OrderEvents.FULFILLMENT_RESTOCKED_ITEMS)
assert event_restocked_items.parameters == {
"quantity": fulfillment.get_total_quantity(),
"warehouse": str(warehouse.pk),
}
assert event_restocked_items.user == staff_user
stock = Stock.objects.filter(
warehouse=warehouse, product_variant=order_line.variant
).first()
assert stock.quantity == order_line.quantity
allocation = order_line.allocations.filter(stock=stock).first()
assert allocation.quantity_allocated == order_line.quantity
@patch("saleor.order.actions.send_fulfillment_confirmation_to_customer", autospec=True)
def test_create_digital_fulfillment(
mock_email_fulfillment,
digital_content,
staff_api_client,
order_with_lines,
warehouse,
permission_manage_orders,
):
order = order_with_lines
query = ORDER_FULFILL_QUERY
order_id = graphene.Node.to_global_id("Order", order.id)
order_line = order.lines.first()
order_line.variant = digital_content.product_variant
order_line.save()
order_line.allocations.all().delete()
stock = digital_content.product_variant.stocks.get(warehouse=warehouse)
Allocation.objects.create(
order_line=order_line, stock=stock, quantity_allocated=order_line.quantity
)
second_line = order.lines.last()
first_line_id = graphene.Node.to_global_id("OrderLine", order_line.id)
second_line_id = graphene.Node.to_global_id("OrderLine", second_line.id)
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"order": order_id,
"input": {
"notifyCustomer": True,
"lines": [
{
"orderLineId": first_line_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
},
{
"orderLineId": second_line_id,
"stocks": [{"quantity": 1, "warehouse": warehouse_id}],
},
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
get_graphql_content(response)
assert mock_email_fulfillment.call_count == 1
APPROVE_FULFILLMENT_MUTATION = """
mutation approveFulfillment(
$id: ID!, $notifyCustomer: Boolean!, $allowStockToBeExceeded: Boolean = false
) {
orderFulfillmentApprove(
id: $id,
notifyCustomer: $notifyCustomer,
allowStockToBeExceeded: $allowStockToBeExceeded) {
fulfillment {
status
}
order {
status
}
errors {
field
code
}
}
}
"""
@patch("saleor.order.actions.send_fulfillment_confirmation_to_customer", autospec=True)
def test_fulfillment_approve(
mock_email_fulfillment,
staff_api_client,
fulfillment,
permission_manage_orders,
):
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
query = APPROVE_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {"id": fulfillment_id, "notifyCustomer": True}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentApprove"]
assert not data["errors"]
assert data["fulfillment"]["status"] == FulfillmentStatus.FULFILLED.upper()
assert data["order"]["status"] == OrderStatus.FULFILLED.upper()
fulfillment.refresh_from_db()
assert fulfillment.status == FulfillmentStatus.FULFILLED
assert mock_email_fulfillment.call_count == 1
events = fulfillment.order.events.all()
assert len(events) == 1
event = events[0]
assert event.type == OrderEvents.FULFILLMENT_FULFILLED_ITEMS
assert event.user == staff_api_client.user
@patch("saleor.order.actions.send_fulfillment_confirmation_to_customer", autospec=True)
def test_fulfillment_approve_gift_cards_created(
mock_email_fulfillment,
staff_api_client,
fulfillment,
permission_manage_orders,
gift_card_shippable_order_line,
gift_card_non_shippable_order_line,
):
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
gift_card_line_1 = gift_card_shippable_order_line
gift_card_line_2 = gift_card_non_shippable_order_line
stock_1 = gift_card_line_1.variant.stocks.first()
stock_2 = gift_card_line_2.variant.stocks.first()
fulfillment.lines.create(
order_line=gift_card_line_1, quantity=gift_card_line_1.quantity, stock=stock_1
)
fulfillment.lines.create(
order_line=gift_card_line_2, quantity=gift_card_line_2.quantity, stock=stock_2
)
fulfill_order_lines(
[
OrderLineData(
line=gift_card_line_1,
quantity=gift_card_line_1.quantity,
warehouse_pk=stock_1.warehouse.pk,
),
OrderLineData(
line=gift_card_line_2,
quantity=gift_card_line_2.quantity,
warehouse_pk=stock_2.warehouse.pk,
),
],
manager=get_plugins_manager(),
)
query = APPROVE_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {"id": fulfillment_id, "notifyCustomer": True}
assert GiftCard.objects.count() == 0
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentApprove"]
assert not data["errors"]
assert data["fulfillment"]["status"] == FulfillmentStatus.FULFILLED.upper()
assert data["order"]["status"] == OrderStatus.FULFILLED.upper()
fulfillment.refresh_from_db()
assert fulfillment.status == FulfillmentStatus.FULFILLED
assert mock_email_fulfillment.call_count == 1
events = fulfillment.order.events.all()
assert len(events) == 1
event = events[0]
assert event.type == OrderEvents.FULFILLMENT_FULFILLED_ITEMS
assert event.user == staff_api_client.user
assert (
GiftCard.objects.count()
== gift_card_line_1.quantity + gift_card_line_2.quantity
)
@patch("saleor.order.actions.send_fulfillment_confirmation_to_customer", autospec=True)
def test_fulfillment_approve_when_stock_is_exceeded_and_flag_enabled(
mock_email_fulfillment,
staff_api_client,
fulfillment,
permission_manage_orders,
):
# make stocks exceeded
for stock in [line.stock for line in fulfillment.lines.all()]:
stock.quantity = -99
stock.save()
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
query = APPROVE_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
    # with allowStockToBeExceeded enabled, the approval should succeed despite the negative stock
variables = {
"id": fulfillment_id,
"notifyCustomer": True,
"allowStockToBeExceeded": True,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentApprove"]
assert not data["errors"]
assert data["fulfillment"]["status"] == FulfillmentStatus.FULFILLED.upper()
assert data["order"]["status"] == OrderStatus.FULFILLED.upper()
fulfillment.refresh_from_db()
assert fulfillment.status == FulfillmentStatus.FULFILLED
assert mock_email_fulfillment.call_count == 1
events = fulfillment.order.events.all()
assert len(events) == 1
event = events[0]
assert event.type == OrderEvents.FULFILLMENT_FULFILLED_ITEMS
assert event.user == staff_api_client.user
@patch("saleor.order.actions.send_fulfillment_confirmation_to_customer", autospec=True)
def test_fulfillment_approve_when_stock_is_exceeded_and_flag_disabled(
mock_email_fulfillment,
staff_api_client,
fulfillment,
permission_manage_orders,
):
# make stocks exceeded
for stock in [line.stock for line in fulfillment.lines.all()]:
stock.quantity = -99
stock.save()
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
query = APPROVE_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {
"id": fulfillment_id,
"notifyCustomer": True,
"allowStockToBeExceeded": False,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response, ignore_errors=True)
assert content["errors"]
assert content["errors"][0]["message"] == "Insufficient stock for SKU_AA, SKU_B"
@patch("saleor.order.actions.send_fulfillment_confirmation_to_customer", autospec=True)
def test_fulfillment_approve_partial_order_fulfill(
mock_email_fulfillment,
staff_api_client,
fulfillment_awaiting_approval,
permission_manage_orders,
):
query = APPROVE_FULFILLMENT_MUTATION
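    # Duplicate the fulfillment (by clearing the pk) so approving the first one
    # leaves the order only partially fulfilled.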
second_fulfillment = Fulfillment.objects.get(pk=fulfillment_awaiting_approval.pk)
second_fulfillment.pk = None
second_fulfillment.save()
fulfillment_id = graphene.Node.to_global_id(
"Fulfillment", fulfillment_awaiting_approval.id
)
variables = {"id": fulfillment_id, "notifyCustomer": False}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentApprove"]
assert not data["errors"]
assert data["fulfillment"]["status"] == FulfillmentStatus.FULFILLED.upper()
assert data["order"]["status"] == "PARTIALLY_FULFILLED"
fulfillment_awaiting_approval.refresh_from_db()
assert fulfillment_awaiting_approval.status == FulfillmentStatus.FULFILLED
assert mock_email_fulfillment.call_count == 0
def test_fulfillment_approve_invalid_status(
staff_api_client,
fulfillment,
permission_manage_orders,
):
query = APPROVE_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {"id": fulfillment_id, "notifyCustomer": True}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentApprove"]
assert data["errors"][0]["code"] == OrderErrorCode.INVALID.name
def test_fulfillment_approve_order_unpaid(
staff_api_client,
fulfillment,
site_settings,
permission_manage_orders,
):
site_settings.fulfillment_allow_unpaid = False
site_settings.save(update_fields=["fulfillment_allow_unpaid"])
fulfillment.status = FulfillmentStatus.WAITING_FOR_APPROVAL
fulfillment.save(update_fields=["status"])
query = APPROVE_FULFILLMENT_MUTATION
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
variables = {"id": fulfillment_id, "notifyCustomer": True}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["orderFulfillmentApprove"]
assert data["errors"][0]["code"] == OrderErrorCode.CANNOT_FULFILL_UNPAID_ORDER.name
QUERY_FULFILLMENT = """
query fulfillment($id: ID!) {
order(id: $id) {
fulfillments {
id
fulfillmentOrder
status
trackingNumber
warehouse {
id
}
lines {
orderLine {
id
}
quantity
}
}
}
}
"""
def test_fulfillment_query(
staff_api_client,
fulfilled_order,
warehouse,
permission_manage_orders,
):
order = fulfilled_order
order_line_1, order_line_2 = order.lines.all()
order_id = graphene.Node.to_global_id("Order", order.pk)
order_line_1_id = graphene.Node.to_global_id("OrderLine", order_line_1.pk)
order_line_2_id = graphene.Node.to_global_id("OrderLine", order_line_2.pk)
    warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {"id": order_id}
response = staff_api_client.post_graphql(
QUERY_FULFILLMENT, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["order"]["fulfillments"]
assert len(data) == 1
fulfillment_data = data[0]
assert fulfillment_data["fulfillmentOrder"] == 1
assert fulfillment_data["status"] == FulfillmentStatus.FULFILLED.upper()
assert fulfillment_data["trackingNumber"] == "123"
assert fulfillment_data["warehouse"]["id"] == warehose_id
assert len(fulfillment_data["lines"]) == 2
assert {
"orderLine": {"id": order_line_1_id},
"quantity": order_line_1.quantity,
} in fulfillment_data["lines"]
assert {
"orderLine": {"id": order_line_2_id},
"quantity": order_line_2.quantity,
} in fulfillment_data["lines"]
QUERY_ORDER_FULFILL_DATA = """
query OrderFulfillData($id: ID!) {
order(id: $id) {
id
lines {
variant {
stocks {
warehouse {
id
}
quantity
quantityAllocated
}
}
}
}
}
"""
def test_staff_can_query_order_fulfill_data(
staff_api_client, order_with_lines, permission_manage_orders
):
order_id = graphene.Node.to_global_id("Order", order_with_lines.pk)
variables = {"id": order_id}
response = staff_api_client.post_graphql(
QUERY_ORDER_FULFILL_DATA, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["order"]["lines"]
assert len(data) == 2
assert data[0]["variant"]["stocks"][0]["quantity"] == 5
assert data[0]["variant"]["stocks"][0]["quantityAllocated"] == 3
assert data[1]["variant"]["stocks"][0]["quantity"] == 2
assert data[1]["variant"]["stocks"][0]["quantityAllocated"] == 2
def test_staff_can_query_order_fulfill_data_without_permission(
staff_api_client, order_with_lines
):
order_id = graphene.Node.to_global_id("Order", order_with_lines.pk)
variables = {"id": order_id}
response = staff_api_client.post_graphql(QUERY_ORDER_FULFILL_DATA, variables)
assert_no_permission(response)
| 34.266844 | 88 | 0.658585 |
3ed424fb9084d192848326e1e81241f18beaa1f4 | 5,696 | py | Python | test/unit/test_batch.py | Prompsit/joeynmt | 54ed51c5d524e46c053dafb01b92c80854e7744b | ["Apache-2.0"] | 10 | 2021-11-19T06:24:51.000Z | 2022-02-09T15:44:00.000Z | test/unit/test_batch.py | Prompsit/joeynmt | 54ed51c5d524e46c053dafb01b92c80854e7744b | ["Apache-2.0"] | 9 | 2021-10-01T11:06:27.000Z | 2021-12-23T02:10:52.000Z | test/unit/test_batch.py | Prompsit/joeynmt | 54ed51c5d524e46c053dafb01b92c80854e7744b | ["Apache-2.0"] | 2 | 2021-09-14T04:08:36.000Z | 2021-11-19T06:24:54.000Z |
import random
import torch
from torchtext.legacy.data.batch import Batch as TorchTBatch
from joeynmt.batch import Batch
from joeynmt.data import load_data, make_data_iter
from joeynmt.constants import PAD_TOKEN
from .test_helpers import TensorTestCase
class TestData(TensorTestCase):
def setUp(self):
self.train_path = "test/data/toy/train"
self.dev_path = "test/data/toy/dev"
self.test_path = "test/data/toy/test"
self.levels = ["char", "word"] # bpe is equivalently processed to word
self.max_sent_length = 20
# minimal data config
self.data_cfg = {"src": "de", "trg": "en", "train": self.train_path,
"dev": self.dev_path, "level": "char",
"lowercase": True,
"max_sent_length": self.max_sent_length}
# load the data
self.train_data, self.dev_data, self.test_data, src_vocab, trg_vocab = \
load_data(self.data_cfg)
self.pad_index = trg_vocab.stoi[PAD_TOKEN]
# random seeds
seed = 42
torch.manual_seed(seed)
        random.seed(seed)
def testBatchTrainIterator(self):
batch_size = 4
self.assertEqual(len(self.train_data), 27)
# make data iterator
train_iter = make_data_iter(self.train_data, train=True, shuffle=True,
batch_size=batch_size)
self.assertEqual(train_iter.batch_size, batch_size)
self.assertTrue(train_iter.shuffle)
self.assertTrue(train_iter.train)
self.assertEqual(train_iter.epoch, 0)
self.assertEqual(train_iter.iterations, 0)
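        # Expected token ids and lengths of the first batch; deterministic thanks to the fixed seeds in setUp.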
expected_src0 = torch.Tensor(
[[21, 10, 4, 16, 4, 5, 21, 4, 12, 33, 6, 14, 4, 12, 23, 6, 18, 4,
6, 9, 3],
[20, 28, 4, 10, 28, 4, 6, 5, 14, 8, 6, 15, 4, 5, 7, 17, 11, 27,
6, 9, 3],
[24, 8, 7, 5, 24, 10, 12, 14, 5, 18, 4, 7, 17, 11, 4, 11, 4, 6,
25, 3, 1]]).long()
expected_src0_len = torch.Tensor([21, 21, 20]).long()
expected_trg0 = torch.Tensor(
[[6, 4, 27, 5, 8, 4, 5, 31, 4, 26, 7, 6, 10, 20, 11,
9, 3],
[8, 7, 6, 10, 17, 4, 13, 5, 15, 9, 3, 1, 1, 1, 1,
1, 1],
[12, 5, 4, 25, 7, 6, 8, 4, 7, 6, 18, 18, 11, 10, 12,
23, 3]]).long()
expected_trg0_len = torch.Tensor([18, 12, 18]).long()
total_samples = 0
for b in iter(train_iter):
b = Batch(torch_batch=b, pad_index=self.pad_index)
if total_samples == 0:
self.assertTensorEqual(b.src, expected_src0)
self.assertTensorEqual(b.src_length, expected_src0_len)
self.assertTensorEqual(b.trg, expected_trg0)
self.assertTensorEqual(b.trg_length, expected_trg0_len)
total_samples += b.nseqs
self.assertLessEqual(b.nseqs, batch_size)
self.assertEqual(total_samples, len(self.train_data))
def testBatchDevIterator(self):
batch_size = 3
self.assertEqual(len(self.dev_data), 20)
# make data iterator
dev_iter = make_data_iter(self.dev_data, train=False, shuffle=False,
batch_size=batch_size)
self.assertEqual(dev_iter.batch_size, batch_size)
self.assertFalse(dev_iter.shuffle)
self.assertFalse(dev_iter.train)
self.assertEqual(dev_iter.epoch, 0)
self.assertEqual(dev_iter.iterations, 0)
expected_src0 = torch.Tensor(
[[29, 8, 5, 22, 5, 8, 16, 7, 19, 5, 22, 5, 24, 8, 7, 5, 7, 19,
16, 16, 5, 31, 10, 19, 11, 8, 17, 15, 10, 6, 18, 5, 7, 4, 10, 6,
5, 25, 3],
[10, 17, 11, 5, 28, 12, 4, 23, 4, 5, 0, 10, 17, 11, 5, 22, 5, 14,
8, 7, 7, 5, 10, 17, 11, 5, 14, 8, 5, 31, 10, 6, 5, 9, 3, 1,
1, 1, 1],
[29, 8, 5, 22, 5, 18, 23, 13, 4, 6, 5, 13, 8, 18, 5, 9, 3, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1]]).long()
expected_src0_len = torch.Tensor([39, 35, 17]).long()
expected_trg0 = torch.Tensor(
[[13, 11, 12, 4, 22, 4, 12, 5, 4, 22, 4, 25, 7, 6, 8, 4, 14, 12,
4, 24, 14, 5, 7, 6, 26, 17, 14, 10, 20, 4, 23, 3],
[14, 0, 28, 4, 7, 6, 18, 18, 13, 4, 8, 5, 4, 24, 11, 4, 7, 11,
16, 11, 4, 9, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[13, 11, 12, 4, 22, 4, 7, 11, 27, 27, 5, 4, 9, 3, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).long()
expected_trg0_len = torch.Tensor([33, 24, 15]).long()
total_samples = 0
for b in iter(dev_iter):
self.assertEqual(type(b), TorchTBatch)
b = Batch(b, pad_index=self.pad_index)
# test the sorting by src length
self.assertEqual(type(b), Batch)
before_sort = b.src_length
b.sort_by_src_length()
after_sort = b.src_length
self.assertTensorEqual(torch.sort(before_sort, descending=True)[0],
after_sort)
self.assertEqual(type(b), Batch)
if total_samples == 0:
self.assertTensorEqual(b.src, expected_src0)
self.assertTensorEqual(b.src_length, expected_src0_len)
self.assertTensorEqual(b.trg, expected_trg0)
self.assertTensorEqual(b.trg_length, expected_trg0_len)
total_samples += b.nseqs
self.assertLessEqual(b.nseqs, batch_size)
self.assertEqual(total_samples, len(self.dev_data))
| 42.507463 | 80 | 0.533532 |
74b50ac60b79a6f7218b0f00a105ac9d2ed633e5 | 448 | py | Python | tests/core/test_Version.py | dc-blockchain/dc-core | fc6af8ce04d7b52f94c069f6ec05b0e419e07d70 | ["MIT"] | 1 | 2021-03-05T14:24:32.000Z | 2021-03-05T14:24:32.000Z | tests/core/test_Version.py | dc-blockchain/dc-core | fc6af8ce04d7b52f94c069f6ec05b0e419e07d70 | ["MIT"] | null | null | null | tests/core/test_Version.py | dc-blockchain/dc-core | fc6af8ce04d7b52f94c069f6ec05b0e419e07d70 | ["MIT"] | null | null | null |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from dc.core import config
from dc.core.misc import logger
logger.initialize_default()
class TestVersion(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_version(self):
print(config.dev.version)
| 24.888889 | 69 | 0.734375 |
0a5c60872d969ca705a122c4763352a2374955c8 | 626 | py | Python | data/migrations/0068_populate_workflow_configuration_tag.py | Duke-GCB/bespin-api | cea5c20fb2ff592adabe6ebb7ca934939aa11a34 | ["MIT"] | null | null | null | data/migrations/0068_populate_workflow_configuration_tag.py | Duke-GCB/bespin-api | cea5c20fb2ff592adabe6ebb7ca934939aa11a34 | ["MIT"] | 137 | 2016-12-09T18:59:45.000Z | 2021-06-10T18:55:47.000Z | data/migrations/0068_populate_workflow_configuration_tag.py | Duke-GCB/bespin-api | cea5c20fb2ff592adabe6ebb7ca934939aa11a34 | ["MIT"] | 3 | 2017-11-14T16:05:58.000Z | 2018-12-28T18:07:43.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-11-29 17:20
from __future__ import unicode_literals
from django.db import migrations
def populate_workflow_configuration_tag(apps, schema_editor):
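    # Copy each existing configuration's name into the newly added tag field.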
WorkflowConfiguration = apps.get_model("data", "WorkflowConfiguration")
for obj in WorkflowConfiguration.objects.all():
obj.tag = obj.name
obj.save()
class Migration(migrations.Migration):
dependencies = [
('data', '0067_workflowconfiguration_tag'),
]
operations = [
migrations.RunPython(populate_workflow_configuration_tag, migrations.RunPython.noop),
]
| 26.083333 | 93 | 0.715655 |
a7cf1e169a0417baa1a9815d0e49b86b7ba12ac8 | 4,360 | py | Python | core/src/zeit/cms/checkout/tests/test_webhook.py | louika/vivi | 3cc213b873d527127aa6f0dd3c79a542299a8a0e | ["BSD-3-Clause"] | null | null | null | core/src/zeit/cms/checkout/tests/test_webhook.py | louika/vivi | 3cc213b873d527127aa6f0dd3c79a542299a8a0e | ["BSD-3-Clause"] | null | null | null | core/src/zeit/cms/checkout/tests/test_webhook.py | louika/vivi | 3cc213b873d527127aa6f0dd3c79a542299a8a0e | ["BSD-3-Clause"] | null | null | null |
from zeit.cms.checkout.helper import checked_out
from zeit.cms.content.sources import Product
from zeit.cms.repository.interfaces import IAutomaticallyRenameable
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
import celery.exceptions
import lxml.objectify
import mock
import plone.testing
import requests.exceptions
import zeit.cms.checkout.webhook
import zeit.cms.testing
HTTP_LAYER = zeit.cms.testing.HTTPLayer(
zeit.cms.testing.RecordingRequestHandler,
name='HTTPLayer', module=__name__)
WEBHOOK_LAYER = plone.testing.Layer(
bases=(zeit.cms.testing.ZOPE_LAYER, HTTP_LAYER),
name='WebhookLayer', module=__name__)
class WebhookTest(zeit.cms.testing.ZeitCmsTestCase):
layer = WEBHOOK_LAYER
def setUp(self):
super(WebhookTest, self).setUp()
self.config = (
'<webhooks><webhook url="http://localhost:%s"/></webhooks>' %
self.layer['http_port'])
self.patch = mock.patch(
'zeit.cms.checkout.webhook.HookSource._get_tree',
new=lambda x: lxml.objectify.fromstring(self.config))
self.patch.start()
source = zeit.cms.checkout.webhook.HOOKS.factory
# XXX Have to pass the instance because of zc.factory init shenanigans.
source._values.invalidate(source)
def tearDown(self):
self.patch.stop()
super(WebhookTest, self).tearDown()
def test_calls_post_with_uniqueId_for_configured_urls(self):
with checked_out(self.repository['testcontent']):
pass
requests = self.layer['request_handler'].requests
self.assertEqual(1, len(requests))
request = requests[0]
del request['headers']
self.assertEqual(
{'body': '["http://xml.zeit.de/testcontent"]',
'path': '/', 'verb': 'POST'}, request)
def test_calls_hook_when_adding_new_object_to_repository(self):
self.repository['testcontent2'] = ExampleContentType()
requests = self.layer['request_handler'].requests
self.assertEqual(1, len(requests))
request = requests[0]
del request['headers']
self.assertEqual(
{'body': '["http://xml.zeit.de/testcontent2"]',
'path': '/', 'verb': 'POST'}, request)
def test_does_not_call_hook_when_exclude_matches(self):
self.config = """<webhooks>
<webhook url="http://localhost:%s">
<exclude>
<type>testcontenttype</type>
</exclude>
</webhook>
</webhooks>
""" % self.layer['http_port']
with checked_out(self.repository['testcontent']):
pass
self.assertEqual([], self.layer['request_handler'].requests)
def test_retry_on_technical_error(self):
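        # The first attempt returns 503, so the webhook task is expected to schedule a retry.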
self.layer['request_handler'].response_code = [503, 200]
with self.assertRaises(celery.exceptions.Retry):
with checked_out(self.repository['testcontent']):
pass
def test_no_retry_on_semantic_error(self):
self.layer['request_handler'].response_code = [400, 200]
with self.assertRaises(requests.exceptions.HTTPError):
with checked_out(self.repository['testcontent']):
pass
class WebhookExcludeTest(zeit.cms.testing.ZeitCmsTestCase):
def test_match_contenttype(self):
hook = zeit.cms.checkout.webhook.Hook(None)
hook.add_exclude('type', 'testcontenttype')
self.assertTrue(hook.should_exclude(self.repository['testcontent']))
self.assertFalse(hook.should_exclude(self.repository['politik.feed']))
def test_match_product(self):
hook = zeit.cms.checkout.webhook.Hook(None)
hook.add_exclude('product', 'ZEI')
self.assertFalse(hook.should_exclude(self.repository['testcontent']))
with checked_out(self.repository['testcontent']) as co:
co.product = Product('ZEI')
self.assertTrue(hook.should_exclude(self.repository['testcontent']))
def test_skip_auto_renameable(self):
hook = zeit.cms.checkout.webhook.Hook(None)
self.assertFalse(hook.should_exclude(self.repository['testcontent']))
with checked_out(self.repository['testcontent']) as co:
IAutomaticallyRenameable(co).renameable = True
self.assertTrue(hook.should_exclude(self.repository['testcontent']))
| 38.245614 | 79 | 0.669266 |
5a89ffb670673f905cf63e874dac1b9b7ae8d22b | 2,964 | py | Python | common/trainer.py | kentakuramochi/deep_learning_from_scratch | 8b78369f6da316a6c14c16f729c658d959454b0e | ["CC0-1.0"] | null | null | null | common/trainer.py | kentakuramochi/deep_learning_from_scratch | 8b78369f6da316a6c14c16f729c658d959454b0e | ["CC0-1.0"] | null | null | null | common/trainer.py | kentakuramochi/deep_learning_from_scratch | 8b78369f6da316a6c14c16f729c658d959454b0e | ["CC0-1.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.pardir)
import numpy as np
from common.optimizer import *
class Trainer:
def __init__(self, network, x_train, t_train, x_test, t_test,
epochs=20, mini_batch_size=100,
optimizer="SGD", optimizer_param={"lr":0.01},
evaluate_sample_num_per_epoch=None, verbose=True):
self.network = network
self.verbose = verbose
self.x_train = x_train
self.t_train = t_train
self.x_test = x_test
self.t_test = t_test
self.epochs = epochs
self.batch_size = mini_batch_size
self.evaluate_sample_num_per_epoch = evaluate_sample_num_per_epoch
optimizer_class_dict = {"sgd":SGD, "momentum":Momentum,
"adagrad":AdaGrad, "adam":Adam}
self.optimizer = optimizer_class_dict[optimizer.lower()](**optimizer_param)
self.train_size = x_train.shape[0]
self.iter_per_epoch = max(self.train_size / mini_batch_size, 1)
self.max_iter = int(epochs * self.iter_per_epoch)
self.current_iter = 0
self.current_epoch = 0
self.train_loss_list = []
self.train_acc_list = []
self.test_acc_list = []
def train_step(self):
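        # One training step: sample a random mini-batch, update the parameters, and record the loss
        # (accuracy on train/test data is additionally evaluated once per epoch).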
batch_mask = np.random.choice(self.train_size, self.batch_size)
x_batch = self.x_train[batch_mask]
t_batch = self.t_train[batch_mask]
grads = self.network.gradient(x_batch, t_batch)
self.optimizer.update(self.network.params, grads)
loss = self.network.loss(x_batch, t_batch)
self.train_loss_list.append(loss)
if self.verbose: print("train loss:" + str(loss))
if self.current_iter % self.iter_per_epoch == 0:
self.current_epoch += 1
x_train_sample, t_train_sample = self.x_train, self.t_train
x_test_sample, t_test_sample = self.x_test, self.t_test
            if self.evaluate_sample_num_per_epoch is not None:
t = self.evaluate_sample_num_per_epoch
x_train_sample, t_train_sample = self.x_train[:t], self.t_train[:t]
x_test_sample, t_test_sample = self.x_test[:t], self.t_test[:t]
train_acc = self.network.accuracy(x_train_sample, t_train_sample)
test_acc = self.network.accuracy(x_test_sample, t_test_sample)
self.train_acc_list.append(train_acc)
self.test_acc_list.append(test_acc)
if self.verbose:
print("=== epoch:" + str(self.current_epoch) + ", train acc:" + str(train_acc) +
", test acc:" + str(test_acc) + " ===")
self.current_iter += 1
def train(self):
for i in range(self.max_iter):
self.train_step()
test_acc = self.network.accuracy(self.x_test, self.t_test)
if self.verbose:
print("===== Final Test Accuracy =====")
print("test acc:" + str(test_acc))
| 36.592593 | 97 | 0.628205 |
290d0fa16fd299de4a001bdfd4f61715fe2b968b | 415 | py | Python | env/Lib/site-packages/plotly/validators/scatterpolargl/line/_color.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | ["MIT"] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/scatterpolargl/line/_color.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | ["Unlicense"] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/scatterpolargl/line/_color.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | ["Unlicense"] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scatterpolargl.line", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 29.642857 | 78 | 0.653012 |
38dc7f4016a6ac6977ed3e1adc29d1d1fbcc7011 | 694 | py | Python | core_get/vendor/diamond/diamond_project_discoverer.py | core-get/core-get | 8fb960e4e51d0d46b5e3b2f4832eb4a39e0e60f7 | ["MIT"] | null | null | null | core_get/vendor/diamond/diamond_project_discoverer.py | core-get/core-get | 8fb960e4e51d0d46b5e3b2f4832eb4a39e0e60f7 | ["MIT"] | null | null | null | core_get/vendor/diamond/diamond_project_discoverer.py | core-get/core-get | 8fb960e4e51d0d46b5e3b2f4832eb4a39e0e60f7 | ["MIT"] | null | null | null |
from dataclasses import dataclass
from pathlib import PurePath
from typing import List
from injector import inject
from core_get.file.file_system import FileSystem
from core_get.vendor.diamond.diamond_project import DiamondProjectReader
from core_get.vendor.project import Project
from core_get.vendor.project_discoverer import ProjectDiscoverer
@inject
@dataclass
class DiamondProjectDiscoverer(ProjectDiscoverer):
diamond_project_reader: DiamondProjectReader
file_system: FileSystem
def discover(self, directory: PurePath) -> List[Project]:
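        # Parse every *.ldf project file found in the given directory.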
return [self.diamond_project_reader.read(ldf_file)
for ldf_file in self.file_system.glob(directory, '*.ldf')]
| 31.545455 | 74 | 0.806916 |
755d9829b6ede13d9ed8dc1e81ea79bcb17e69a3 | 21,687 | py | Python | helper/configuration.py | PINTO0309/multi-mono-sf | 563d65f34624c9281571e8b8b6f1f25599a94a34 | ["Apache-2.0"] | 69 | 2021-05-06T01:31:54.000Z | 2022-03-30T02:52:53.000Z | helper/configuration.py | PINTO0309/multi-mono-sf | 563d65f34624c9281571e8b8b6f1f25599a94a34 | ["Apache-2.0"] | 7 | 2021-05-11T06:39:44.000Z | 2022-02-28T10:13:53.000Z | helper/configuration.py | PINTO0309/multi-mono-sf | 563d65f34624c9281571e8b8b6f1f25599a94a34 | ["Apache-2.0"] | 11 | 2021-05-09T13:34:46.000Z | 2021-11-24T09:33:49.000Z |
## Portions of code copyright 2018 Jochen Gast
from __future__ import absolute_import, division, print_function
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
import numpy as np
import logging
import shutil
import random
import fnmatch
from helper import logger, tools
from torch.utils.data.sampler import RandomSampler
from datasets.custom_batchsampler import CustomBatchSampler_Multi
# Class that contains both the network model and loss
class ModelAndLoss(nn.Module):
def __init__(self, args, model, training_loss, evaluation_loss=None):
super(ModelAndLoss, self).__init__()
self._model = model
self._training_loss = training_loss
self._evaluation_loss = evaluation_loss
@property
def training_loss(self):
return self._training_loss
@property
def evaluation_loss(self):
return self._evaluation_loss
@property
def model(self):
return self._model
def num_parameters(self):
return sum([p.data.nelement() if p.requires_grad else 0 for p in self.parameters()])
# Note: We merge inputs and targets into a single dictionary !
def forward(self, example_dict):
# Run forward pass
output_dict = self._model(example_dict)
# Compute losses
if self.training:
loss_dict = self._training_loss(output_dict, example_dict)
else:
loss_dict = self._evaluation_loss(output_dict, example_dict)
# Return losses and outputs
return loss_dict, output_dict
def configure_runtime_augmentations(args):
with logger.LoggingBlock("Runtime Augmentations", emph=True):
training_augmentation = None
validation_augmentation = None
# Training Augmentation
if args.training_augmentation is not None:
kwargs = tools.kwargs_from_args(args, "training_augmentation")
logging.info("training_augmentation: %s" % args.training_augmentation)
for param, default in sorted(kwargs.items()):
logging.info(" %s: %s" % (param, default))
kwargs["args"] = args
training_augmentation = tools.instance_from_kwargs(
args.training_augmentation_class, kwargs)
training_augmentation = training_augmentation.to(args.device)
else:
logging.info("training_augmentation: None")
# Validation Augmentation
if args.validation_augmentation is not None:
kwargs = tools.kwargs_from_args(args, "validation_augmentation")
logging.info("validation_augmentation: %s" % args.validation_augmentation)
for param, default in sorted(kwargs.items()):
logging.info(" %s: %s" % (param, default))
kwargs["args"] = args
validation_augmentation = tools.instance_from_kwargs(
args.validation_augmentation_class, kwargs)
validation_augmentation = validation_augmentation.to(args.device)
else:
logging.info("validation_augmentation: None")
return training_augmentation, validation_augmentation
def configure_model_and_loss(args):
# Dynamically load model and loss class with parameters
# passed in via "--model_[param]=[value]" or "--loss_[param]=[value]" arguments
with logger.LoggingBlock("Model and Loss", emph=True):
# Model
kwargs = tools.kwargs_from_args(args, "model")
kwargs["args"] = args
model = tools.instance_from_kwargs(args.model_class, kwargs)
# Training loss
training_loss = None
if args.training_loss is not None:
kwargs = tools.kwargs_from_args(args, "training_loss")
kwargs["args"] = args
training_loss = tools.instance_from_kwargs(args.training_loss_class, kwargs)
# Validation loss
validation_loss = None
if args.validation_loss is not None:
kwargs = tools.kwargs_from_args(args, "validation_loss")
kwargs["args"] = args
validation_loss = tools.instance_from_kwargs(args.validation_loss_class, kwargs)
# Model and loss
model_and_loss = ModelAndLoss(args, model, training_loss, validation_loss)
# Report some network statistics
logging.info("Batch Size: %i" % args.batch_size)
logging.info("GPGPU: Cuda") if args.cuda else logging.info("GPGPU: off")
logging.info("Network: %s" % args.model)
logging.info("Number of parameters: %i" % tools.x2module(model_and_loss).num_parameters())
if training_loss is not None:
logging.info("Training Key: %s" % args.training_key)
logging.info("Training Loss: %s" % args.training_loss)
if validation_loss is not None:
logging.info("Validation Key: %s" % args.validation_key)
logging.info("Validation Loss: %s" % args.validation_loss)
return model_and_loss
def configure_random_seed(args):
with logger.LoggingBlock("Random Seeds", emph=True):
# python
seed = args.seed
random.seed(seed)
logging.info("Python seed: %i" % seed)
# numpy
seed += 1
np.random.seed(seed)
logging.info("Numpy seed: %i" % seed)
# torch
seed += 1
torch.manual_seed(seed)
logging.info("Torch CPU seed: %i" % seed)
# torch cuda
seed += 1
torch.cuda.manual_seed(seed)
logging.info("Torch CUDA seed: %i" % seed)
# Checkpoint loader/saver.
class CheckpointSaver:
def __init__(self,
args,
prefix="checkpoint",
latest_postfix="_latest",
best_postfix="_best",
extension=".ckpt"):
self._args = args
self._prefix = prefix
self._model_key = "state_dict"
self._scheduler_key = "scheduler"
self._optimizer_key = "optimizer"
self._latest_postfix = latest_postfix
self._best_postfix = best_postfix
self._extension = extension
# the purpose of rewriting the loading function is we sometimes want to
# initialize parameters in modules without knowing the dimensions at runtime
#
# This function here will resize these parameters to whatever size required.
#
def _load_state_dict_into_module(self, state_dict, module, strict=True):
own_state = module.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].resize_as_(param)
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
raise KeyError('unexpected key "{}" in state_dict'.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
remaining = set(state_dict.keys()) - set(own_state.keys())
            if len(missing) > 0 or len(remaining) > 0:
logging.info('missing keys in state_dict: "{}"'.format(missing))
logging.info('remaining keys in state_dict: "{}"'.format(remaining))
raise KeyError('loading state_dict failed')
def restore(self, filename, model_and_loss, optimizer, scheduler, include_params="*", exclude_params=()):
# Make sure file exists
if not os.path.isfile(filename):
logging.info("Could not find checkpoint file '%s'!" % filename)
quit()
# Load checkpoint from file including the state_dict
checkpoint_with_state = torch.load(filename)
# Load filtered state dictionary
state_dict = checkpoint_with_state[self._model_key]
restore_keys = tools.filter_list_of_strings(
state_dict.keys(),
include=include_params,
exclude=exclude_params)
state_dict = {key: value for key, value in state_dict.items() if key in restore_keys}
self._load_state_dict_into_module(state_dict, model_and_loss)
logging.info(" Restore keys:")
for key in restore_keys:
logging.info(" %s" % key)
# Load optimizer and scheduler
# if not self._args.evaluation and not self._args.finetuning:
# optimizer.load_state_dict(checkpoint_with_state[self._optimizer_key])
# scheduler.load_state_dict(checkpoint_with_state[self._scheduler_key])
# Get checkpoint statistics without the state dict
checkpoint_stats = {
            key: value for key, value in checkpoint_with_state.items()
            if key not in (self._model_key, self._optimizer_key, self._scheduler_key)
}
return checkpoint_stats, filename
def restore_latest(self, directory, model_and_loss, optimizer, scheduler, include_params="*", exclude_params=()):
latest_checkpoint_filename = os.path.join(
directory, self._prefix + self._latest_postfix + self._extension)
return self.restore(latest_checkpoint_filename, model_and_loss, optimizer, scheduler, include_params, exclude_params)
def restore_best(self, directory, model_and_loss, optimizer, scheduler, include_params="*", exclude_params=()):
best_checkpoint_filename = os.path.join(
directory, self._prefix + self._best_postfix + self._extension)
return self.restore(best_checkpoint_filename, model_and_loss, optimizer, scheduler, include_params, exclude_params)
def save_latest(self, directory, model_and_loss, optimizer, scheduler, stats_dict, store_as_best=False):
# Make sure directory exists
tools.ensure_dir(directory)
# Save
save_dict = dict(stats_dict)
save_dict[self._model_key] = model_and_loss.state_dict()
save_dict[self._optimizer_key] = optimizer.state_dict()
save_dict[self._scheduler_key] = scheduler.state_dict()
latest_checkpoint_filename = os.path.join(directory, self._prefix + self._latest_postfix + self._extension)
torch.save(save_dict, latest_checkpoint_filename)
# Possibly store as best
if store_as_best:
best_checkpoint_filename = os.path.join(directory, self._prefix + self._best_postfix + self._extension)
logging.info("Saved checkpoint as best model..")
shutil.copyfile(latest_checkpoint_filename, best_checkpoint_filename)
def configure_checkpoint_saver(args, model_and_loss, optimizer, scheduler):
with logger.LoggingBlock("Checkpoint", emph=True):
checkpoint_saver = CheckpointSaver(args)
checkpoint_stats = None
if args.checkpoint is None:
logging.info("No checkpoint given.")
logging.info("Starting from scratch with random initialization.")
elif os.path.isfile(args.checkpoint):
checkpoint_stats, filename = checkpoint_saver.restore(
filename=args.checkpoint,
model_and_loss=model_and_loss,
optimizer=optimizer,
scheduler=scheduler,
include_params=args.checkpoint_include_params,
exclude_params=args.checkpoint_exclude_params)
elif os.path.isdir(args.checkpoint):
if args.checkpoint_mode in ["resume_from_best"]:
logging.info("Loading best checkpoint in %s" % args.checkpoint)
checkpoint_stats, filename = checkpoint_saver.restore_best(
directory=args.checkpoint,
model_and_loss=model_and_loss,
optimizer=optimizer,
scheduler=scheduler,
include_params=args.checkpoint_include_params,
exclude_params=args.checkpoint_exclude_params)
elif args.checkpoint_mode in ["resume_from_latest"]:
logging.info("Loading latest checkpoint in %s" % args.checkpoint)
checkpoint_stats, filename = checkpoint_saver.restore_latest(
directory=args.checkpoint,
model_and_loss=model_and_loss,
optimizer=optimizer,
scheduler=scheduler,
include_params=args.checkpoint_include_params,
exclude_params=args.checkpoint_exclude_params)
else:
logging.info("Unknown checkpoint_restore '%s' given!" % args.checkpoint_restore)
quit()
else:
logging.info("Could not find checkpoint file or directory '%s'" % args.checkpoint)
quit()
return checkpoint_saver, checkpoint_stats
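# Hedged usage sketch (argument names are assumptions based on the signatures
# above): a typical training script would call this as
#
#   checkpoint_saver, checkpoint_stats = configure_checkpoint_saver(
#       args, model_and_loss, optimizer, lr_scheduler)
#   ...
#   checkpoint_saver.save_latest(args.save, model_and_loss, optimizer,
#                                lr_scheduler, stats_dict={"epoch": epoch},
#                                store_as_best=is_best)
#
# where args.save, epoch and is_best come from the surrounding training loop.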
# Configure data loading
def configure_data_loaders(args):
with logger.LoggingBlock("Datasets", emph=True):
def _sizes_to_str(value):
if np.isscalar(value):
return '[1L]'
else:
return ' '.join([str([d for d in value.size()])])
def _log_statistics(dataset, prefix, name):
with logger.LoggingBlock("%s Dataset: %s" % (prefix, name)):
example_dict = dataset[0] # get sizes from first dataset example
for key, value in sorted(example_dict.items()):
if key in ["index", "basename"]: # no need to display these
continue
if isinstance(value, str):
logging.info("{}: {}".format(key, value))
else:
logging.info("%s: %s" % (key, _sizes_to_str(value)))
logging.info("num_examples: %i" % len(dataset))
# GPU parameters
gpuargs = {"num_workers": args.num_workers, "pin_memory": True} if args.cuda else {}
train_loader = None
validation_loader = None
# Training dataset
if args.training_dataset is not None:
# Figure out training_dataset arguments
kwargs = tools.kwargs_from_args(args, "training_dataset")
kwargs["is_cropped"] = True
kwargs["args"] = args
# Create training dataset
train_dataset = tools.instance_from_kwargs(args.training_dataset_class, kwargs)
# Create training loader
if args.training_dataset == 'KITTI_Comb_Multi_Train' or args.training_dataset == 'KITTI_Comb_Multi_Full':
custom_batch_sampler = CustomBatchSampler_Multi([RandomSampler(train_dataset.dataset1), RandomSampler(train_dataset.dataset2)])
train_loader = DataLoader(dataset=train_dataset, batch_sampler=custom_batch_sampler, **gpuargs)
else:
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
**gpuargs)
_log_statistics(train_dataset, prefix="Training", name=args.training_dataset)
# Validation dataset
if args.validation_dataset is not None:
# Figure out validation_dataset arguments
kwargs = tools.kwargs_from_args(args, "validation_dataset")
kwargs["is_cropped"] = True
kwargs["args"] = args
# Create validation dataset
validation_dataset = tools.instance_from_kwargs(args.validation_dataset_class, kwargs)
# Create validation loader
validation_loader = DataLoader(
validation_dataset,
batch_size=args.batch_size_val,
shuffle=False,
drop_last=False,
**gpuargs)
_log_statistics(validation_dataset, prefix="Validation", name=args.validation_dataset)
return train_loader, validation_loader
# Helpers for selecting trainable parameters by pattern matching
def _print_trainable_params(model_and_loss, match="*"):
total = 0
for name, p in model_and_loss.named_parameters():
if fnmatch.fnmatch(name, match):
if p.requires_grad:
logging.info(name)
logging.info(str(p.numel()))
print(name)
print(p.numel())
total += p.numel()
logging.info(str(total))
def _generate_trainable_params(model_and_loss, match="*"):
for name, p in model_and_loss.named_parameters():
if fnmatch.fnmatch(name, match):
if p.requires_grad:
yield p
def _param_names_and_trainable_generator(model_and_loss, match="*"):
names = []
for name, p in model_and_loss.named_parameters():
if fnmatch.fnmatch(name, match):
if p.requires_grad:
names.append(name)
return names, _generate_trainable_params(model_and_loss, match=match)
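# Minimal illustration of the fnmatch-based selection used above (the
# parameter names are hypothetical):
#
#   fnmatch.fnmatch("feature_pyramid_extractor.conv1.weight", "*conv1*")  # True
#   fnmatch.fnmatch("flow_estimators.conv1.bias", "feature_*")            # False
#
# so a group pattern such as "*flow_estimators*" selects every trainable
# parameter whose fully qualified name contains that substring.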
# Build optimizer:
def configure_optimizer(args, model_and_loss):
optimizer = None
with logger.LoggingBlock("Optimizer", emph=True):
if args.optimizer is not None:
if model_and_loss.num_parameters() == 0:
logging.info("No trainable parameters detected.")
logging.info("Setting optimizer to None.")
else:
logging.info(args.optimizer)
# Figure out all optimizer arguments
all_kwargs = tools.kwargs_from_args(args, "optimizer")
# Get the split of param groups
kwargs_without_groups = {
key: value for key, value in all_kwargs.items() if key != "group"
}
param_groups = all_kwargs["group"]
# Print arguments (without groups)
for param, default in sorted(kwargs_without_groups.items()):
logging.info("%s: %s" % (param, default))
# Construct actual optimizer params
kwargs = dict(kwargs_without_groups)
if param_groups is None:
# Add all trainable parameters if there is no param groups
all_trainable_parameters = _generate_trainable_params(model_and_loss)
kwargs["params"] = all_trainable_parameters
else:
# Add list of parameter groups instead
trainable_parameter_groups = []
dnames, dparams = _param_names_and_trainable_generator(model_and_loss)
dnames = set(dnames)
dparams = set(list(dparams))
with logger.LoggingBlock("parameter_groups:"):
for group in param_groups:
# log group settings
group_match = group["params"]
group_args = {
key: value for key, value in group.items() if key != "params"
}
with logger.LoggingBlock("%s: %s" % (group_match, group_args)):
# retrieve parameters by matching name
gnames, gparams = _param_names_and_trainable_generator(
model_and_loss, match=group_match)
# materialize the generator once so the same parameters can be
# handed to the optimizer group and removed from the defaults below
gparams = list(gparams)
# log all names affected
for n in sorted(gnames):
logging.info(n)
# set parameter list for group
group_args["params"] = gparams
# append parameter group
trainable_parameter_groups.append(group_args)
# update remaining trainable parameters
dnames -= set(gnames)
dparams -= set(gparams)
# append default parameter group
trainable_parameter_groups.append({"params": list(dparams)})
# and log its parameter names
with logger.LoggingBlock("default:"):
for dname in sorted(dnames):
logging.info(dname)
# set params in optimizer kwargs
kwargs["params"] = trainable_parameter_groups
# Create optimizer instance
optimizer = tools.instance_from_kwargs(args.optimizer_class, kwargs)
return optimizer
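# Hedged example (values and group names are placeholders): the structure
# assembled above is the standard torch.optim per-parameter-group form, i.e.
#
#   optimizer = torch.optim.Adam(
#       [{"params": encoder_params, "lr": 1e-5},
#        {"params": remaining_params}],
#       lr=1e-4, weight_decay=4e-4)
#
# where encoder_params / remaining_params stand in for the lists built from
# the optimizer group arguments parsed by tools.kwargs_from_args.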
# Configure learning rate scheduler
def configure_lr_scheduler(args, optimizer):
lr_scheduler = None
with logger.LoggingBlock("Learning Rate Scheduler", emph=True):
logging.info("class: %s" % args.lr_scheduler)
if args.lr_scheduler is not None:
# Figure out lr_scheduler arguments
kwargs = tools.kwargs_from_args(args, "lr_scheduler")
# Print arguments
for param, default in sorted(kwargs.items()):
logging.info("%s: %s" % (param, default))
# Add optimizer
kwargs["optimizer"] = optimizer
# Create lr_scheduler instance
lr_scheduler = tools.instance_from_kwargs(args.lr_scheduler_class, kwargs)
return lr_scheduler
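# Hedged example (scheduler class and values are placeholders): with a
# MultiStepLR scheduler the call above reduces to something like
#
#   lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
#       optimizer, milestones=[108, 144, 180], gamma=0.5)
#
# i.e. every keyword parsed from the lr_scheduler arguments is passed straight
# through to the scheduler constructor, together with the optimizer itself.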
avg_line_length: 41.151803 | max_line_length: 156 | alphanum_fraction: 0.608521

hexsha: 16889165513c682396dfd76bf180aced0791ddc2 | size: 9,724 | ext: py | lang: Python
max_stars_repo_path: photutils/psf/utils.py | max_stars_repo_name: rosteen/photutils | max_stars_repo_head_hexsha: 5821bddc2d3fa2709b8de79c18efe99cff1ecb71 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 167 | max_stars_repo_stars_event_min_datetime: 2015-05-17T15:03:58.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-23T13:31:33.000Z
max_issues_repo_path: photutils/psf/utils.py | max_issues_repo_name: rosteen/photutils | max_issues_repo_head_hexsha: 5821bddc2d3fa2709b8de79c18efe99cff1ecb71 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 701 | max_issues_repo_issues_event_min_datetime: 2015-01-05T11:47:12.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-29T14:37:03.000Z
max_forks_repo_path: photutils/psf/utils.py | max_forks_repo_name: rosteen/photutils | max_forks_repo_head_hexsha: 5821bddc2d3fa2709b8de79c18efe99cff1ecb71 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 119 | max_forks_repo_forks_event_min_datetime: 2015-02-04T21:43:02.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-15T10:55:13.000Z
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides utilities for PSF-fitting photometry.
"""
from astropy.table import QTable
from astropy.modeling.models import Const2D, Identity, Shift
from astropy.nddata.utils import add_array, extract_array
import numpy as np
__all__ = ['prepare_psf_model', 'get_grouped_psf_model', 'subtract_psf']
class _InverseShift(Shift):
@staticmethod
def evaluate(x, offset):
return x - offset
@staticmethod
def fit_deriv(x, *params):
"""
One dimensional Shift model derivative with respect to parameter.
"""
d_offset = -np.ones_like(x)
return [d_offset]
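# Editorial note: evaluate(x, offset) = x - offset, so the derivative with
# respect to the single parameter is d/d(offset) = -1 at every input point,
# which is exactly the constant array returned above.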
def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None,
renormalize_psf=True):
"""
Convert a 2D PSF model to one suitable for use with
`BasicPSFPhotometry` or its subclasses.
.. note::
This function is needed only in special cases where the PSF
model does not have ``x_0``, ``y_0``, and ``flux`` model
parameters. In particular, it is not needed for any of the PSF
models provided by photutils (e.g., `~photutils.psf.EPSFModel`,
`~photutils.psf.IntegratedGaussianPRF`,
`~photutils.psf.FittableImageModel`,
`~photutils.psf.GriddedPSFModel`, etc).
Parameters
----------
psfmodel : `~astropy.modeling.Fittable2DModel`
The model to assume as representative of the PSF.
xname : `str` or `None`, optional
The name of the ``psfmodel`` parameter that corresponds to the
x-axis center of the PSF. If `None`, the model will be assumed
to be centered at x=0, and a new parameter will be added for the
offset.
yname : `str` or `None`, optional
The name of the ``psfmodel`` parameter that corresponds to the
y-axis center of the PSF. If `None`, the model will be assumed
to be centered at y=0, and a new parameter will be added for the
offset.
fluxname : `str` or `None`, optional
The name of the ``psfmodel`` parameter that corresponds to the
total flux of the star. If `None`, a scaling factor will be
added to the model.
renormalize_psf : bool, optional
If `True`, the model will be integrated from -inf to inf and
rescaled so that the total integrates to 1. Note that this
renormalization only occurs *once*, so if the total flux of
``psfmodel`` depends on position, this will *not* be correct.
Returns
-------
result : `~astropy.modeling.Fittable2DModel`
A new model ready to be passed into `BasicPSFPhotometry` or its
subclasses.
"""
if xname is None:
xinmod = _InverseShift(0, name='x_offset')
xname = 'offset_0'
else:
xinmod = Identity(1)
xname = xname + '_2'
xinmod.fittable = True
if yname is None:
yinmod = _InverseShift(0, name='y_offset')
yname = 'offset_1'
else:
yinmod = Identity(1)
yname = yname + '_2'
yinmod.fittable = True
outmod = (xinmod & yinmod) | psfmodel.copy()
if fluxname is None:
outmod = outmod * Const2D(1, name='flux_scaling')
fluxname = 'amplitude_3'
else:
fluxname = fluxname + '_2'
if renormalize_psf:
# we do the import here because other machinery works w/o scipy
from scipy import integrate
integrand = integrate.dblquad(psfmodel, -np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)[0]
normmod = Const2D(1./integrand, name='renormalize_scaling')
outmod = outmod * normmod
# final setup of the output model - fix all the non-offset/scale
# parameters
for pnm in outmod.param_names:
outmod.fixed[pnm] = pnm not in (xname, yname, fluxname)
# and set the names so that BasicPSFPhotometry knows what to do
outmod.xname = xname
outmod.yname = yname
outmod.fluxname = fluxname
# now some convenience aliases if reasonable
outmod.psfmodel = outmod[2]
if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names:
outmod.x_0 = getattr(outmod, xname)
outmod.y_0 = getattr(outmod, yname)
if 'flux' not in outmod.param_names:
outmod.flux = getattr(outmod, fluxname)
return outmod
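# Hedged usage sketch (not part of photutils itself): wrapping a plain
# Gaussian2D, whose parameters are not named x_0/y_0/flux:
#
#   from astropy.modeling.models import Gaussian2D
#   gauss = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, x_stddev=2, y_stddev=2)
#   psf = prepare_psf_model(gauss, xname='x_mean', yname='y_mean',
#                           fluxname='amplitude', renormalize_psf=False)
#
# the returned compound model then carries the x_0/y_0/flux aliases set above.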
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set):
"""
Construct a joint PSF model which consists of a sum of PSF's templated on
a specific model, but whose parameters are given by a table of objects.
Parameters
----------
template_psf_model : `astropy.modeling.Fittable2DModel` instance
The model to use for *individual* objects. Must have parameters named
``x_0``, ``y_0``, and ``flux``.
star_group : `~astropy.table.Table`
Table of stars for which the compound PSF will be constructed. It
must have columns named ``x_0``, ``y_0``, and ``flux_0``.
pars_to_set : `dict`
A dictionary of parameter names and values to set.
Returns
-------
group_psf
An `astropy.modeling` ``CompoundModel`` instance which is a sum of the
given PSF models.
"""
group_psf = None
for index, star in enumerate(star_group):
psf_to_add = template_psf_model.copy()
# we 'tag' the model here so that later we don't have to rely
# on possibly mangled names of the compound model to find
# the parameters again
psf_to_add.name = index
for param_tab_name, param_name in pars_to_set.items():
setattr(psf_to_add, param_name, star[param_tab_name])
if group_psf is None:
# this is the first one only
group_psf = psf_to_add
else:
group_psf = group_psf + psf_to_add
return group_psf
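# Hedged sketch of the expected inputs (values are made up): pars_to_set maps
# table column names to model parameter names, e.g.
#
#   stars = QTable(rows=[(1.0, 2.0, 100.0), (5.0, 6.0, 50.0)],
#                  names=('x_0', 'y_0', 'flux_0'))
#   group = get_grouped_psf_model(IntegratedGaussianPRF(sigma=1.5), stars,
#                                 {'x_0': 'x_0', 'y_0': 'y_0', 'flux_0': 'flux'})
#
# which yields a compound model summing one PRF per table row.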
def _extract_psf_fitting_names(psf):
"""
Determine the names of the x coordinate, y coordinate, and flux from
a model. Returns (xname, yname, fluxname)
"""
if hasattr(psf, 'xname'):
xname = psf.xname
elif 'x_0' in psf.param_names:
xname = 'x_0'
else:
raise ValueError('Could not determine x coordinate name for '
'psf_photometry.')
if hasattr(psf, 'yname'):
yname = psf.yname
elif 'y_0' in psf.param_names:
yname = 'y_0'
else:
raise ValueError('Could not determine y coordinate name for '
'psf_photometry.')
if hasattr(psf, 'fluxname'):
fluxname = psf.fluxname
elif 'flux' in psf.param_names:
fluxname = 'flux'
else:
raise ValueError('Could not determine flux name for psf_photometry.')
return xname, yname, fluxname
def _call_fitter(fitter, psf, x, y, data, weights):
"""
Not all fitters have to support a weight array. This function
includes the weight in the fitter call only if really needed.
"""
if np.all(weights == 1.):
return fitter(psf, x, y, data)
else:
return fitter(psf, x, y, data, weights=weights)
def subtract_psf(data, psf, posflux, subshape=None):
"""
Subtract PSF/PRFs from an image.
Parameters
----------
data : `~astropy.nddata.NDData` or array (must be 2D)
Image data.
psf : `astropy.modeling.Fittable2DModel` instance
PSF/PRF model to be subtracted from the data.
posflux : Array-like of shape (3, N) or `~astropy.table.Table`
Positions and fluxes for the objects to subtract. If an array,
it is interpreted as ``(x, y, flux)`` If a table, the columns
'x_fit', 'y_fit', and 'flux_fit' must be present.
subshape : length-2 or None
The shape of the region around the center of the location to
subtract the PSF from. If None, subtract from the whole image.
Returns
-------
subdata : same shape and type as ``data``
The image with the PSF subtracted
"""
if data.ndim != 2:
raise ValueError(f'{data.ndim}-d array not supported. Only 2-d '
'arrays can be passed to subtract_psf.')
# translate array input into table
if hasattr(posflux, 'colnames'):
if 'x_fit' not in posflux.colnames:
raise ValueError('Input table does not have x_fit')
if 'y_fit' not in posflux.colnames:
raise ValueError('Input table does not have y_fit')
if 'flux_fit' not in posflux.colnames:
raise ValueError('Input table does not have flux_fit')
else:
posflux = QTable(names=['x_fit', 'y_fit', 'flux_fit'], data=posflux)
# Set up constants across the loop
psf = psf.copy()
xname, yname, fluxname = _extract_psf_fitting_names(psf)
indices = np.indices(data.shape)
subbeddata = data.copy()
if subshape is None:
indices_reversed = indices[::-1]
for row in posflux:
getattr(psf, xname).value = row['x_fit']
getattr(psf, yname).value = row['y_fit']
getattr(psf, fluxname).value = row['flux_fit']
subbeddata -= psf(*indices_reversed)
else:
for row in posflux:
x_0, y_0 = row['x_fit'], row['y_fit']
# float dtype needed for fill_value=np.nan
y = extract_array(indices[0].astype(float), subshape, (y_0, x_0))
x = extract_array(indices[1].astype(float), subshape, (y_0, x_0))
getattr(psf, xname).value = x_0
getattr(psf, yname).value = y_0
getattr(psf, fluxname).value = row['flux_fit']
subbeddata = add_array(subbeddata, -psf(x, y), (y_0, x_0))
return subbeddata
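# Hedged usage sketch (the image and fitted model are placeholders):
#
#   fitted = QTable(rows=[(24.2, 16.7, 315.0)],
#                   names=('x_fit', 'y_fit', 'flux_fit'))
#   residual = subtract_psf(image, fitted_psf_model, fitted, subshape=(11, 11))
#
# passing subshape keeps the subtraction local to an 11x11 stamp around each
# source instead of evaluating the model over the whole image.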
avg_line_length: 33.647059 | max_line_length: 78 | alphanum_fraction: 0.627725

hexsha: 636d063eb96a6e50cfb06a2a8cd3a7574710b97f | size: 8,084 | ext: py | lang: Python
max_stars_repo_path: appGUI/preferences/tools/Tools2QRCodePrefGroupUI.py | max_stars_repo_name: DannyPol/flatcam | max_stars_repo_head_hexsha: 25a8634d0658e98b7fae31a095f8bef40c1b3067 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2022-02-11T06:19:34.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-11T06:19:34.000Z
max_issues_repo_path: appGUI/preferences/tools/Tools2QRCodePrefGroupUI.py | max_issues_repo_name: MRemy2/FlatCam | max_issues_repo_head_hexsha: d4f941335ca8a8d5351aab23b396f99da06a9029 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: appGUI/preferences/tools/Tools2QRCodePrefGroupUI.py | max_forks_repo_name: MRemy2/FlatCam | max_forks_repo_head_hexsha: d4f941335ca8a8d5351aab23b396f99da06a9029 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import Qt, QSettings
from appGUI.GUIElements import FCSpinner, RadioSet, FCTextArea, FCEntry, FCColorEntry
from appGUI.preferences.OptionsGroupUI import OptionsGroupUI
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
if '_' not in builtins.__dict__:
_ = gettext.gettext
settings = QSettings("Open Source", "FlatCAM")
if settings.contains("machinist"):
machinist_setting = settings.value('machinist', type=int)
else:
machinist_setting = 0
class Tools2QRCodePrefGroupUI(OptionsGroupUI):
def __init__(self, decimals=4, parent=None):
super(Tools2QRCodePrefGroupUI, self).__init__(self, parent=parent)
self.setTitle(str(_("QRCode Tool Options")))
self.decimals = decimals
# ## Parameters
self.qrlabel = QtWidgets.QLabel("<b>%s:</b>" % _("Parameters"))
self.qrlabel.setToolTip(
_("A tool to create a QRCode that can be inserted\n"
"into a selected Gerber file, or it can be exported as a file.")
)
self.layout.addWidget(self.qrlabel)
# ## Grid Layout
grid_lay = QtWidgets.QGridLayout()
self.layout.addLayout(grid_lay)
grid_lay.setColumnStretch(0, 0)
grid_lay.setColumnStretch(1, 1)
# VERSION #
self.version_label = QtWidgets.QLabel('%s:' % _("Version"))
self.version_label.setToolTip(
_("QRCode version can have values from 1 (21x21 boxes)\n"
"to 40 (177x177 boxes).")
)
self.version_entry = FCSpinner()
self.version_entry.set_range(1, 40)
self.version_entry.setWrapping(True)
grid_lay.addWidget(self.version_label, 1, 0)
grid_lay.addWidget(self.version_entry, 1, 1)
# ERROR CORRECTION #
self.error_label = QtWidgets.QLabel('%s:' % _("Error correction"))
self.error_label.setToolTip(
_("Parameter that controls the error correction used for the QR Code.\n"
"L = maximum 7%% errors can be corrected\n"
"M = maximum 15%% errors can be corrected\n"
"Q = maximum 25%% errors can be corrected\n"
"H = maximum 30%% errors can be corrected.")
)
self.error_radio = RadioSet([{'label': 'L', 'value': 'L'},
{'label': 'M', 'value': 'M'},
{'label': 'Q', 'value': 'Q'},
{'label': 'H', 'value': 'H'}])
self.error_radio.setToolTip(
_("Parameter that controls the error correction used for the QR Code.\n"
"L = maximum 7%% errors can be corrected\n"
"M = maximum 15%% errors can be corrected\n"
"Q = maximum 25%% errors can be corrected\n"
"H = maximum 30%% errors can be corrected.")
)
grid_lay.addWidget(self.error_label, 2, 0)
grid_lay.addWidget(self.error_radio, 2, 1)
# BOX SIZE #
self.bsize_label = QtWidgets.QLabel('%s:' % _("Box Size"))
self.bsize_label.setToolTip(
_("Box size control the overall size of the QRcode\n"
"by adjusting the size of each box in the code.")
)
self.bsize_entry = FCSpinner()
self.bsize_entry.set_range(1, 9999)
self.bsize_entry.setWrapping(True)
grid_lay.addWidget(self.bsize_label, 3, 0)
grid_lay.addWidget(self.bsize_entry, 3, 1)
# BORDER SIZE #
self.border_size_label = QtWidgets.QLabel('%s:' % _("Border Size"))
self.border_size_label.setToolTip(
_("Size of the QRCode border. How many boxes thick is the border.\n"
"Default value is 4. The width of the clearance around the QRCode.")
)
self.border_size_entry = FCSpinner()
self.border_size_entry.set_range(1, 9999)
self.border_size_entry.setWrapping(True)
grid_lay.addWidget(self.border_size_label, 4, 0)
grid_lay.addWidget(self.border_size_entry, 4, 1)
# Text box
self.text_label = QtWidgets.QLabel('%s:' % _("QRCode Data"))
self.text_label.setToolTip(
_("QRCode Data. Alphanumeric text to be encoded in the QRCode.")
)
self.text_data = FCTextArea()
self.text_data.setPlaceholderText(
_("Add here the text to be included in the QRCode...")
)
grid_lay.addWidget(self.text_label, 5, 0)
grid_lay.addWidget(self.text_data, 6, 0, 1, 2)
# POLARITY CHOICE #
self.pol_label = QtWidgets.QLabel('%s:' % _("Polarity"))
self.pol_label.setToolTip(
_("Choose the polarity of the QRCode.\n"
"It can be drawn in a negative way (squares are clear)\n"
"or in a positive way (squares are opaque).")
)
self.pol_radio = RadioSet([{'label': _('Negative'), 'value': 'neg'},
{'label': _('Positive'), 'value': 'pos'}])
self.pol_radio.setToolTip(
_("Choose the type of QRCode to be created.\n"
"If added on a Silkscreen Gerber file the QRCode may\n"
"be added as positive. If it is added to a Copper Gerber\n"
"file then perhaps the QRCode can be added as negative.")
)
grid_lay.addWidget(self.pol_label, 7, 0)
grid_lay.addWidget(self.pol_radio, 7, 1)
# BOUNDING BOX TYPE #
self.bb_label = QtWidgets.QLabel('%s:' % _("Bounding Box"))
self.bb_label.setToolTip(
_("The bounding box, meaning the empty space that surrounds\n"
"the QRCode geometry, can have a rounded or a square shape.")
)
self.bb_radio = RadioSet([{'label': _('Rounded'), 'value': 'r'},
{'label': _('Square'), 'value': 's'}])
self.bb_radio.setToolTip(
_("The bounding box, meaning the empty space that surrounds\n"
"the QRCode geometry, can have a rounded or a square shape.")
)
grid_lay.addWidget(self.bb_label, 8, 0)
grid_lay.addWidget(self.bb_radio, 8, 1)
# FILL COLOR #
self.fill_color_label = QtWidgets.QLabel('%s:' % _('Fill Color'))
self.fill_color_label.setToolTip(
_("Set the QRCode fill color (squares color).")
)
self.fill_color_entry = FCColorEntry()
grid_lay.addWidget(self.fill_color_label, 9, 0)
grid_lay.addWidget(self.fill_color_entry, 9, 1)
# BACK COLOR #
self.back_color_label = QtWidgets.QLabel('%s:' % _('Back Color'))
self.back_color_label.setToolTip(
_("Set the QRCode background color.")
)
self.back_color_entry = FCColorEntry()
grid_lay.addWidget(self.back_color_label, 10, 0)
grid_lay.addWidget(self.back_color_entry, 10, 1)
# Selection Limit
self.sel_limit_label = QtWidgets.QLabel('%s:' % _("Selection limit"))
self.sel_limit_label.setToolTip(
_("Set the number of selected geometry\n"
"items above which the utility geometry\n"
"becomes just a selection rectangle.\n"
"Increases the performance when moving a\n"
"large number of geometric elements.")
)
self.sel_limit_entry = FCSpinner()
self.sel_limit_entry.set_range(0, 9999)
grid_lay.addWidget(self.sel_limit_label, 11, 0)
grid_lay.addWidget(self.sel_limit_entry, 11, 1)
# self.layout.addStretch()
# QRCode Tool
self.fill_color_entry.editingFinished.connect(self.on_qrcode_fill_color_entry)
self.back_color_entry.editingFinished.connect(self.on_qrcode_back_color_entry)
def on_qrcode_fill_color_entry(self):
self.app.defaults['tools_qrcode_fill_color'] = self.fill_color_entry.get_value()
def on_qrcode_back_color_entry(self):
self.app.defaults['tools_qrcode_back_color'] = self.back_color_entry.get_value()
avg_line_length: 41.244898 | max_line_length: 88 | alphanum_fraction: 0.609104

hexsha: dcdb1e22bdece0a3fda7984a68d7b8edc62514a7 | size: 78,110 | ext: py | lang: Python
max_stars_repo_path: release/scripts/startup/bl_ui/properties_data_modifier.py | max_stars_repo_name: awebneck/blender | max_stars_repo_head_hexsha: bfbee8783138c87c0f805bcb69540f7391bf2ad3 | max_stars_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2019-09-16T10:29:19.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-11T14:43:18.000Z
max_issues_repo_path: release/scripts/startup/bl_ui/properties_data_modifier.py | max_issues_repo_name: awebneck/blender | max_issues_repo_head_hexsha: bfbee8783138c87c0f805bcb69540f7391bf2ad3 | max_issues_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: release/scripts/startup/bl_ui/properties_data_modifier.py | max_forks_repo_name: awebneck/blender | max_forks_repo_head_hexsha: bfbee8783138c87c0f805bcb69540f7391bf2ad3 | max_forks_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-09-05T05:11:15.000Z | max_forks_repo_forks_event_max_datetime: 2019-09-05T05:11:15.000Z
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Panel
from bpy.app.translations import pgettext_iface as iface_
class ModifierButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "modifier"
bl_options = {'HIDE_HEADER'}
class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
bl_label = "Modifiers"
@classmethod
def poll(cls, context):
ob = context.object
return ob and ob.type != 'GPENCIL'
def draw(self, context):
layout = self.layout
ob = context.object
layout.operator_menu_enum("object.modifier_add", "type")
for md in ob.modifiers:
box = layout.template_modifier(md)
if box:
# match enum type to our functions, avoids a lookup table.
getattr(self, md.type)(box, ob, md)
# the mt.type enum is (ab)used for a lookup on function names
# ...to avoid lengthy if statements
# so each type must have a function here.
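# e.g. an Array modifier reports md.type == 'ARRAY', so the box created by
# template_modifier() above is drawn by self.ARRAY(box, ob, md); supporting a
# new modifier type only requires adding a method named after its enum value.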
def ARMATURE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
col.prop(md, "use_deform_preserve_volume")
col = split.column()
col.label(text="Bind To:")
col.prop(md, "use_vertex_groups", text="Vertex Groups")
col.prop(md, "use_bone_envelopes", text="Bone Envelopes")
layout.separator()
split = layout.split()
row = split.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
split.prop(md, "use_multi_modifier")
def ARRAY(self, layout, _ob, md):
layout.prop(md, "fit_type")
if md.fit_type == 'FIXED_COUNT':
layout.prop(md, "count")
elif md.fit_type == 'FIT_LENGTH':
layout.prop(md, "fit_length")
elif md.fit_type == 'FIT_CURVE':
layout.prop(md, "curve")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "use_constant_offset")
sub = col.column()
sub.active = md.use_constant_offset
sub.prop(md, "constant_offset_displace", text="")
col.separator()
col.prop(md, "use_merge_vertices", text="Merge")
sub = col.column()
sub.active = md.use_merge_vertices
sub.prop(md, "use_merge_vertices_cap", text="First Last")
sub.prop(md, "merge_threshold", text="Distance")
col = split.column()
col.prop(md, "use_relative_offset")
sub = col.column()
sub.active = md.use_relative_offset
sub.prop(md, "relative_offset_displace", text="")
col.separator()
col.prop(md, "use_object_offset")
sub = col.column()
sub.active = md.use_object_offset
sub.prop(md, "offset_object", text="")
row = layout.row()
split = row.split()
col = split.column()
col.label(text="UVs:")
sub = col.column(align=True)
sub.prop(md, "offset_u")
sub.prop(md, "offset_v")
layout.separator()
layout.prop(md, "start_cap")
layout.prop(md, "end_cap")
def BEVEL(self, layout, ob, md):
split = layout.split()
col = split.column()
if md.offset_type == 'PERCENT':
col.prop(md, "width_pct")
else:
col.prop(md, "width")
col.prop(md, "segments")
col.prop(md, "profile")
col.prop(md, "material")
col = split.column()
col.prop(md, "use_only_vertices")
col.prop(md, "use_clamp_overlap")
col.prop(md, "loop_slide")
col.prop(md, "mark_seam")
col.prop(md, "mark_sharp")
col.prop(md, "harden_normals")
layout.label(text="Limit Method:")
layout.row().prop(md, "limit_method", expand=True)
if md.limit_method == 'ANGLE':
layout.prop(md, "angle_limit")
elif md.limit_method == 'VGROUP':
layout.label(text="Vertex Group:")
layout.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.label(text="Width Method:")
layout.row().prop(md, "offset_type", expand=True)
layout.label(text="Set Face Strength Mode")
layout.row().prop(md, "face_strength_mode", expand=True)
layout.label(text="Miter Patterns")
layout.row().prop(md, "miter_outer")
layout.row().prop(md, "miter_inner")
layout.row().prop(md, "spread")
def BOOLEAN(self, layout, _ob, md):
split = layout.split()
col = split.column()
col.label(text="Operation:")
col.prop(md, "operation", text="")
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
layout.prop(md, "double_threshold")
if bpy.app.debug:
layout.prop(md, "debug_options")
def BUILD(self, layout, _ob, md):
split = layout.split()
col = split.column()
col.prop(md, "frame_start")
col.prop(md, "frame_duration")
col.prop(md, "use_reverse")
col = split.column()
col.prop(md, "use_random_order")
sub = col.column()
sub.active = md.use_random_order
sub.prop(md, "seed")
def MESH_CACHE(self, layout, _ob, md):
layout.prop(md, "cache_format")
layout.prop(md, "filepath")
if md.cache_format == 'ABC':
layout.prop(md, "sub_object")
layout.label(text="Evaluation:")
layout.prop(md, "factor", slider=True)
layout.prop(md, "deform_mode")
layout.prop(md, "interpolation")
layout.label(text="Time Mapping:")
row = layout.row()
row.prop(md, "time_mode", expand=True)
row = layout.row()
row.prop(md, "play_mode", expand=True)
if md.play_mode == 'SCENE':
layout.prop(md, "frame_start")
layout.prop(md, "frame_scale")
else:
time_mode = md.time_mode
if time_mode == 'FRAME':
layout.prop(md, "eval_frame")
elif time_mode == 'TIME':
layout.prop(md, "eval_time")
elif time_mode == 'FACTOR':
layout.prop(md, "eval_factor")
layout.label(text="Axis Mapping:")
split = layout.split(factor=0.5, align=True)
split.alert = (md.forward_axis[-1] == md.up_axis[-1])
split.label(text="Forward/Up Axis:")
split.prop(md, "forward_axis", text="")
split.prop(md, "up_axis", text="")
split = layout.split(factor=0.5)
split.label(text="Flip Axis:")
row = split.row()
row.prop(md, "flip_axis")
def MESH_SEQUENCE_CACHE(self, layout, ob, md):
layout.label(text="Cache File Properties:")
box = layout.box()
box.template_cache_file(md, "cache_file")
cache_file = md.cache_file
layout.label(text="Modifier Properties:")
box = layout.box()
if cache_file is not None:
box.prop_search(md, "object_path", cache_file, "object_paths")
if ob.type == 'MESH':
box.row().prop(md, "read_data")
def CAST(self, layout, ob, md):
split = layout.split(factor=0.25)
split.label(text="Cast Type:")
split.prop(md, "cast_type", text="")
split = layout.split(factor=0.25)
col = split.column()
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.prop(md, "factor")
col.prop(md, "radius")
col.prop(md, "size")
col.prop(md, "use_radius_as_size")
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column()
col.label(text="Control Object:")
col.prop(md, "object", text="")
if md.object:
col.prop(md, "use_transform")
def CLOTH(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def COLLISION(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def CURVE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.label(text="Deformation Axis:")
layout.row().prop(md, "deform_axis", expand=True)
def DECIMATE(self, layout, ob, md):
decimate_type = md.decimate_type
row = layout.row()
row.prop(md, "decimate_type", expand=True)
if decimate_type == 'COLLAPSE':
has_vgroup = bool(md.vertex_group)
layout.prop(md, "ratio")
split = layout.split()
col = split.column()
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
layout_info = col
col = split.column()
row = col.row()
row.active = has_vgroup
row.prop(md, "vertex_group_factor")
col.prop(md, "use_collapse_triangulate")
row = col.split(factor=0.75)
row.prop(md, "use_symmetry")
row.prop(md, "symmetry_axis", text="")
elif decimate_type == 'UNSUBDIV':
layout.prop(md, "iterations")
layout_info = layout
else: # decimate_type == 'DISSOLVE':
layout.prop(md, "angle_limit")
layout.prop(md, "use_dissolve_boundaries")
layout.label(text="Delimit:")
row = layout.row()
row.prop(md, "delimit")
layout_info = layout
layout_info.label(
text=iface_("Face Count: {:,}".format(md.face_count)),
translate=False,
)
def DISPLACE(self, layout, ob, md):
has_texture = (md.texture is not None)
col = layout.column(align=True)
col.label(text="Texture:")
col.template_ID(md, "texture", new="texture.new")
split = layout.split()
col = split.column(align=True)
col.label(text="Direction:")
col.prop(md, "direction", text="")
if md.direction in {'X', 'Y', 'Z', 'RGB_TO_XYZ'}:
col.label(text="Space:")
col.prop(md, "space", text="")
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column(align=True)
col.active = has_texture
col.label(text="Texture Coordinates:")
col.prop(md, "texture_coords", text="")
if md.texture_coords == 'OBJECT':
col.label(text="Object:")
col.prop(md, "texture_coords_object", text="")
elif md.texture_coords == 'UV' and ob.type == 'MESH':
col.label(text="UV Map:")
col.prop_search(md, "uv_layer", ob.data, "uv_layers", text="")
layout.separator()
row = layout.row()
row.prop(md, "mid_level")
row.prop(md, "strength")
def DYNAMIC_PAINT(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def EDGE_SPLIT(self, layout, _ob, md):
split = layout.split()
col = split.column()
col.prop(md, "use_edge_angle", text="Edge Angle")
sub = col.column()
sub.active = md.use_edge_angle
sub.prop(md, "split_angle")
split.prop(md, "use_edge_sharp", text="Sharp Edges")
def EXPLODE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = col.column()
sub.active = bool(md.vertex_group)
sub.prop(md, "protect")
col.label(text="Particle UV")
col.prop_search(md, "particle_uv", ob.data, "uv_layers", text="")
col = split.column()
col.prop(md, "use_edge_cut")
col.prop(md, "show_unborn")
col.prop(md, "show_alive")
col.prop(md, "show_dead")
col.prop(md, "use_size")
layout.operator("object.explode_refresh", text="Refresh")
def FLUID_SIMULATION(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def HOOK(self, layout, ob, md):
use_falloff = (md.falloff_type != 'NONE')
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
if md.object and md.object.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "subtarget", md.object.data, "bones", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.separator()
row = layout.row(align=True)
if use_falloff:
row.prop(md, "falloff_radius")
row.prop(md, "strength", slider=True)
layout.prop(md, "falloff_type")
col = layout.column()
if use_falloff:
if md.falloff_type == 'CURVE':
col.template_curve_mapping(md, "falloff_curve")
split = layout.split()
col = split.column()
col.prop(md, "use_falloff_uniform")
if ob.mode == 'EDIT':
row = col.row(align=True)
row.operator("object.hook_reset", text="Reset")
row.operator("object.hook_recenter", text="Recenter")
row = layout.row(align=True)
row.operator("object.hook_select", text="Select")
row.operator("object.hook_assign", text="Assign")
def LAPLACIANDEFORM(self, layout, ob, md):
is_bind = md.is_bind
layout.prop(md, "iterations")
row = layout.row()
row.active = not is_bind
row.label(text="Anchors Vertex Group:")
row = layout.row()
row.enabled = not is_bind
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.separator()
row = layout.row()
row.enabled = bool(md.vertex_group)
row.operator("object.laplaciandeform_bind", text="Unbind" if is_bind else "Bind")
def LAPLACIANSMOOTH(self, layout, ob, md):
layout.prop(md, "iterations")
split = layout.split(factor=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.label(text="Lambda:")
col.prop(md, "lambda_factor", text="Factor")
col.prop(md, "lambda_border", text="Border")
col.separator()
col.prop(md, "use_volume_preserve")
col.prop(md, "use_normalized")
layout.label(text="Vertex Group:")
layout.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
def LATTICE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.separator()
layout.prop(md, "strength", slider=True)
def MASK(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Mode:")
col.prop(md, "mode", text="")
col = split.column()
if md.mode == 'ARMATURE':
col.label(text="Armature:")
row = col.row(align=True)
row.prop(md, "armature", text="")
sub = row.row(align=True)
sub.active = (md.armature is not None)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
elif md.mode == 'VERTEX_GROUP':
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.prop(md, "threshold")
def MESH_DEFORM(self, layout, ob, md):
split = layout.split()
col = split.column()
col.enabled = not md.is_bound
col.label(text="Object:")
col.prop(md, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
layout.separator()
row = layout.row()
row.enabled = not md.is_bound
row.prop(md, "precision")
row.prop(md, "use_dynamic_bind")
layout.separator()
if md.is_bound:
layout.operator("object.meshdeform_bind", text="Unbind")
else:
layout.operator("object.meshdeform_bind", text="Bind")
def MIRROR(self, layout, _ob, md):
axis_text = "XYZ"
split = layout.split(factor=0.33)
col = split.column()
col.label(text="Axis:")
for i, text in enumerate(axis_text):
col.prop(md, "use_axis", text=text, index=i)
col = split.column()
col.label(text="Bisect:")
for i, text in enumerate(axis_text):
colsub = col.column()
colsub.prop(md, "use_bisect_axis", text=text, index=i)
colsub.active = md.use_axis[i]
col = split.column()
col.label(text="Flip:")
for i, text in enumerate(axis_text):
colsub = col.column()
colsub.prop(md, "use_bisect_flip_axis", text=text, index=i)
colsub.active = md.use_axis[i] and md.use_bisect_axis[i]
layout.separator()
col = layout.column()
col.label(text="Mirror Object:")
col.prop(md, "mirror_object", text="")
layout.separator()
col = layout.column()
col.label(text="Options:")
row = layout.row()
row.prop(md, "use_mirror_vertex_groups", text="Vertex Groups")
row.prop(md, "use_clip", text="Clipping")
row = layout.row()
row.prop(md, "use_mirror_merge", text="Merge")
col = layout.column()
if md.use_mirror_merge is True:
col.prop(md, "merge_threshold")
layout.separator()
col = layout.column()
col.label(text="Textures:")
row = layout.row()
row.prop(md, "use_mirror_u", text="Flip U")
row.prop(md, "use_mirror_v", text="Flip V")
col = layout.column(align=True)
if md.use_mirror_u:
col.prop(md, "mirror_offset_u")
if md.use_mirror_v:
col.prop(md, "mirror_offset_v")
col = layout.column(align=True)
col.prop(md, "offset_u")
col.prop(md, "offset_v")
def MULTIRES(self, layout, ob, md):
layout.row().prop(md, "subdivision_type", expand=True)
split = layout.split()
col = split.column()
col.prop(md, "levels", text="Preview")
col.prop(md, "sculpt_levels", text="Sculpt")
col.prop(md, "render_levels", text="Render")
col.prop(md, "quality")
col = split.column()
col.enabled = ob.mode != 'EDIT'
col.operator("object.multires_subdivide", text="Subdivide")
col.operator("object.multires_higher_levels_delete", text="Delete Higher")
col.operator("object.multires_reshape", text="Reshape")
col.operator("object.multires_base_apply", text="Apply Base")
col.prop(md, "uv_smooth", text="")
col.prop(md, "show_only_control_edges")
col.prop(md, "use_creases")
layout.separator()
col = layout.column()
row = col.row()
if md.is_external:
row.operator("object.multires_external_pack", text="Pack External")
row.label()
row = col.row()
row.prop(md, "filepath", text="")
else:
row.operator("object.multires_external_save", text="Save External...")
row.label()
def OCEAN(self, layout, _ob, md):
if not bpy.app.build_options.mod_oceansim:
layout.label(text="Built without OceanSim modifier")
return
layout.prop(md, "geometry_mode")
if md.geometry_mode == 'GENERATE':
row = layout.row()
row.prop(md, "repeat_x")
row.prop(md, "repeat_y")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "time")
col.prop(md, "depth")
col.prop(md, "random_seed")
col = split.column()
col.prop(md, "resolution")
col.prop(md, "size")
col.prop(md, "spatial_size")
layout.label(text="Waves:")
split = layout.split()
col = split.column()
col.prop(md, "choppiness")
col.prop(md, "wave_scale", text="Scale")
col.prop(md, "wave_scale_min")
col.prop(md, "wind_velocity")
col = split.column()
col.prop(md, "wave_alignment", text="Alignment")
sub = col.column()
sub.active = (md.wave_alignment > 0.0)
sub.prop(md, "wave_direction", text="Direction")
sub.prop(md, "damping")
layout.separator()
layout.prop(md, "use_normals")
split = layout.split()
col = split.column()
col.prop(md, "use_foam")
sub = col.row()
sub.active = md.use_foam
sub.prop(md, "foam_coverage", text="Coverage")
col = split.column()
col.active = md.use_foam
col.label(text="Foam Data Layer Name:")
col.prop(md, "foam_layer_name", text="")
layout.separator()
if md.is_cached:
layout.operator("object.ocean_bake", text="Delete Bake").free = True
else:
layout.operator("object.ocean_bake").free = False
split = layout.split()
split.enabled = not md.is_cached
col = split.column(align=True)
col.prop(md, "frame_start", text="Start")
col.prop(md, "frame_end", text="End")
col = split.column(align=True)
col.label(text="Cache path:")
col.prop(md, "filepath", text="")
split = layout.split()
split.enabled = not md.is_cached
col = split.column()
col.active = md.use_foam
col.prop(md, "bake_foam_fade")
col = split.column()
def PARTICLE_INSTANCE(self, layout, ob, md):
layout.prop(md, "object")
if md.object:
layout.prop_search(md, "particle_system", md.object, "particle_systems", text="Particle System")
else:
layout.prop(md, "particle_system_index", text="Particle System")
split = layout.split()
col = split.column()
col.label(text="Create From:")
layout.prop(md, "space", text="")
col.prop(md, "use_normal")
col.prop(md, "use_children")
col.prop(md, "use_size")
col = split.column()
col.label(text="Show Particles When:")
col.prop(md, "show_alive")
col.prop(md, "show_unborn")
col.prop(md, "show_dead")
row = layout.row(align=True)
row.prop(md, "particle_amount", text="Amount")
row.prop(md, "particle_offset", text="Offset")
row = layout.row(align=True)
row.prop(md, "axis", expand=True)
layout.separator()
layout.prop(md, "use_path", text="Create Along Paths")
col = layout.column()
col.active = md.use_path
col.prop(md, "use_preserve_shape")
row = col.row(align=True)
row.prop(md, "position", slider=True)
row.prop(md, "random_position", text="Random", slider=True)
row = col.row(align=True)
row.prop(md, "rotation", slider=True)
row.prop(md, "random_rotation", text="Random", slider=True)
layout.separator()
col = layout.column()
col.prop_search(md, "index_layer_name", ob.data, "vertex_colors", text="Index Layer")
col.prop_search(md, "value_layer_name", ob.data, "vertex_colors", text="Value Layer")
def PARTICLE_SYSTEM(self, layout, _ob, _md):
layout.label(text="Settings can be found inside the Particle context")
def SCREW(self, layout, _ob, md):
split = layout.split()
col = split.column()
col.prop(md, "axis")
col.prop(md, "object", text="AxisOb")
col.prop(md, "angle")
col.prop(md, "steps")
col.prop(md, "render_steps")
col.prop(md, "use_smooth_shade")
col.prop(md, "use_merge_vertices")
sub = col.column()
sub.active = md.use_merge_vertices
sub.prop(md, "merge_threshold")
col = split.column()
row = col.row()
row.active = (md.object is None or md.use_object_screw_offset is False)
row.prop(md, "screw_offset")
row = col.row()
row.active = (md.object is not None)
row.prop(md, "use_object_screw_offset")
col.prop(md, "use_normal_calculate")
col.prop(md, "use_normal_flip")
col.prop(md, "iterations")
col.prop(md, "use_stretch_u")
col.prop(md, "use_stretch_v")
def SHRINKWRAP(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Target:")
col.prop(md, "target", text="")
col = split.column()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
split = layout.split()
col = split.column()
col.prop(md, "offset")
col = split.column()
col.label(text="Mode:")
col.prop(md, "wrap_method", text="")
if md.wrap_method in {'PROJECT', 'NEAREST_SURFACEPOINT', 'TARGET_PROJECT'}:
col.prop(md, "wrap_mode", text="")
if md.wrap_method == 'PROJECT':
split = layout.split()
col = split.column()
col.prop(md, "subsurf_levels")
col = split.column()
col.prop(md, "project_limit", text="Limit")
split = layout.split(factor=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_project_x")
col.prop(md, "use_project_y")
col.prop(md, "use_project_z")
col = split.column()
col.label(text="Direction:")
col.prop(md, "use_negative_direction")
col.prop(md, "use_positive_direction")
subcol = col.column()
subcol.active = md.use_negative_direction and md.cull_face != 'OFF'
subcol.prop(md, "use_invert_cull")
col = split.column()
col.label(text="Cull Faces:")
col.prop(md, "cull_face", expand=True)
layout.prop(md, "auxiliary_target")
def SIMPLE_DEFORM(self, layout, ob, md):
layout.row().prop(md, "deform_method", expand=True)
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
split = layout.split()
col = split.column()
col.label(text="Axis, Origin:")
col.prop(md, "origin", text="")
col.prop(md, "deform_axis")
if md.deform_method in {'TAPER', 'STRETCH', 'TWIST'}:
row = col.row(align=True)
row.label(text="Lock:")
deform_axis = md.deform_axis
if deform_axis != 'X':
row.prop(md, "lock_x")
if deform_axis != 'Y':
row.prop(md, "lock_y")
if deform_axis != 'Z':
row.prop(md, "lock_z")
col = split.column()
col.label(text="Deform:")
if md.deform_method in {'TAPER', 'STRETCH'}:
col.prop(md, "factor")
else:
col.prop(md, "angle")
col.prop(md, "limits", slider=True)
def SMOKE(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def SMOOTH(self, layout, ob, md):
split = layout.split(factor=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.prop(md, "factor")
col.prop(md, "iterations")
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
def SOFT_BODY(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def SOLIDIFY(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "thickness")
col.prop(md, "thickness_clamp")
col.separator()
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
sub = col.row()
sub.active = bool(md.vertex_group)
sub.prop(md, "thickness_vertex_group", text="Factor")
col.label(text="Crease:")
col.prop(md, "edge_crease_inner", text="Inner")
col.prop(md, "edge_crease_outer", text="Outer")
col.prop(md, "edge_crease_rim", text="Rim")
col = split.column()
col.prop(md, "offset")
col.prop(md, "use_flip_normals")
col.prop(md, "use_even_offset")
col.prop(md, "use_quality_normals")
col.prop(md, "use_rim")
col_rim = col.column()
col_rim.active = md.use_rim
col_rim.prop(md, "use_rim_only")
col.separator()
col.label(text="Material Index Offset:")
sub = col.column()
row = sub.split(factor=0.4, align=True)
row.prop(md, "material_offset", text="")
row = row.row(align=True)
row.active = md.use_rim
row.prop(md, "material_offset_rim", text="Rim")
def SUBSURF(self, layout, ob, md):
from bpy import context
layout.row().prop(md, "subdivision_type", expand=True)
split = layout.split()
col = split.column()
scene = context.scene
engine = context.engine
show_adaptive_options = (
engine == 'CYCLES' and md == ob.modifiers[-1] and
scene.cycles.feature_set == 'EXPERIMENTAL'
)
if show_adaptive_options:
col.label(text="Render:")
col.prop(ob.cycles, "use_adaptive_subdivision", text="Adaptive")
if ob.cycles.use_adaptive_subdivision:
col.prop(ob.cycles, "dicing_rate")
else:
col.prop(md, "render_levels", text="Levels")
col.separator()
col.label(text="Viewport:")
col.prop(md, "levels", text="Levels")
else:
col.label(text="Subdivisions:")
sub = col.column(align=True)
sub.prop(md, "render_levels", text="Render")
sub.prop(md, "levels", text="Viewport")
col.prop(md, "quality")
col = split.column()
col.label(text="Options:")
sub = col.column()
sub.active = (not show_adaptive_options) or (not ob.cycles.use_adaptive_subdivision)
sub.prop(md, "uv_smooth", text="")
col.prop(md, "show_only_control_edges")
col.prop(md, "use_creases")
if show_adaptive_options and ob.cycles.use_adaptive_subdivision:
col = layout.column(align=True)
col.scale_y = 0.6
col.separator()
col.label(text="Final Dicing Rate:")
col.separator()
render = max(scene.cycles.dicing_rate * ob.cycles.dicing_rate, 0.1)
preview = max(scene.cycles.preview_dicing_rate * ob.cycles.dicing_rate, 0.1)
col.label(text=f"Render {render:.2f} px, Preview {preview:.2f} px")
def SURFACE(self, layout, _ob, _md):
layout.label(text="Settings are inside the Physics tab")
def SURFACE_DEFORM(self, layout, _ob, md):
col = layout.column()
col.active = not md.is_bound
col.prop(md, "target")
col.prop(md, "falloff")
layout.separator()
col = layout.column()
if md.is_bound:
col.operator("object.surfacedeform_bind", text="Unbind")
else:
col.active = md.target is not None
col.operator("object.surfacedeform_bind", text="Bind")
def UV_PROJECT(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop_search(md, "uv_layer", ob.data, "uv_layers")
col.separator()
col.prop(md, "projector_count", text="Projectors")
for proj in md.projectors:
col.prop(proj, "object", text="")
col = split.column()
sub = col.column(align=True)
sub.prop(md, "aspect_x", text="Aspect X")
sub.prop(md, "aspect_y", text="Aspect Y")
sub = col.column(align=True)
sub.prop(md, "scale_x", text="Scale X")
sub.prop(md, "scale_y", text="Scale Y")
def WARP(self, layout, ob, md):
use_falloff = (md.falloff_type != 'NONE')
split = layout.split()
col = split.column()
col.label(text="From:")
col.prop(md, "object_from", text="")
col.prop(md, "use_volume_preserve")
col = split.column()
col.label(text="To:")
col.prop(md, "object_to", text="")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = layout.column()
row = col.row(align=True)
row.prop(md, "strength")
if use_falloff:
row.prop(md, "falloff_radius")
col.prop(md, "falloff_type")
if use_falloff:
if md.falloff_type == 'CURVE':
col.template_curve_mapping(md, "falloff_curve")
# 2 new columns
split = layout.split()
col = split.column()
col.label(text="Texture:")
col.template_ID(md, "texture", new="texture.new")
col = split.column()
col.label(text="Texture Coordinates:")
col.prop(md, "texture_coords", text="")
if md.texture_coords == 'OBJECT':
layout.prop(md, "texture_coords_object", text="Object")
elif md.texture_coords == 'UV' and ob.type == 'MESH':
layout.prop_search(md, "uv_layer", ob.data, "uv_layers")
def WAVE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Motion:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_cyclic")
col = split.column()
col.prop(md, "use_normal")
sub = col.column()
sub.active = md.use_normal
sub.prop(md, "use_normal_x", text="X")
sub.prop(md, "use_normal_y", text="Y")
sub.prop(md, "use_normal_z", text="Z")
split = layout.split()
col = split.column()
col.label(text="Time:")
sub = col.column(align=True)
sub.prop(md, "time_offset", text="Offset")
sub.prop(md, "lifetime", text="Life")
col.prop(md, "damping_time", text="Damping")
col = split.column()
col.label(text="Position:")
sub = col.column(align=True)
sub.prop(md, "start_position_x", text="X")
sub.prop(md, "start_position_y", text="Y")
col.prop(md, "falloff_radius", text="Falloff")
layout.separator()
layout.prop(md, "start_position_object")
layout.prop_search(md, "vertex_group", ob, "vertex_groups")
split = layout.split(factor=0.33)
col = split.column()
col.label(text="Texture")
col = split.column()
col.template_ID(md, "texture", new="texture.new")
layout.prop(md, "texture_coords")
if md.texture_coords == 'UV' and ob.type == 'MESH':
layout.prop_search(md, "uv_layer", ob.data, "uv_layers")
elif md.texture_coords == 'OBJECT':
layout.prop(md, "texture_coords_object")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "speed", slider=True)
col.prop(md, "height", slider=True)
col = split.column()
col.prop(md, "width", slider=True)
col.prop(md, "narrowness", slider=True)
def REMESH(self, layout, _ob, md):
if not bpy.app.build_options.mod_remesh:
layout.label(text="Built without Remesh modifier")
return
layout.prop(md, "mode")
row = layout.row()
row.prop(md, "octree_depth")
row.prop(md, "scale")
if md.mode == 'SHARP':
layout.prop(md, "sharpness")
layout.prop(md, "use_smooth_shade")
layout.prop(md, "use_remove_disconnected")
row = layout.row()
row.active = md.use_remove_disconnected
row.prop(md, "threshold")
@staticmethod
def vertex_weight_mask(layout, ob, md):
layout.label(text="Influence/Mask Options:")
split = layout.split(factor=0.4)
split.label(text="Global Influence:")
split.prop(md, "mask_constant", text="")
if not md.mask_texture:
split = layout.split(factor=0.4)
split.label(text="Vertex Group Mask:")
split.prop_search(md, "mask_vertex_group", ob, "vertex_groups", text="")
if not md.mask_vertex_group:
split = layout.split(factor=0.4)
split.label(text="Texture Mask:")
split.template_ID(md, "mask_texture", new="texture.new")
if md.mask_texture:
split = layout.split()
col = split.column()
col.label(text="Texture Coordinates:")
col.prop(md, "mask_tex_mapping", text="")
col = split.column()
col.label(text="Use Channel:")
col.prop(md, "mask_tex_use_channel", text="")
if md.mask_tex_mapping == 'OBJECT':
layout.prop(md, "mask_tex_map_object", text="Object")
elif md.mask_tex_mapping == 'UV' and ob.type == 'MESH':
layout.prop_search(md, "mask_tex_uv_layer", ob.data, "uv_layers")
def VERTEX_WEIGHT_EDIT(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col.label(text="Default Weight:")
col.prop(md, "default_weight", text="")
col = split.column()
col.prop(md, "use_add")
sub = col.column()
sub.active = md.use_add
sub.prop(md, "add_threshold")
col = col.column()
col.prop(md, "use_remove")
sub = col.column()
sub.active = md.use_remove
sub.prop(md, "remove_threshold")
layout.separator()
layout.prop(md, "falloff_type")
if md.falloff_type == 'CURVE':
layout.template_curve_mapping(md, "map_curve")
# Common mask options
layout.separator()
self.vertex_weight_mask(layout, ob, md)
def VERTEX_WEIGHT_MIX(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group A:")
col.prop_search(md, "vertex_group_a", ob, "vertex_groups", text="")
col.label(text="Default Weight A:")
col.prop(md, "default_weight_a", text="")
col.label(text="Mix Mode:")
col.prop(md, "mix_mode", text="")
col = split.column()
col.label(text="Vertex Group B:")
col.prop_search(md, "vertex_group_b", ob, "vertex_groups", text="")
col.label(text="Default Weight B:")
col.prop(md, "default_weight_b", text="")
col.label(text="Mix Set:")
col.prop(md, "mix_set", text="")
# Common mask options
layout.separator()
self.vertex_weight_mask(layout, ob, md)
def VERTEX_WEIGHT_PROXIMITY(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column()
col.label(text="Target Object:")
col.prop(md, "target", text="")
split = layout.split()
col = split.column()
col.label(text="Distance:")
col.prop(md, "proximity_mode", text="")
if md.proximity_mode == 'GEOMETRY':
col.row().prop(md, "proximity_geometry")
col = split.column()
col.label()
col.prop(md, "min_dist")
col.prop(md, "max_dist")
layout.separator()
layout.prop(md, "falloff_type")
# Common mask options
layout.separator()
self.vertex_weight_mask(layout, ob, md)
def SKIN(self, layout, _ob, md):
row = layout.row()
row.operator("object.skin_armature_create", text="Create Armature")
row.operator("mesh.customdata_skin_add")
layout.separator()
row = layout.row(align=True)
row.prop(md, "branch_smoothing")
row.prop(md, "use_smooth_shade")
split = layout.split()
col = split.column()
col.label(text="Selected Vertices:")
sub = col.column(align=True)
sub.operator("object.skin_loose_mark_clear", text="Mark Loose").action = 'MARK'
sub.operator("object.skin_loose_mark_clear", text="Clear Loose").action = 'CLEAR'
sub = col.column()
sub.operator("object.skin_root_mark", text="Mark Root")
sub.operator("object.skin_radii_equalize", text="Equalize Radii")
col = split.column()
col.label(text="Symmetry Axes:")
col.prop(md, "use_x_symmetry")
col.prop(md, "use_y_symmetry")
col.prop(md, "use_z_symmetry")
def TRIANGULATE(self, layout, _ob, md):
row = layout.row()
col = row.column()
col.label(text="Quad Method:")
col.prop(md, "quad_method", text="")
col.prop(md, "keep_custom_normals")
col = row.column()
col.label(text="Ngon Method:")
col.prop(md, "ngon_method", text="")
col.label(text="Minimum Vertices:")
col.prop(md, "min_vertices", text="")
def UV_WARP(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "center")
col = split.column()
col.label(text="UV Axis:")
col.prop(md, "axis_u", text="")
col.prop(md, "axis_v", text="")
split = layout.split()
col = split.column()
col.label(text="From:")
col.prop(md, "object_from", text="")
col = split.column()
col.label(text="To:")
col.prop(md, "object_to", text="")
split = layout.split()
col = split.column()
obj = md.object_from
if obj and obj.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "bone_from", obj.data, "bones", text="")
col = split.column()
obj = md.object_to
if obj and obj.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "bone_to", obj.data, "bones", text="")
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column()
col.label(text="UV Map:")
col.prop_search(md, "uv_layer", ob.data, "uv_layers", text="")
def WIREFRAME(self, layout, ob, md):
has_vgroup = bool(md.vertex_group)
split = layout.split()
col = split.column()
col.prop(md, "thickness", text="Thickness")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = has_vgroup
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
row = col.row(align=True)
row.active = has_vgroup
row.prop(md, "thickness_vertex_group", text="Factor")
col.prop(md, "use_crease", text="Crease Edges")
row = col.row()
row.active = md.use_crease
row.prop(md, "crease_weight", text="Crease Weight")
col = split.column()
col.prop(md, "offset")
col.prop(md, "use_even_offset", text="Even Thickness")
col.prop(md, "use_relative_offset", text="Relative Thickness")
col.prop(md, "use_boundary", text="Boundary")
col.prop(md, "use_replace", text="Replace Original")
col.prop(md, "material_offset", text="Material Offset")
def DATA_TRANSFER(self, layout, ob, md):
row = layout.row(align=True)
row.prop(md, "object")
sub = row.row(align=True)
sub.active = bool(md.object)
sub.prop(md, "use_object_transform", text="", icon='GROUP')
layout.separator()
split = layout.split(factor=0.333)
split.prop(md, "use_vert_data")
use_vert = md.use_vert_data
row = split.row()
row.active = use_vert
row.prop(md, "vert_mapping", text="")
if use_vert:
col = layout.column(align=True)
split = col.split(factor=0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_verts")
sub = split.column(align=True)
row = sub.row(align=True)
row.prop(md, "layers_vgroup_select_src", text="")
row.label(icon='RIGHTARROW')
row.prop(md, "layers_vgroup_select_dst", text="")
row = sub.row(align=True)
row.label(text="", icon='NONE')
layout.separator()
split = layout.split(factor=0.333)
split.prop(md, "use_edge_data")
use_edge = md.use_edge_data
row = split.row()
row.active = use_edge
row.prop(md, "edge_mapping", text="")
if use_edge:
col = layout.column(align=True)
split = col.split(factor=0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_edges")
layout.separator()
split = layout.split(factor=0.333)
split.prop(md, "use_loop_data")
use_loop = md.use_loop_data
row = split.row()
row.active = use_loop
row.prop(md, "loop_mapping", text="")
if use_loop:
col = layout.column(align=True)
split = col.split(factor=0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_loops")
sub = split.column(align=True)
row = sub.row(align=True)
row.label(text="", icon='NONE')
row = sub.row(align=True)
row.prop(md, "layers_vcol_select_src", text="")
row.label(icon='RIGHTARROW')
row.prop(md, "layers_vcol_select_dst", text="")
row = sub.row(align=True)
row.prop(md, "layers_uv_select_src", text="")
row.label(icon='RIGHTARROW')
row.prop(md, "layers_uv_select_dst", text="")
col.prop(md, "islands_precision")
layout.separator()
split = layout.split(factor=0.333)
split.prop(md, "use_poly_data")
use_poly = md.use_poly_data
row = split.row()
row.active = use_poly
row.prop(md, "poly_mapping", text="")
if use_poly:
col = layout.column(align=True)
split = col.split(factor=0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_polys")
layout.separator()
split = layout.split()
col = split.column()
row = col.row(align=True)
sub = row.row(align=True)
sub.active = md.use_max_distance
sub.prop(md, "max_distance")
row.prop(md, "use_max_distance", text="", icon='STYLUS_PRESSURE')
col = split.column()
col.prop(md, "ray_radius")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "mix_mode")
col.prop(md, "mix_factor")
col = split.column()
row = col.row()
row.active = bool(md.object)
row.operator("object.datalayout_transfer", text="Generate Data Layers")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
def NORMAL_EDIT(self, layout, ob, md):
has_vgroup = bool(md.vertex_group)
do_polynors_fix = not md.no_polynors_fix
needs_object_offset = (((md.mode == 'RADIAL') and not md.target) or
((md.mode == 'DIRECTIONAL') and md.use_direction_parallel))
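        # The offset field is only meaningful in 'RADIAL' mode without a target,
        # or in 'DIRECTIONAL' mode with use_direction_parallel enabled.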
row = layout.row()
row.prop(md, "mode", expand=True)
split = layout.split()
col = split.column()
col.prop(md, "target", text="")
sub = col.column(align=True)
sub.active = needs_object_offset
sub.prop(md, "offset")
row = col.row(align=True)
col = split.column()
row = col.row()
row.active = (md.mode == 'DIRECTIONAL')
row.prop(md, "use_direction_parallel")
subcol = col.column(align=True)
subcol.label(text="Mix Mode:")
subcol.prop(md, "mix_mode", text="")
subcol.prop(md, "mix_factor")
row = subcol.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = has_vgroup
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
row = subcol.row(align=True)
row.prop(md, "mix_limit")
row.prop(md, "no_polynors_fix", text="", icon='UNLOCKED' if do_polynors_fix else 'LOCKED')
def CORRECTIVE_SMOOTH(self, layout, ob, md):
is_bind = md.is_bind
layout.prop(md, "factor", text="Factor")
layout.prop(md, "iterations")
row = layout.row()
row.prop(md, "smooth_type")
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
col = split.column()
col.prop(md, "use_only_smooth")
col.prop(md, "use_pin_boundary")
layout.prop(md, "rest_source")
if md.rest_source == 'BIND':
layout.operator("object.correctivesmooth_bind", text="Unbind" if is_bind else "Bind")
def WEIGHTED_NORMAL(self, layout, ob, md):
layout.label(text="Weighting Mode:")
split = layout.split(align=True)
col = split.column(align=True)
col.prop(md, "mode", text="")
col.prop(md, "weight", text="Weight")
col.prop(md, "keep_sharp")
col = split.column(align=True)
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.active = bool(md.vertex_group)
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
col.prop(md, "thresh", text="Threshold")
col.prop(md, "face_influence")
class DATA_PT_gpencil_modifiers(ModifierButtonsPanel, Panel):
bl_label = "Modifiers"
def check_conflicts(self, layout, ob):
for md in ob.grease_pencil_modifiers:
if md.type == 'GP_TIME':
row = layout.row()
row.label(text="Build and Time Offset modifier not compatible", icon='ERROR')
break
@classmethod
def poll(cls, context):
ob = context.object
return ob and ob.type == 'GPENCIL'
def draw(self, context):
layout = self.layout
ob = context.object
layout.operator_menu_enum("object.gpencil_modifier_add", "type")
for md in ob.grease_pencil_modifiers:
box = layout.template_greasepencil_modifier(md)
if box:
# match enum type to our functions, avoids a lookup table.
getattr(self, md.type)(box, ob, md)
    # the md.type enum is (ab)used for a lookup on function names
    # ...to avoid lengthy if statements,
    # so each modifier type must have a function here.
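    # e.g. a modifier with md.type == 'GP_NOISE' is drawn by self.GP_NOISE(box, ob, md)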
def GP_NOISE(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
row = col.row(align=True)
row.prop(md, "factor")
row.prop(md, "random", text="", icon='TIME', toggle=True)
row = col.row()
row.enabled = md.random
row.prop(md, "step")
row = col.row()
row.enabled = md.random
row.prop(md, "seed")
col.prop(md, "full_stroke")
col.prop(md, "move_extreme")
row = layout.row(align=True)
row.label(text="Affect:")
row = layout.row(align=True)
row.prop(md, "use_edit_position", text="Position", icon='MESH_DATA', toggle=True)
row.prop(md, "use_edit_strength", text="Strength", icon='COLOR', toggle=True)
row.prop(md, "use_edit_thickness", text="Thickness", icon='LINE_DATA', toggle=True)
row.prop(md, "use_edit_uv", text="UV", icon='MOD_UVPROJECT', toggle=True)
col = layout.column()
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_SMOOTH(self, layout, ob, md):
gpd = ob.data
col = layout.column()
col.prop(md, "factor")
col.prop(md, "step")
col.label(text="Affect:")
row = col.row(align=True)
row.prop(md, "use_edit_position", text="Position", icon='MESH_DATA', toggle=True)
row.prop(md, "use_edit_strength", text="Strength", icon='COLOR', toggle=True)
row.prop(md, "use_edit_thickness", text="Thickness", icon='LINE_DATA', toggle=True)
row.prop(md, "use_edit_uv", text="UV", icon='MOD_UVPROJECT', toggle=True)
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_SUBDIV(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
row = col.row(align=True)
row.prop(md, "level")
row.prop(md, "simple", text="", icon='PARTICLE_POINT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_SIMPLIFY(self, layout, ob, md):
gpd = ob.data
row = layout.row()
row.prop(md, "mode")
split = layout.split()
col = split.column()
col.label(text="Settings:")
if md.mode == 'FIXED':
col.prop(md, "step")
elif md.mode == 'ADAPTIVE':
col.prop(md, "factor")
elif md.mode == 'SAMPLE':
col.prop(md, "length")
elif md.mode == 'MERGE':
col.prop(md, "distance")
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_THICK(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
row = col.row(align=True)
row.prop(md, "thickness", text="Thickness Factor")
col.prop(md, "normalize_thickness")
if not md.normalize_thickness:
split = layout.split()
col = split.column()
col.prop(md, "use_custom_curve")
if md.use_custom_curve:
col.template_curve_mapping(md, "curve")
col = layout.column()
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_TINT(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
col.prop(md, "color")
col.prop(md, "factor")
row = layout.row()
row.prop(md, "create_materials")
row.prop(md, "modify_color")
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_TIME(self, layout, ob, md):
gpd = ob.data
row = layout.row()
row.prop(md, "mode", text="Mode")
row = layout.row()
if md.mode == 'FIX':
txt = "Frame"
else:
txt = "Frame Offset"
row.prop(md, "offset", text=txt)
row = layout.row()
row.enabled = md.mode != 'FIX'
row.prop(md, "frame_scale")
row = layout.row()
row.separator()
row = layout.row()
row.enabled = md.mode != 'FIX'
row.prop(md, "use_custom_frame_range")
row = layout.row(align=True)
row.enabled = md.mode != 'FIX' and md.use_custom_frame_range is True
row.prop(md, "frame_start")
row.prop(md, "frame_end")
row = layout.row()
row.enabled = md.mode != 'FIX'
row.prop(md, "use_keep_loop")
row = layout.row()
row.label(text="Layer:")
row = layout.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_COLOR(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
col.label(text="Color:")
col.prop(md, "hue", text="H", slider=True)
col.prop(md, "saturation", text="S", slider=True)
col.prop(md, "value", text="V", slider=True)
row = layout.row()
row.prop(md, "create_materials")
row.prop(md, "modify_color")
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_OPACITY(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
col.label(text="Opacity:")
col.prop(md, "factor")
row = layout.row()
row.prop(md, "opacity_mode", text="Mode")
if md.opacity_mode == 'MATERIAL':
row = layout.row()
row.prop(md, "create_materials")
row.prop(md, "modify_color", text="Change")
else:
col = layout.column()
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_ARRAY(self, layout, ob, md):
gpd = ob.data
col = layout.column()
col.prop(md, "count")
split = layout.split()
col = split.column()
col.label(text="Offset:")
col.prop(md, "offset", text="")
col.prop(md, "offset_object", text="Object")
col = split.column()
col.label(text="Shift:")
col.prop(md, "shift", text="")
split = layout.split()
col = split.column()
col.label(text="Rotation:")
col.prop(md, "rotation", text="")
col.separator()
row = col.row(align=True)
row.prop(md, "random_rot", text="", icon='TIME', toggle=True)
row.prop(md, "rot_factor", text="")
col = split.column()
col.label(text="Scale:")
col.prop(md, "scale", text="")
col.separator()
row = col.row(align=True)
row.prop(md, "random_scale", text="", icon='TIME', toggle=True)
row.prop(md, "scale_factor", text="")
col = layout.column()
col.prop(md, "replace_material", text="Material")
col.prop(md, "keep_on_top", text="Keep original stroke on top")
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_BUILD(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
self.check_conflicts(col, ob)
col.prop(md, "mode")
if md.mode == 'CONCURRENT':
col.prop(md, "concurrent_time_alignment")
col.separator()
col.prop(md, "transition")
sub = col.column(align=True)
sub.prop(md, "start_delay")
sub.prop(md, "length")
col = layout.column(align=True)
col.prop(md, "use_restrict_frame_range")
sub = col.column(align=True)
sub.active = md.use_restrict_frame_range
sub.prop(md, "frame_start", text="Start")
sub.prop(md, "frame_end", text="End")
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_LATTICE(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
layout.prop(md, "strength", slider=True)
col = layout.column()
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_MIRROR(self, layout, ob, md):
gpd = ob.data
row = layout.row(align=True)
row.prop(md, "x_axis")
row.prop(md, "y_axis")
row.prop(md, "z_axis")
layout.label(text="Object:")
layout.prop(md, "object", text="")
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_HOOK(self, layout, ob, md):
gpd = ob.data
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
if md.object and md.object.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "subtarget", md.object.data, "bones", text="")
use_falloff = (md.falloff_type != 'NONE')
layout.separator()
row = layout.row(align=True)
if use_falloff:
row.prop(md, "falloff_radius")
row.prop(md, "strength", slider=True)
layout.prop(md, "falloff_type")
col = layout.column()
if use_falloff:
if md.falloff_type == 'CURVE':
col.template_curve_mapping(md, "falloff_curve")
split = layout.split()
col = split.column()
col.prop(md, "use_falloff_uniform")
col = layout.column()
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_OFFSET(self, layout, ob, md):
gpd = ob.data
col = layout.column()
col.prop(md, "location")
col.prop(md, "scale")
col.prop(md, "rotation")
col = layout.column()
col.separator()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Material:")
row = col.row(align=True)
row.prop_search(md, "material", gpd, "materials", text="", icon='SHADING_TEXTURE')
row.prop(md, "invert_materials", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "pass_index", text="Pass")
row.prop(md, "invert_material_pass", text="", icon='ARROW_LEFTRIGHT')
col = layout.column()
col.separator()
col.label(text="Layer:")
row = col.row(align=True)
row.prop_search(md, "layer", gpd, "layers", text="", icon='GREASEPENCIL')
row.prop(md, "invert_layers", text="", icon='ARROW_LEFTRIGHT')
row = layout.row(align=True)
row.prop(md, "layer_pass", text="Pass")
row.prop(md, "invert_layer_pass", text="", icon='ARROW_LEFTRIGHT')
def GP_ARMATURE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
# col.prop(md, "use_deform_preserve_volume")
col = split.column()
col.label(text="Bind To:")
col.prop(md, "use_vertex_groups", text="Vertex Groups")
col.prop(md, "use_bone_envelopes", text="Bone Envelopes")
layout.separator()
row = layout.row(align=True)
row.label(text="Vertex Group:")
row = layout.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
classes = (
DATA_PT_modifiers,
DATA_PT_gpencil_modifiers,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
| 33.280784
| 108
| 0.569953
|
d54146806f5cab20064c2ab91efced62d8ebf1b0
| 3,223
|
py
|
Python
|
optimization-cloud-functions/energy-management/energy_management.py
|
josealvarez97/Scientific-Computing
|
b46b96a5566b34ece483c6e64a9fa43446d572df
|
[
"Unlicense"
] | null | null | null |
optimization-cloud-functions/energy-management/energy_management.py
|
josealvarez97/Scientific-Computing
|
b46b96a5566b34ece483c6e64a9fa43446d572df
|
[
"Unlicense"
] | null | null | null |
optimization-cloud-functions/energy-management/energy_management.py
|
josealvarez97/Scientific-Computing
|
b46b96a5566b34ece483c6e64a9fa43446d572df
|
[
"Unlicense"
] | null | null | null |
from cvxpower import *
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
# import graphviz
# import agraph
def optimize_static_network(json_body):
g = nx.DiGraph()
devices = []
for load in json_body["loads"]:
load_obj = FixedLoad(power=load["power"], name=load["name"])
g.add_node(load["name"], cvxp_obj = load_obj)
devices.append(load_obj)
for gen in json_body["generators"]:
gen_obj = Generator(power_max=gen["power_max"], alpha=gen["alpha"],
beta=gen["beta"], name=gen["name"])
g.add_node(gen_obj.name, cvxp_obj = gen_obj)
devices.append(gen_obj)
for line in json_body["lines"]:
line_obj = TransmissionLine(power_max=line["power_max"], name=line["name"])
g.add_node(line_obj.name, cvxp_obj = line_obj)
devices.append(line_obj)
nets = []
for net in json_body["nets"]:
net_terminals = [nx.get_node_attributes(g, "cvxp_obj")[terminal["device"]]
.terminals[terminal["terminal"]]
for terminal in net["terminals"]]
net_obj = Net(net_terminals, name=net["name"])
g.add_node(net_obj.name, cvxp_obj=net_obj)
for terminal in net["terminals"]:
g.add_edge(net["name"], terminal["device"])
nets.append(net_obj)
g_nodes = g.nodes()
color_map = []
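    # Note (editor): color_map is only prepared for an optional networkx drawing
    # step; it is not used by the optimization below.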
for n in g:
if type(g_nodes[n]['cvxp_obj']) == FixedLoad:
color_map.append('lightgray')
elif type(g_nodes[n]['cvxp_obj']) == Generator:
color_map.append('lightgray')
elif type(g_nodes[n]['cvxp_obj']) == TransmissionLine:
color_map.append('lightgray')
elif type(g_nodes[n]['cvxp_obj']) == Net:
color_map.append('gray')
    network = Group(devices, nets)
network.init_problem()
network.optimize(solver="ECOS")
return network.results.summary()
json_body = {
"loads": [{"name":"load1", "power": 50}, {"name": "load2", "power": 100}],
"generators": [{"name": "gen1", "power_max":1000, "alpha": 0.02, "beta": 30},
{"name": "gen2", "power_max": 100, "alpha": 0.2, "beta": 0}],
"lines": [{"name": "line1", "power_max": 50}, {"name": "line2", "power_max": 10},
{"name": "line3", "power_max": 50}],
"nets": [{"name": "net1",
"terminals": [{"device": "load1", "terminal": 0},
{"device": "gen1", "terminal": 0},
{"device": "line1", "terminal": 0},
{"device": "line2", "terminal": 0}]},
{"name": "net2",
"terminals": [{"device": "load2", "terminal": 0},
{"device": "line1", "terminal": 1},
{"device": "line3", "terminal": 0}]},
{"name": "net3",
"terminals": [{"device": "gen2", "terminal": 0},
{"device": "line2", "terminal": 1},
{"device": "line3", "terminal": 1}]}]
}
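# Editor's note: the sample above encodes a small three-net network; net1 ties
# load1 and gen1 to line1 and line2, net2 ties load2 to line1 and line3, and
# net3 ties gen2 to line2 and line3.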
def application():
result = optimize_static_network(json_body)
print(result)
if __name__ == '__main__':
application()
| 36.625
| 85
| 0.533975
|
860c7fb3eee48391f2cb4815a9745310b44fbe56
| 493
|
py
|
Python
|
modules/vtk_basic/vtkImageShiftScale.py
|
chrisidefix/devide
|
99bfe156e710fa47ba7ae88b0ce1eef592a3a439
|
[
"BSD-3-Clause"
] | 25
|
2015-08-24T16:05:14.000Z
|
2020-12-09T20:07:14.000Z
|
modules/vtk_basic/vtkImageShiftScale.py
|
chrisidefix/devide
|
99bfe156e710fa47ba7ae88b0ce1eef592a3a439
|
[
"BSD-3-Clause"
] | 1
|
2016-02-16T21:18:10.000Z
|
2016-02-16T21:18:10.000Z
|
modules/vtk_basic/vtkImageShiftScale.py
|
chrisidefix/devide
|
99bfe156e710fa47ba7ae88b0ce1eef592a3a439
|
[
"BSD-3-Clause"
] | 5
|
2016-02-16T20:05:37.000Z
|
2020-01-31T11:27:39.000Z
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageShiftScale(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageShiftScale(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| 37.923077
| 63
| 0.713996
|
7733ca2ec38a7b35ece34f46906655864bbc97e0
| 2,641
|
py
|
Python
|
basic/wordcount.py
|
ptmccarthy/google-python-exercises
|
d3193d743f36e8026e9851d59be39f77d16b21a6
|
[
"Apache-2.0"
] | null | null | null |
basic/wordcount.py
|
ptmccarthy/google-python-exercises
|
d3193d743f36e8026e9851d59be39f77d16b21a6
|
[
"Apache-2.0"
] | null | null | null |
basic/wordcount.py
|
ptmccarthy/google-python-exercises
|
d3193d743f36e8026e9851d59be39f77d16b21a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
def print_words(filename):
words = read_file_words(filename)
for word in sorted(words.keys()):
print word, words[word]
sys.exit(0)
def print_top(filename):
words = read_file_words(filename)
sorted_top_words = sorted(words.items(), key=getCount, reverse=True)
for word in sorted_top_words[:20]:
print word[0], word[1]
sys.exit(0)
def getCount(item):
return item[1]
def read_file_words(filename):
count = {}
f = open(filename, 'rU')
for line in f:
words = line.split()
for word in words:
word = word.lower()
if word not in count:
count[word] = 1
else:
count[word] += 1
f.close()
return count
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| 24.915094
| 79
| 0.673987
|
2461aee41de3e949c04f20c94b17deb97c2a6aff
| 4,266
|
py
|
Python
|
misc.py
|
Steve-Teal/eforth-misc16
|
1975068b33b2df5beb1c38efc27f4151d59bb766
|
[
"MIT"
] | 5
|
2021-12-28T23:50:21.000Z
|
2022-02-07T01:53:31.000Z
|
misc.py
|
Steve-Teal/misc-cpu-forth
|
f7f369b2c027775c087386b9c84019e78f125b1f
|
[
"MIT"
] | 1
|
2022-01-20T21:50:13.000Z
|
2022-01-20T21:50:13.000Z
|
misc.py
|
Steve-Teal/misc-cpu-forth
|
f7f369b2c027775c087386b9c84019e78f125b1f
|
[
"MIT"
] | null | null | null |
import sys
import miscsim
import miscasm
import time
def filename(extension):
ignore = True
for arg in sys.argv:
if not ignore and len(arg) > 4:
if arg[-4:].lower() == extension.lower():
return arg
ignore = False
return ""
def makemif(filename,image,length):
try:
file = open(filename,'wt')
except IOError:
print("Failed to open output file {:s}".format(filename))
sys.exit()
# Write header to file
file.write("DEPTH = {:d};\n".format(length))
file.write("WIDTH = 16;\n")
file.write("ADDRESS_RADIX = HEX;\n")
file.write("DATA_RADIX = HEX;\n")
file.write("CONTENT\nBEGIN\n")
# Write data
for address in range(0,length):
file.write("{:03X} : {:04X} ;\n".format(address,image[address]))
# End and close file
file.write("END\n")
file.close()
print("MIF file {:s} created".format(filename))
def makebin(filename,image,length):
try:
file = open(filename,'wb')
except IOError:
print("Failed to open output file {:s}".format(filename))
sys.exit()
for address in range(0,length):
file.write(bytes([image[address]>>8,image[address]&0xff]))
file.close()
print("BIN file {:s} created".format(filename))
def makelist(filename,asm):
try:
file = open(filename,'wt')
except IOError:
print("Failed to open output file {:s}".format(filename))
sys.exit()
text = " MISC-16 assembler V1.0 listing file {:s} {:s} ".format(filename,time.strftime('%d/%m/%Y %H:%M:%S', time.localtime()))
dash = '-' * len(text)
file.write(dash+'\n')
file.write(text+'\n')
file.write(dash+'\n')
for (linenumber,start,end,linetext) in asm.listing:
linetext = linetext.rstrip()
linenumber = "{:d}".format(linenumber)
data = ""
memoryindex = start
while True:
address = "{:04X} ".format(memoryindex)
if memoryindex < end:
data = "{:04X}".format(asm.image[memoryindex])
memoryindex += 1
if memoryindex < end:
data += "{:04X}".format(asm.image[memoryindex])
memoryindex += 1
if data == "":
address = ""
data = data.ljust(10)
address = address.ljust(6)
file.write(linenumber.ljust(6)+address+data+linetext+'\n')
linenumber = ""
linetext = ""
if memoryindex >= end:
break
text = " Symbol Table"
dash = "-------------------"
file.write(dash+'\n')
file.write(text+'\n')
file.write(dash+'\n')
for label in asm.labels:
file.write(label.ljust(15)+"{:04X}\n".format(asm.labels[label]))
text = " End of File"
dash = "-------------------"
file.write(dash+'\n')
file.write(text+'\n')
file.write(dash+'\n')
file.close()
print("LST file {:s} created".format(filename))
if __name__ == "__main__":
# Extract filenames from command line arguments
sourcefile = filename(".asm")
binfile = filename(".bin")
miffile = filename(".mif")
lstfile = filename(".lst")
# Display usage if no source file specified
if not sourcefile:
print("Usage: python misc.py <input.asm> [out.mif][out.bin][out.lst]")
sys.exit()
# Open source file
try:
file = open(sourcefile,"rt")
except IOError:
print("Could not open file {:s}".format(sourcefile))
sys.exit()
# Assemble file
asm = miscasm.miscasm(file)
file.close()
# Bail out if we have errors
if asm.errorcount != 0:
print("Assembly failed with {:d} errors".format(asm.errorcount))
sys.exit()
# Success
print("Success: assembly completed {:d} bytes".format(asm.memoryindex<<1))
# Generate FPGA file
if miffile:
makemif(miffile,asm.image,asm.memoryindex)
# Generate BIN file
if binfile:
makebin(binfile,asm.image,asm.memoryindex)
# Generate listing file
if lstfile:
makelist(lstfile,asm)
# Run simulator if no output files specified
if not (miffile or binfile or lstfile):
miscsim.miscsim(asm.image)
# end of file
| 29.219178
| 134
| 0.570323
|
50ce04ec142d11b0bc7a16141c080b21612cd827
| 22,223
|
py
|
Python
|
redditpost/redditpost.py
|
sravan1946/flare-cogs
|
9128911c97a49f0de252d3ec9ce691e3f8a655aa
|
[
"MIT"
] | 1
|
2022-01-23T22:59:54.000Z
|
2022-01-23T22:59:54.000Z
|
redditpost/redditpost.py
|
sravan1946/flare-cogs
|
9128911c97a49f0de252d3ec9ce691e3f8a655aa
|
[
"MIT"
] | null | null | null |
redditpost/redditpost.py
|
sravan1946/flare-cogs
|
9128911c97a49f0de252d3ec9ce691e3f8a655aa
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import re
from datetime import datetime, timedelta
from html import unescape
from typing import Optional
import aiohttp
import asyncpraw
import asyncprawcore
import discord
import tabulate
import validators
from discord.http import Route
from redbot.core import Config, commands
from redbot.core.commands.converter import TimedeltaConverter
from redbot.core.utils.chat_formatting import box, humanize_timedelta, pagify, spoiler
log = logging.getLogger("red.flare.redditpost")
REDDIT_LOGO = "https://www.redditinc.com/assets/images/site/reddit-logo.png"
REDDIT_REGEX = re.compile(
r"(?i)\A(((https?://)?(www\.)?reddit\.com/)?r/)?([A-Za-z0-9][A-Za-z0-9_]{2,20})/?\Z"
)
class RedditPost(commands.Cog):
"""A reddit auto posting cog."""
__version__ = "0.4.0"
def format_help_for_context(self, ctx):
"""Thanks Sinbad."""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=959327661803438081, force_registration=True)
self.config.register_channel(reddits={})
self.config.register_global(delay=300, SCHEMA_VERSION=1)
self.session = aiohttp.ClientSession()
self.bg_loop_task: Optional[asyncio.Task] = None
self.notified = False
self.client = None
self.bot.loop.create_task(self.init())
async def red_get_data_for_user(self, *, user_id: int):
        # this cog does not store any data
return {}
async def red_delete_data_for_user(self, *, requester, user_id: int) -> None:
        # this cog does not store any data
pass
async def init(self):
await self.bot.wait_until_red_ready()
if await self.config.SCHEMA_VERSION() == 1:
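            # Schema v1 stored a full reddit URL per feed; derive the bare
            # subreddit name from it for the new "subreddit" field.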
data = await self.config.all_channels()
for channel, _ in data.items():
async with self.config.channel_from_id(channel).reddits() as sub_data:
for feed in sub_data:
try:
sub_data[feed]["subreddit"] = sub_data[feed]["url"].split("/")[4]
except IndexError:
sub_data[feed]["subreddit"] = None
await self.bot.send_to_owners(
"Hi there.\nRedditPost has now been given an update to accomodate the new reddit ratelimits. This cog now requires authenthication.\nTo setup the cog create an application via https://www.reddit.com/prefs/apps/. Once this is done, copy the client ID found under the name and the secret found inside.\nYou can then setup this cog by using `[p]set api redditpost clientid CLIENT_ID_HERE clientsecret CLIENT_SECRET_HERE`\n"
)
await self.config.SCHEMA_VERSION.set(2)
token = await self.bot.get_shared_api_tokens("redditpost")
try:
self.client = asyncpraw.Reddit(
client_id=token.get("clientid", None),
client_secret=token.get("clientsecret", None),
user_agent=f"{self.bot.user.name} Discord Bot",
)
self.bg_loop_task = self.bot.loop.create_task(self.bg_loop())
except Exception as exc:
log.error("Exception in init: ", exc_info=exc)
await self.bot.send_to_owners(
"An exception occured in the authenthication. Please ensure the client id and secret are set correctly.\nTo setup the cog create an application via https://www.reddit.com/prefs/apps/. Once this is done, copy the client ID found under the name and the secret found inside.\nYou can then setup this cog by using `[p]set api redditpost clientid CLIENT_ID_HERE clientsecret CLIENT_SECRET_HERE`"
)
def cog_unload(self):
if self.bg_loop_task:
self.bg_loop_task.cancel()
self.bot.loop.create_task(self.session.close())
self.bot.loop.create_task(self.client.close())
@commands.Cog.listener()
async def on_red_api_tokens_update(self, service_name, api_tokens):
if service_name == "redditpost":
try:
self.client = asyncpraw.Reddit(
client_id=api_tokens.get("clientid", None),
client_secret=api_tokens.get("clientsecret", None),
user_agent=f"{self.bot.user.name} Discord Bot",
)
except Exception as exc:
log.error("Exception in init: ", exc_info=exc)
await self.bot.send_to_owners(
"An exception occured in the authenthication. Please ensure the client id and secret are set correctly.\nTo setup the cog create an application via https://www.reddit.com/prefs/apps/. Once this is done, copy the client ID found under the name and the secret found inside.\nYou can then setup this cog by using `[p]set api redditpost clientid CLIENT_ID_HERE clientsecret CLIENT_SECRET_HERE`"
)
async def bg_loop(self):
await self.bot.wait_until_ready()
while True:
try:
await self.do_feeds()
delay = await self.config.delay()
await asyncio.sleep(delay)
except Exception as exc:
log.error("Exception in bg_loop: ", exc_info=exc)
if not self.notified:
msg = "An exception occured in the background loop for `redditpost`. Check your logs for more details and if possible, report them to the cog creator.\nYou will no longer receive this message until you reload the cog to reduce spam."
await self.bot.send_to_owners(msg)
self.notified = True
async def do_feeds(self):
if self.client is None:
return
feeds = {}
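        # Cache the fetched submissions per subreddit so channels that follow the
        # same feed only trigger one reddit API call per loop.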
channel_data = await self.config.all_channels()
for channel_id, data in channel_data.items():
channel = self.bot.get_channel(channel_id)
if not channel:
continue
for sub, feed in data["reddits"].items():
url = feed.get("subreddit", None)
if not url:
continue
if url in feeds:
response = feeds[url]
else:
response = await self.fetch_feed(url)
feeds[url] = response
if response is None:
continue
time = await self.format_send(
response,
channel,
feed["last_post"],
url,
{
"latest": feed.get("latest", True),
"webhooks": feed.get("webhooks", False),
"logo": feed.get("logo", REDDIT_LOGO),
"button": feed.get("button", True),
"image_only": feed.get("image_only", False),
},
)
if time is not None:
async with self.config.channel(channel).reddits() as feeds_data:
feeds_data[sub]["last_post"] = time
@staticmethod
def _clean_subreddit(subreddit: str):
subreddit = subreddit.lstrip("/")
match = REDDIT_REGEX.fullmatch(subreddit)
if match:
return match.groups()[-1].lower()
return None
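    # Editor's note: illustrative behaviour of REDDIT_REGEX above (hypothetical inputs):
    #   _clean_subreddit("r/Python")                          -> "python"
    #   _clean_subreddit("https://www.reddit.com/r/Python/")  -> "python"
    #   _clean_subreddit("not a subreddit!")                  -> None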
@commands.admin_or_permissions(manage_channels=True)
@commands.guild_only()
@commands.group(aliases=["redditfeed"])
async def redditpost(self, ctx):
"""Reddit auto-feed posting."""
@redditpost.command()
@commands.is_owner()
async def setup(self, ctx):
"""Details on setting up RedditPost"""
msg = "To setup the cog create an application via https://www.reddit.com/prefs/apps/. Once this is done, copy the client ID found under the name and the secret found inside.\nYou can then setup this cog by using `[p]set api redditpost clientid CLIENT_ID_HERE clientsecret CLIENT_SECRET_HERE`"
await ctx.send(msg)
@redditpost.command()
@commands.is_owner()
async def delay(
self,
ctx,
time: TimedeltaConverter(
minimum=timedelta(seconds=15), maximum=timedelta(hours=3), default_unit="seconds"
),
):
"""Set the delay used to check for new content."""
seconds = time.total_seconds()
await self.config.delay.set(seconds)
await ctx.tick()
await ctx.send(
f"The {humanize_timedelta(seconds=seconds)} delay will come into effect on the next loop."
)
@redditpost.command()
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def add(self, ctx, subreddit: str, channel: Optional[discord.TextChannel] = None):
"""Add a subreddit to post new content from."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
if self.client is None:
await ctx.send(
f"Please setup the client correctly, `{ctx.clean_prefix}redditpost setup` for more information"
)
return
async with ctx.typing():
try:
subreddit_info = await self.client.subreddit(subreddit, fetch=True)
except asyncprawcore.Forbidden:
return await ctx.send("I can't view private subreddits.")
except asyncprawcore.NotFound:
return await ctx.send("This subreddit doesn't exist.")
except Exception:
return await ctx.send("Something went wrong while searching for this subreddit.")
if subreddit_info.over18 and not channel.is_nsfw():
return await ctx.send(
"You're trying to add an NSFW subreddit to a SFW channel. Please edit the channel or try another."
)
logo = REDDIT_LOGO if not subreddit_info.icon_img else subreddit_info.icon_img
async with self.config.channel(channel).reddits() as feeds:
if subreddit in feeds:
return await ctx.send("That subreddit is already set to post.")
response = await self.fetch_feed(subreddit)
if response is None:
return await ctx.send("That didn't seem to be a valid reddit feed.")
feeds[subreddit] = {
"subreddit": subreddit,
"last_post": datetime.now().timestamp(),
"latest": True,
"logo": logo,
"webhooks": False,
}
await ctx.tick()
@redditpost.command()
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def list(self, ctx, channel: discord.TextChannel = None):
"""Lists the current subreddits for the current channel, or a provided one."""
channel = channel or ctx.channel
data = await self.config.channel(channel).reddits()
if not data:
return await ctx.send("No subreddits here.")
output = [[k, v.get("webhooks", "False"), v.get("latest", True)] for k, v in data.items()]
out = tabulate.tabulate(output, headers=["Subreddit", "Webhooks", "Latest Posts"])
for page in pagify(str(out)):
await ctx.send(
embed=discord.Embed(
title=f"Subreddits for {channel}.",
description=box(page, lang="prolog"),
color=(await ctx.embed_color()),
)
)
@redditpost.command(name="remove")
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def remove_feed(
self, ctx, subreddit: str, channel: Optional[discord.TextChannel] = None
):
"""Removes a subreddit from the current channel, or a provided one."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
async with self.config.channel(channel).reddits() as feeds:
if subreddit not in feeds:
await ctx.send(f"No subreddit named {subreddit} in {channel.mention}.")
return
del feeds[subreddit]
await ctx.tick()
@redditpost.command(name="force")
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def force(self, ctx, subreddit: str, channel: Optional[discord.TextChannel] = None):
"""Force the latest post."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
feeds = await self.config.channel(channel).reddits()
if subreddit not in feeds:
await ctx.send(f"No subreddit named {subreddit} in {channel.mention}.")
return
if self.client is None:
await ctx.send(
f"Please setup the client correctly, `{ctx.clean_prefix}redditpost setup` for more information"
)
return
data = await self.fetch_feed(feeds[subreddit]["subreddit"])
if data is None:
return await ctx.send("No post could be found.")
await self.format_send(
data,
channel,
0,
subreddit,
{
"latest": True,
"webhooks": feeds[subreddit].get("webhooks", False),
"logo": feeds[subreddit].get("logo", REDDIT_LOGO),
"button": feeds[subreddit].get("button", True),
"image_only": False,
},
)
await ctx.tick()
@redditpost.command(name="latest")
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def latest(self, ctx, subreddit: str, latest: bool, channel: discord.TextChannel = None):
"""Whether to fetch all posts or just the latest post."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
async with self.config.channel(channel).reddits() as feeds:
if subreddit not in feeds:
await ctx.send(f"No subreddit named {subreddit} in {channel.mention}.")
return
feeds[subreddit]["latest"] = latest
await ctx.tick()
@redditpost.command()
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def button(
self, ctx, subreddit: str, use_button: bool, channel: discord.TextChannel = None
):
"""Whether to use buttons for the post URL."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
async with self.config.channel(channel).reddits() as feeds:
if subreddit not in feeds:
await ctx.send(f"No subreddit named {subreddit} in {channel.mention}.")
return
feeds[subreddit]["button"] = use_button
await ctx.tick()
@redditpost.command()
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def imageonly(
self, ctx, subreddit: str, on_or_off: bool, channel: discord.TextChannel = None
):
"""Whether to only post posts that contain an image."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
async with self.config.channel(channel).reddits() as feeds:
if subreddit not in feeds:
await ctx.send(f"No subreddit named {subreddit} in {channel.mention}.")
return
feeds[subreddit]["image_only"] = on_or_off
await ctx.tick()
@redditpost.command(
name="webhook", aliases=["webhooks"], usage="<subreddit> <true_or_false> [channel]"
)
@commands.bot_has_permissions(send_messages=True, embed_links=True, manage_webhooks=True)
async def webhook(
self, ctx, subreddit: str, webhook: bool, channel: discord.TextChannel = None
):
"""Whether to send the post as a webhook or message from the bot."""
channel = channel or ctx.channel
subreddit = self._clean_subreddit(subreddit)
if not subreddit:
return await ctx.send("That doesn't look like a subreddit name to me.")
async with self.config.channel(channel).reddits() as feeds:
if subreddit not in feeds:
await ctx.send(f"No subreddit named {subreddit} in {channel.mention}.")
return
feeds[subreddit]["webhooks"] = webhook
if webhook:
await ctx.send(f"New posts from r/{subreddit} will be sent as webhooks.")
else:
await ctx.send(f"New posts from r/{subreddit} will be sent as bot messages.")
await ctx.tick()
async def fetch_feed(self, subreddit: str):
try:
subreddit = await self.client.subreddit(subreddit)
resp = [submission async for submission in subreddit.new(limit=20)]
return resp or None
except Exception:
return None
async def format_send(self, data, channel, last_post, subreddit, settings):
timestamps = []
embeds = []
data = data[:1] if settings.get("latest", True) else data
webhook = None
try:
if (
settings.get("webhooks", False)
and channel.permissions_for(channel.guild.me).manage_webhooks
):
for hook in await channel.webhooks():
if hook.name == channel.guild.me.name:
webhook = hook
if webhook is None:
webhook = await channel.create_webhook(name=channel.guild.me.name)
except Exception as e:
log.error("Error in webhooks during reddit feed posting", exc_info=e)
for feed in data:
timestamp = feed.created_utc
if feed.over_18 and not channel.is_nsfw():
timestamps.append(timestamp)
continue
if timestamp <= last_post:
break
timestamps.append(timestamp)
desc = unescape(feed.selftext)
image = feed.url
link = "https://reddit.com" + feed.permalink
title = feed.title
if len(desc) > 2000:
desc = desc[:2000] + "..."
if len(title) > 252:
title = title[:252] + "..."
if feed.spoiler:
desc = "(spoiler)\n" + spoiler(desc)
embed = discord.Embed(
title=unescape(title),
url=unescape(link),
description=desc,
color=channel.guild.me.color,
timestamp=datetime.utcfromtimestamp(feed.created_utc),
)
embed.set_author(name=f"New post on r/{unescape(subreddit)}")
embed.set_footer(text=f"Submitted by /u/{unescape(feed.author.name)}")
images = False
if image.endswith(("png", "jpg", "jpeg", "gif")) and not feed.spoiler:
embed.set_image(url=unescape(image))
images = True
elif feed.permalink not in image and validators.url(image):
embed.add_field(name="Attachment", value=unescape(image))
if settings.get("image_only") and not images:
continue
embeds.append(embed)
if timestamps:
if embeds:
try:
for emb in embeds[::-1]:
if webhook is None:
try:
if settings.get("button", True):
payload = {"content": "", "embed": emb.to_dict()}
payload["components"] = [
{
"type": 1,
"components": [
{
"label": "Source",
"url": emb.url,
"style": 5,
"type": 2,
}
],
}
]
r = Route(
"POST",
"/channels/{channel_id}/messages",
channel_id=channel.id,
)
await self.bot._connection.http.request(r, json=payload)
else:
await channel.send(embed=emb)
except (discord.Forbidden, discord.HTTPException):
log.info(f"Error sending message feed in {channel}. Bypassing")
else:
await webhook.send(
username=f"r/{feed.subreddit}",
avatar_url=settings.get("icon", REDDIT_LOGO),
embed=emb,
)
except discord.HTTPException as exc:
log.error("Exception in bg_loop while sending message: ", exc_info=exc)
return timestamps[0]
return None
| 44.093254
| 436
| 0.56626
|
736a165b22e5bca0431b544713b899d8a74957fb
| 14,305
|
py
|
Python
|
automaticvolumeadjustment/src/AutomaticVolumeAdjustmentSetup.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 41
|
2016-01-21T17:54:44.000Z
|
2021-06-26T05:54:41.000Z
|
automaticvolumeadjustment/src/AutomaticVolumeAdjustmentSetup.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 22
|
2016-11-16T11:25:26.000Z
|
2021-12-13T09:13:06.000Z
|
automaticvolumeadjustment/src/AutomaticVolumeAdjustmentSetup.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 62
|
2016-02-05T22:55:48.000Z
|
2022-03-12T21:48:22.000Z
|
# -*- coding: utf-8 -*-
#
# AutomaticVolumeAdjustment E2
#
# $Id$
#
# Coded by Dr.Best (c) 2010
# Support: www.dreambox-tools.info
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported
# License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Property GmbH.
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_VALIGN_CENTER, eServiceReference
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChannelSelection import SimpleChannelSelection
from Components.MenuList import MenuList
from Components.Sources.StaticText import StaticText
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, config
from ServiceReference import ServiceReference
from AutomaticVolumeAdjustment import AutomaticVolumeAdjustment
from AutomaticVolumeAdjustmentConfig import AutomaticVolumeAdjustmentConfig
from skin import TemplatedListFonts, componentSizes
class AutomaticVolumeAdjustmentConfigScreen(ConfigListScreen, Screen):
skin = """
<screen name="AutomaticVolumeAdjustmentConfigScreen" position="center,center" size="820,400">
<ePixmap pixmap="skin_default/buttons/red.png" position="10,5" size="200,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="210,5" size="200,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="610,5" size="200,40" alphatest="on" />
<widget source="key_red" render="Label" position="10,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<widget source="key_green" render="Label" position="210,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<widget source="key_blue" render="Label" position="610,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<eLabel position="10,50" size="800,1" backgroundColor="grey" />
<widget name="config" position="10,60" size="800,240" enableWrapAround="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self.title = _("Automatic Volume Adjustment - Config")
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.keySave,
"red": self.keyCancel,
"blue": self.blue,
"cancel": self.keyCancel,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_blue"] = StaticText()
self.configVA = AutomaticVolumeAdjustmentConfig()
self.automaticVolumeAdjustmentInstance = AutomaticVolumeAdjustment.instance
self.list = []
ConfigListScreen.__init__(self, self.list, session = session)
self.createSetup("config")
def createSetup(self, widget):
self.list = []
self.config_enable = getConfigListEntry(_("Enable"), self.configVA.config.enable)
self.list.append(self.config_enable)
if self.configVA.config.enable.value:
self.config_modus = getConfigListEntry(_("Modus"), self.configVA.config.modus)
self.list.append(self.config_modus)
if self.configVA.config.modus.value == "0":
self.list.append(getConfigListEntry(_("Default volume adjustment value for AC3/DTS"), self.configVA.config.adustvalue))
self.list.append(getConfigListEntry(_("Max. volume for mpeg audio"), self.configVA.config.mpeg_max_volume))
self["key_blue"].text = _("Services")
else:
self["key_blue"].text = ""
self.list.append(getConfigListEntry(_("Show volumebar when volume-value was changed"), self.configVA.config.show_volumebar))
else:
self.config_modus = None
self[widget].list = self.list
self[widget].l.setList(self.list)
def newConfig(self):
if self["config"].getCurrent() in (self.config_enable, self.config_modus):
self.createSetup("config")
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def blue(self):
if self.configVA.config.modus.value == "0":
self.session.open(AutomaticVolumeAdjustmentEntriesListConfigScreen, self.configVA)
def keySave(self):
for x in self["config"].list:
x[1].save()
self.configVA.save()
if self.automaticVolumeAdjustmentInstance is not None:
self.automaticVolumeAdjustmentInstance.initializeConfigValues(self.configVA, True) # submit config values
self.close()
def keyCancel(self):
ConfigListScreen.cancelConfirm(self, True)
class AutomaticVolumeAdjustmentEntriesListConfigScreen(Screen):
skin = """
<screen position="center,120" size="820,520">
<ePixmap pixmap="skin_default/buttons/red.png" position="10,5" size="200,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="210,5" size="200,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="410,5" size="200,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="610,5" size="200,40" alphatest="on" />
<widget source="key_red" render="Label" position="10,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<widget source="key_green" render="Label" position="210,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<widget source="key_yellow" render="Label" position="410,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<widget source="key_blue" render="Label" position="610,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<eLabel position="10,50" size="800,1" backgroundColor="grey" />
<widget source="name" render="Label" position="10,60" size="520,25" font="Regular;21" halign="left"/>
<widget source="adjustvalue" render="Label" position="570,60" size="240,25" font="Regular;21" halign="right"/>
<eLabel position="10,90" size="800,1" backgroundColor="grey" />
<widget name="entrylist" position="10,100" size="800,390" enableWrapAround="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, configVA):
Screen.__init__(self, session)
self.title = _("Automatic Volume Adjustment - Service Config")
self["name"] = StaticText(_("Servicename"))
self["adjustvalue"] = StaticText(_("Adjustment value"))
self["key_red"] = StaticText(_("Add"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Edit"))
self["key_blue"] = StaticText(_("Delete"))
self["entrylist"] = AutomaticVolumeAdjustmentEntryList([])
self["actions"] = ActionMap(["WizardActions","MenuActions","ShortcutActions"],
{
"ok" : self.keyOK,
"back" : self.keyClose,
"red" : self.keyRed,
"green": self.keyClose,
"yellow": self.keyYellow,
"blue": self.keyDelete,
}, -1)
self.automaticVolumeAdjustmentInstance = AutomaticVolumeAdjustment.instance
self["entrylist"].setConfig(configVA)
self.updateList()
def updateList(self):
self["entrylist"].buildList()
def keyClose(self):
self.close(-1, None)
def keyRed(self):
self.session.openWithCallback(self.updateList,AutomaticVolumeAdjustmentEntryConfigScreen,None, self["entrylist"].configVA)
def keyOK(self):
try:sel = self["entrylist"].l.getCurrentSelection()[0]
except: sel = None
self.close(self["entrylist"].getCurrentIndex(), sel)
def keyYellow(self):
try:sel = self["entrylist"].l.getCurrentSelection()[0]
except: sel = None
if sel is None:
return
self.session.openWithCallback(self.updateList,AutomaticVolumeAdjustmentEntryConfigScreen,sel, self["entrylist"].configVA)
def keyDelete(self):
try:sel = self["entrylist"].l.getCurrentSelection()[0]
except: sel = None
if sel is None:
return
self.session.openWithCallback(self.deleteConfirm, MessageBox, _("Do you really want to delete this entry?"))
def deleteConfirm(self, result):
if not result:
return
sel = self["entrylist"].l.getCurrentSelection()[0]
self["entrylist"].configVA.remove(sel)
if self.automaticVolumeAdjustmentInstance is not None:
self.automaticVolumeAdjustmentInstance.initializeConfigValues(self["entrylist"].configVA, True) # submit config values
self.updateList()
class AutomaticVolumeAdjustmentEntryList(MenuList):
SKIN_COMPONENT_KEY = "AutomaticVolumeAdjustmentList"
SKIN_COMPONENT_TEXT_WIDTH = "textWidth"
SKIN_COMPONENT_TEXT_HEIGHT = "textHeight"
def __init__(self, list, enableWrapAround = True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
tlf = TemplatedListFonts()
self.l.setFont(0, gFont(tlf.face(tlf.MEDIUM), tlf.size(tlf.MEDIUM)))
self.configVA = None
def postWidgetCreate(self, instance):
MenuList.postWidgetCreate(self, instance)
instance.setItemHeight(componentSizes.itemHeight(self.SKIN_COMPONENT_KEY, 30))
def getCurrentIndex(self):
return self.instance.getCurrentIndex()
def setConfig(self, configVA):
self.configVA = configVA
def buildList(self):
list = []
sizes = componentSizes[AutomaticVolumeAdjustmentEntryList.SKIN_COMPONENT_KEY]
textWidth = sizes.get(AutomaticVolumeAdjustmentEntryList.SKIN_COMPONENT_TEXT_WIDTH, 570)
textHeight = sizes.get(AutomaticVolumeAdjustmentEntryList.SKIN_COMPONENT_TEXT_HEIGHT, 30)
for c in self.configVA.config.Entries:
c.name.value = ServiceReference(eServiceReference(c.servicereference.value)).getServiceName()
res = [
c,
(eListboxPythonMultiContent.TYPE_TEXT, 5, 0, textWidth-10, textHeight, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, c.name.value),
(eListboxPythonMultiContent.TYPE_TEXT, textWidth, 0,220, textHeight, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, str(c.adjustvalue.value)),
]
list.append(res)
self.list = list
self.l.setList(list)
self.moveToIndex(0)
class AutomaticVolumeAdjustmentEntryConfigScreen(ConfigListScreen, Screen):
skin = """
<screen name="AutomaticVolumeAdjustmentEntryConfigScreen" position="center,center" size="820,400">
<ePixmap pixmap="skin_default/buttons/red.png" position="10,5" size="200,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="210,5" size="200,40" alphatest="on" />
<widget source="key_red" render="Label" position="10,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<widget source="key_green" render="Label" position="210,5" size="200,40" zPosition="1" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" shadowColor="black" shadowOffset="-2,-2" />
<eLabel position="10,50" size="800,1" backgroundColor="grey" />
<widget name="config" position="10,60" size="800,240" enableWrapAround="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, entry, configVA):
self.session = session
Screen.__init__(self, session)
self.title = _("Automatic Volume Adjustment - Entry Config")
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.keySave,
"red": self.keyCancel,
"cancel": self.keyCancel,
"ok": self.keySelect,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.configVA = configVA
if entry is None:
self.newmode = 1
self.current = self.configVA.initEntryConfig()
self.currentvalue = self.current.adjustvalue.value
else:
self.newmode = 0
self.current = entry
self.currentref = entry.servicereference.value
self.currentvalue = entry.adjustvalue.value
self.list = [ ]
self.service = getConfigListEntry(_("Servicename"), self.current.name)
self.list.append(self.service)
self.adjustValue = getConfigListEntry(_("Adjustment value"), self.current.adjustvalue)
self.list.append(self.adjustValue)
ConfigListScreen.__init__(self, self.list, session)
self.automaticVolumeAdjustmentInstance = AutomaticVolumeAdjustment.instance
def keySelect(self):
cur = self["config"].getCurrent()
if cur == self.service:
self.session.openWithCallback(self.channelSelected, SimpleChannelSelection, _("Channel Selection"))
def channelSelected(self, ref = None):
if ref:
self.current.name.value = ServiceReference(ref).getServiceName()
self.current.servicereference.value = ref.toString()
self.current.save()
def keySave(self):
if self.current.servicereference.value:
if self.newmode == 1:
self.configVA.config.entriescount.value = self.configVA.config.entriescount.value + 1
self.configVA.config.entriescount.save()
for x in self["config"].list:
x[1].save()
self.configVA.save()
if self.automaticVolumeAdjustmentInstance is not None:
self.automaticVolumeAdjustmentInstance.initializeConfigValues(self.configVA, True) # submit config values
self.close()
else:
self.session.open(MessageBox, _("You must select a valid service!"), type = MessageBox.TYPE_INFO)
def keyCancel(self):
if self.newmode == 1:
self.configVA.config.Entries.remove(self.current)
self.configVA.config.Entries.save()
else:
self.current.servicereference.value = self.currentref
self.current.adjustvalue.value = self.currentvalue
self.current.save()
ConfigListScreen.cancelConfirm(self, True)
| 45.996785
| 226
| 0.741
|
ad45e1465c86a1cfecee6aff9675670b396e40d2
| 3,530
|
py
|
Python
|
ModelStats.py
|
leoi137/Asset-Price-Prediction_Machine-Learning
|
9f2a753eb9d4f13b855072bf0e77e5243bc00e72
|
[
"MIT"
] | 1
|
2019-05-02T05:41:53.000Z
|
2019-05-02T05:41:53.000Z
|
ModelStats.py
|
leoi137/Asset-Price-Prediction_Machine-Learning
|
9f2a753eb9d4f13b855072bf0e77e5243bc00e72
|
[
"MIT"
] | null | null | null |
ModelStats.py
|
leoi137/Asset-Price-Prediction_Machine-Learning
|
9f2a753eb9d4f13b855072bf0e77e5243bc00e72
|
[
"MIT"
] | 2
|
2019-07-03T14:04:14.000Z
|
2021-06-21T15:51:28.000Z
|
import pandas as pd
import json
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
class Statistics:
def __init__(self, X_test_selected, y_test, Model, model_features, scores, size = 3):
"""
Parameters:
THIS ONLY WORKS WITH A CLASSIFIER MODEL WITH 3 PREDICTED VALUES
X_test_selected: The selected features to test on (called "selected" because it assumes
they have gone through a feature selection process; it works either way.)
y_test: The value being predicted for testing
Model: The trained model
model_features: The name of the features used
scores: The results after running k-Fold cross validation
size: Only supports 3, meaning three different predicted values
"""
self.X_test_selected = X_test_selected
self.y_test = y_test
self.size = size
self.Model = Model
self.model_feat = model_features
self.scores = scores
self.data_dict = {}
def statistics(self):
y_preds = self.Model.predict(self.X_test_selected)
conf_matrix = confusion_matrix(self.y_test, y_preds)
conf_DF = pd.DataFrame(conf_matrix, columns = ['-1', '0', '1'], index = ['-1', '0', '1'])
if self.size == 3:
bull_mean = (conf_matrix[0, 0]/ (conf_matrix[0, 0] + conf_matrix[1, 0] + conf_matrix[2, 0]))
bear_mean = (conf_matrix[2, 2]/ (conf_matrix[0, 2] + conf_matrix[1, 2] + conf_matrix[2, 2]))
none_mean = (conf_matrix[1, 1]/ (conf_matrix[0, 1] + conf_matrix[1, 1] + conf_matrix[2, 1]))
# inf = [scores.mean(), bull_mean, bear_mean, none_mean]
# index = ['All', 'Bullish', 'Bearish', 'Stalled']
# stats = pd.DataFrame(inf, index = index)
# stats.columns = ['Accuracy (%)']
# self.data_dict['Confusion Matrix'] = conf_DF
# self.data_dict['Accuracy'] = stats
self.data_dict['Accuracy'] = {'All': accuracy_score(y_preds, self.y_test),
'Bull': bull_mean,
'Bear': bear_mean,
'Stalled': none_mean,
'STDV': self.scores.std()}
self.data_dict['Confusion Matrix'] = {'-1': (int(conf_matrix[0, 0]), int(conf_matrix[1, 0]),
int(conf_matrix[2, 0])),
'0': (int(conf_matrix[0, 1]), int(conf_matrix[1, 1]),
int(conf_matrix[2, 1])),
'1': (int(conf_matrix[0, 2]), int(conf_matrix[1, 2]),
int(conf_matrix[2, 2]))}
self.data_dict['Features'] = self.model_feat
return self.data_dict
def save_file(self, model_name):
"""
Parameters:
model_name: The name of a file to save it as in string format
"""
stats = self.statistics()
with open('{}.json'.format(model_name), 'w') as outfile:
json.dump(stats, outfile)
# with open('DoublePriceClassifierDaily.csv', 'w') as f:
# for key in stats.keys():
# f.write("%s,%s\n"%(key, stats[key]))
| 43.04878
| 105
| 0.513031
|
fec1ab30960dc413e6e05aba540173de339829e5
| 10,080
|
py
|
Python
|
meiduo_1/meiduo_1/apps/goods/migrations/0001_initial.py
|
zjc0520/meiduo
|
1939223b050e7b69237e8551f5582146989d78a3
|
[
"MIT"
] | null | null | null |
meiduo_1/meiduo_1/apps/goods/migrations/0001_initial.py
|
zjc0520/meiduo
|
1939223b050e7b69237e8551f5582146989d78a3
|
[
"MIT"
] | null | null | null |
meiduo_1/meiduo_1/apps/goods/migrations/0001_initial.py
|
zjc0520/meiduo
|
1939223b050e7b69237e8551f5582146989d78a3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-30 12:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=20, verbose_name='名称')),
('logo', models.ImageField(upload_to='', verbose_name='Logo图片')),
('first_letter', models.CharField(max_length=1, verbose_name='品牌首字母')),
],
options={
'verbose_name_plural': '品牌',
'db_table': 'tb_brand',
'verbose_name': '品牌',
},
),
migrations.CreateModel(
name='Goods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('sales', models.IntegerField(default=0, verbose_name='销量')),
('comments', models.IntegerField(default=0, verbose_name='评价数')),
('brand', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.Brand', verbose_name='品牌')),
],
options={
'verbose_name_plural': '商品',
'db_table': 'tb_goods',
'verbose_name': '商品',
},
),
migrations.CreateModel(
name='GoodsCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=10, verbose_name='名称')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategory', verbose_name='父类别')),
],
options={
'verbose_name_plural': '商品类别',
'db_table': 'tb_goods_category',
'verbose_name': '商品类别',
},
),
migrations.CreateModel(
name='GoodsChannel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('group_id', models.IntegerField(verbose_name='组号')),
('url', models.CharField(max_length=50, verbose_name='频道页面链接')),
('sequence', models.IntegerField(verbose_name='组内顺序')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategory', verbose_name='顶级商品类别')),
],
options={
'verbose_name_plural': '商品频道',
'db_table': 'tb_goods_channel',
'verbose_name': '商品频道',
},
),
migrations.CreateModel(
name='GoodsSpecification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=20, verbose_name='规格名称')),
('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品')),
],
options={
'verbose_name_plural': '商品规格',
'db_table': 'tb_goods_specification',
'verbose_name': '商品规格',
},
),
migrations.CreateModel(
name='SKU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('caption', models.CharField(max_length=100, verbose_name='副标题')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='单价')),
('cost_price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='进价')),
('market_price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='市场价')),
('stock', models.IntegerField(default=0, verbose_name='库存')),
('sales', models.IntegerField(default=0, verbose_name='销量')),
('comments', models.IntegerField(default=0, verbose_name='评价数')),
('is_launched', models.BooleanField(default=True, verbose_name='是否上架销售')),
('default_image_url', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='默认图片')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.GoodsCategory', verbose_name='从属类别')),
('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品')),
],
options={
'verbose_name_plural': '商品SKU',
'db_table': 'tb_sku',
'verbose_name': '商品SKU',
},
),
migrations.CreateModel(
name='SKUImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('image', models.ImageField(upload_to='', verbose_name='图片')),
('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.SKU', verbose_name='sku')),
],
options={
'verbose_name_plural': 'SKU图片',
'db_table': 'tb_sku_image',
'verbose_name': 'SKU图片',
},
),
migrations.CreateModel(
name='SKUSpecification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
],
options={
'verbose_name_plural': 'SKU规格',
'db_table': 'tb_sku_specification',
'verbose_name': 'SKU规格',
},
),
migrations.CreateModel(
name='SpecificationOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('value', models.CharField(max_length=20, verbose_name='选项值')),
('spec', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSpecification', verbose_name='规格')),
],
options={
'verbose_name_plural': '规格选项',
'db_table': 'tb_specification_option',
'verbose_name': '规格选项',
},
),
migrations.AddField(
model_name='skuspecification',
name='option',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.SpecificationOption', verbose_name='规格值'),
),
migrations.AddField(
model_name='skuspecification',
name='sku',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.SKU', verbose_name='sku'),
),
migrations.AddField(
model_name='skuspecification',
name='spec',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.GoodsSpecification', verbose_name='规格名称'),
),
migrations.AddField(
model_name='goods',
name='category1',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cat1_goods', to='goods.GoodsCategory', verbose_name='一级类别'),
),
migrations.AddField(
model_name='goods',
name='category2',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cat2_goods', to='goods.GoodsCategory', verbose_name='二级类别'),
),
migrations.AddField(
model_name='goods',
name='category3',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cat3_goods', to='goods.GoodsCategory', verbose_name='三级类别'),
),
]
| 51.428571
| 160
| 0.57748
|
273ad1ff7772f896a9745bbcb951992f2e6175e2
| 3,004
|
py
|
Python
|
tests/tests_indiv_jobs/test_r3createjob.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests_indiv_jobs/test_r3createjob.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests_indiv_jobs/test_r3createjob.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
from ctm_python_client.jobs.sap.r3.create import R3CREATEJob
import os
from ctm_python_client.core.bmc_control_m import CmJobFlow
from ctm_python_client.session.session import Session
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(BASE_PATH, ".secrets"), "r") as fp:
ctm_uri = fp.readline().strip()
ctm_user = fp.readline().strip()
ctm_pwd = fp.readline().strip()
# Create CTM Session
session = Session(endpoint=ctm_uri, username=ctm_user, password=ctm_pwd)
# CREATE JOB FLOW
t1_flow = CmJobFlow(
application="Naga0.3_Test", sub_application="TestAllJobs", session=session
)
t1_flow.set_run_as(username="ctmuser", host="acb-rhctmv20")
# Define the schedule
months = ["JAN", "OCT", "DEC"]
monthDays = ["ALL"]
weekDays = ["MON", "TUE", "WED", "THU", "FRI"]
fromTime = "0300"
toTime = "2100"
t1_flow.set_schedule(months, monthDays, weekDays, fromTime, toTime)
# Create Folder
fn = os.path.split(__file__)[-1][:-3]
f1 = t1_flow.create_folder(name=fn)
j1 = R3CREATEJob(
folder=f1,
job_name='r3create',
connection_profile="SAPCP",
sap_job_name="SAP_job2",
start_condition="Immediate",
rerun_from_step="3",
target="controlmserver",
created_by="user1",
steps=[{'StepType': 'ABAP', 'TimeToPrint': 'PrintLater', 'CoverPrintPage': True, 'OutputDevice': 'prt', 'UserName': 'user', 'SpoolAuthorization': 'Auth', 'CoverDepartment': 'dpt', 'SpoolListName': 'spoolname', 'OutputNumberRows': '62', 'NumberOfCopies': '5', 'NewSpoolRequest': False, 'PrintArchiveMode': 'PrintAndArchive', 'CoverPage': 'Print', 'ArchiveObjectType': 'objtype', 'SpoolListTitles': 'titles', 'OutputLayout': 'layout', 'CoverSheet': 'Print', 'ProgramName': 'ABAP_PROGRAM', 'Language': 'e', 'ArchiveInformationField': 'inf', 'DeleteAfterPrint': True, 'PrintExpiration': '3', 'OutputNumberColumns': '88', 'ArchiveDocumentType': 'doctype', 'CoverRecipient': 'recipient', 'VariantName': 'NameOfVariant', 'VariantParameters': [{'Type': 'Range', 'High': '2', 'Sign': 'I', 'Option': 'BT', 'Low': '1', 'Name': 'var1', 'Modify': False}, {'Low': '5', 'Type': 'Range', 'Option': 'BT', 'Sign': 'I', 'Modify': True, 'High': '6', 'Name': 'var3'}]}, {'StepType': 'ABAP', 'PrintArchiveMode': 'Print', 'ProgramName': 'ABAP_PROGRAM2', 'VariantName': 'Myvar_with_temp', 'TemporaryVariantParameters': [{'Type': 'Simple', 'Name': 'var', 'Value': 'P11'}, {'Type': 'Simple', 'Name': 'var2', 'Value': 'P11'}]}],
post_job_action={'JobLog': 'CopyToFile', 'JobCompletionStatusWillDependOnApplicationStatus': True, 'SpoolSaveToPDF': True, 'JobLogFile': 'fileToCopy.txt'},
spool_list_recipient={'ReciptNoForwarding': False},
)
t1_flow.add_job(folder=f1, job=j1)
import json
x = t1_flow.deploy()
s = str(x[0])
s = s.replace("'", '"')
s = s.replace("None", '"None"')
s = s.replace("False", '"False"')
s = s.replace("True", '"True"')
s = s.replace("\n", "")
j = json.loads(s)
def test_output():
assert j["successful_smart_folders_count"] == 1
| 47.68254
| 1,207
| 0.676431
|
e2a9ab05b278687b258d8b6a0dd8c780d44902ad
| 2,808
|
py
|
Python
|
supybot/plugins/Anonymous/test.py
|
ekolis/ChancellorGerathIrc
|
f536c1901e743708101cb27249d0e395b8c0a493
|
[
"BSD-3-Clause"
] | 1
|
2015-04-16T14:23:20.000Z
|
2015-04-16T14:23:20.000Z
|
plugins/Anonymous/test.py
|
kblin/supybot-gsoc
|
656f42f8d6b3fe4544a5270e0dab816fd3603118
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/Anonymous/test.py
|
kblin/supybot-gsoc
|
656f42f8d6b3fe4544a5270e0dab816fd3603118
|
[
"BSD-3-Clause"
] | null | null | null |
###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class AnonymousTestCase(ChannelPluginTestCase):
plugins = ('Anonymous',)
def testSay(self):
m = self.assertError('anonymous say %s I love you!' % self.channel)
try:
orig = conf.supybot.plugins.Anonymous.requireRegistration()
conf.supybot.plugins.Anonymous.requireRegistration.setValue(False)
m = self.assertNotError('anonymous say %s foo!'%self.channel)
self.failUnless(m.args[1] == 'foo!')
finally:
conf.supybot.plugins.Anonymous.requireRegistration.setValue(orig)
def testAction(self):
m = self.assertError('anonymous do %s loves you!' % self.channel)
try:
orig = conf.supybot.plugins.Anonymous.requireRegistration()
conf.supybot.plugins.Anonymous.requireRegistration.setValue(False)
m = self.assertNotError('anonymous do %s loves you!'%self.channel)
self.assertEqual(m.args, ircmsgs.action(self.channel,
'loves you!').args)
finally:
conf.supybot.plugins.Anonymous.requireRegistration.setValue(orig)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 49.263158
| 79
| 0.714387
|
e1f04e36eec3b3d8d427753adce99a1dcc36b7ce
| 9,537
|
py
|
Python
|
nox.py
|
jkhnn/python-docs-samples
|
fa3a07a3fd383d7248e58c97a65e80d80b6774a1
|
[
"Apache-2.0"
] | 1
|
2019-04-03T12:08:34.000Z
|
2019-04-03T12:08:34.000Z
|
nox.py
|
jkhnn/python-docs-samples
|
fa3a07a3fd383d7248e58c97a65e80d80b6774a1
|
[
"Apache-2.0"
] | null | null | null |
nox.py
|
jkhnn/python-docs-samples
|
fa3a07a3fd383d7248e58c97a65e80d80b6774a1
|
[
"Apache-2.0"
] | 1
|
2022-03-29T18:51:10.000Z
|
2022-03-29T18:51:10.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import fnmatch
import os
import tempfile
import nox
try:
import ci_diff_helper
except ImportError:
ci_diff_helper = None
#
# Helpers and utility functions
#
def _list_files(folder, pattern):
"""Lists all files below the given folder that match the pattern."""
for root, folders, files in os.walk(folder):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(root, filename)
def _collect_dirs(
start_dir,
blacklist=set(['conftest.py', 'nox.py', 'lib', 'third_party']),
suffix='_test.py',
recurse_further=False):
"""Recursively collects a list of dirs that contain a file matching the
given suffix.
This works by listing the contents of directories and finding
directories that have `*_test.py` files.
"""
# Collect all the directories that have tests in them.
for parent, subdirs, files in os.walk(start_dir):
if './.' in parent:
continue # Skip top-level dotfiles
elif any(f for f in files if f.endswith(suffix) and f not in blacklist):
# Don't recurse further for tests, since py.test will do that.
if not recurse_further:
del subdirs[:]
# This dir has desired files in it. yield it.
yield parent
else:
# Filter out dirs we don't want to recurse into
subdirs[:] = [
s for s in subdirs
if s[0].isalpha() and
s not in blacklist]
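# --- Illustrative sketch (editorial addition, not part of the original nox.py) ---
# Shows what _collect_dirs() yields: directories that directly contain a file
# ending in `_test.py`, with blacklisted folders such as `lib` pruned. The
# directory layout below is made up purely for the demonstration.
def _demo_collect_dirs():
    import shutil
    root = tempfile.mkdtemp()
    try:
        os.makedirs(os.path.join(root, 'sample_a'))
        os.makedirs(os.path.join(root, 'sample_b', 'lib'))
        open(os.path.join(root, 'sample_a', 'main_test.py'), 'w').close()
        open(os.path.join(root, 'sample_b', 'app.py'), 'w').close()
        # Only sample_a holds a *_test.py file, so only it is yielded.
        print(sorted(_collect_dirs(root)))
    finally:
        shutil.rmtree(root)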
def _get_changed_files():
"""Returns a list of files changed for this pull request / push.
If running on a public CI like Travis or Circle this is used to only
run tests/lint for changed files.
"""
if not ci_diff_helper:
return None
try:
config = ci_diff_helper.get_config()
except OSError: # Not on CI.
return None
changed_files = ci_diff_helper.get_changed_files('HEAD', config.base)
changed_files = set([
'./{}'.format(filename) for filename in changed_files])
return changed_files
def _filter_samples(sample_dirs, changed_files):
"""Filers the list of sample directories to only include directories that
contain files in the list of changed files."""
result = []
for sample_dir in sample_dirs:
for changed_file in changed_files:
if changed_file.startswith(sample_dir):
result.append(sample_dir)
return list(set(result))
def _determine_local_import_names(start_dir):
"""Determines all import names that should be considered "local".
This is used when running the linter to ensure that import order is
properly checked.
"""
file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
return [
basename
for basename, extension
in file_ext_pairs
if extension == '.py' or os.path.isdir(
os.path.join(start_dir, basename))
and basename not in ('__pycache__')]
#
# App Engine specific helpers
#
_GAE_ROOT = os.environ.get('GAE_ROOT')
if _GAE_ROOT is None:
_GAE_ROOT = tempfile.mkdtemp()
def _setup_appengine_sdk(session):
"""Installs the App Engine SDK, if needed."""
session.env['GAE_SDK_PATH'] = os.path.join(_GAE_ROOT, 'google_appengine')
session.run('gcp-devrel-py-tools', 'download-appengine-sdk', _GAE_ROOT)
#
# Test sessions
#
PYTEST_COMMON_ARGS = ['--junitxml=sponge_log.xml']
# Ignore I202 "Additional newline in a section of imports." to accommodate
# region tags in import blocks. Since we specify an explicit ignore, we also
# have to explicitly ignore the list of default ignores:
# `E121,E123,E126,E226,E24,E704,W503,W504` as shown by `flake8 --help`.
FLAKE8_COMMON_ARGS = [
'--show-source', '--builtin', 'gettext', '--max-complexity', '20',
'--import-order-style', 'google',
'--exclude', '.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py',
'--ignore=E121,E123,E126,E226,E24,E704,W503,W504,I202',
]
# Collect sample directories.
ALL_TESTED_SAMPLES = sorted(list(_collect_dirs('.')))
ALL_SAMPLE_DIRECTORIES = sorted(list(_collect_dirs('.', suffix='.py', recurse_further=True)))
GAE_STANDARD_SAMPLES = [
sample for sample in ALL_TESTED_SAMPLES
if sample.startswith('./appengine/standard/')]
PY2_ONLY_SAMPLES = GAE_STANDARD_SAMPLES
PY3_ONLY_SAMPLES = [
sample for sample in ALL_TESTED_SAMPLES
if (sample.startswith('./appengine/standard_python37')
or sample.startswith('./functions/'))]
NON_GAE_STANDARD_SAMPLES_PY2 = sorted(
list((set(ALL_TESTED_SAMPLES) - set(GAE_STANDARD_SAMPLES)) -
set(PY3_ONLY_SAMPLES)))
NON_GAE_STANDARD_SAMPLES_PY3 = sorted(
list(set(ALL_TESTED_SAMPLES) - set(PY2_ONLY_SAMPLES)))
# Filter sample directories if on a CI like Travis or Circle to only run tests
# for changed samples.
CHANGED_FILES = _get_changed_files()
if CHANGED_FILES is not None:
print('Filtering based on changed files.')
ALL_TESTED_SAMPLES = _filter_samples(
ALL_TESTED_SAMPLES, CHANGED_FILES)
ALL_SAMPLE_DIRECTORIES = _filter_samples(
ALL_SAMPLE_DIRECTORIES, CHANGED_FILES)
GAE_STANDARD_SAMPLES = _filter_samples(
GAE_STANDARD_SAMPLES, CHANGED_FILES)
NON_GAE_STANDARD_SAMPLES_PY2 = _filter_samples(
NON_GAE_STANDARD_SAMPLES_PY2, CHANGED_FILES)
NON_GAE_STANDARD_SAMPLES_PY3 = _filter_samples(
NON_GAE_STANDARD_SAMPLES_PY3, CHANGED_FILES)
def _session_tests(session, sample, post_install=None):
"""Runs py.test for a particular sample."""
session.install('-r', 'testing/requirements.txt')
session.chdir(sample)
if os.path.exists(os.path.join(sample, 'requirements.txt')):
session.install('-r', 'requirements.txt')
if post_install:
post_install(session)
session.run(
'pytest',
*(PYTEST_COMMON_ARGS + session.posargs),
# Pytest will return 5 when no tests are collected. This can happen
# on travis where slow and flaky tests are excluded.
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
success_codes=[0, 5])
@nox.parametrize('sample', GAE_STANDARD_SAMPLES)
def session_gae(session, sample):
"""Runs py.test for an App Engine standard sample."""
session.interpreter = 'python2.7'
# Create a lib directory if needed, otherwise the App Engine vendor library
# will complain.
if not os.path.isdir(os.path.join(sample, 'lib')):
os.mkdir(os.path.join(sample, 'lib'))
_session_tests(session, sample, _setup_appengine_sdk)
@nox.parametrize('sample', NON_GAE_STANDARD_SAMPLES_PY2)
def session_py27(session, sample):
"""Runs py.test for a sample using Python 2.7"""
session.interpreter = 'python2.7'
_session_tests(session, sample)
@nox.parametrize('sample', NON_GAE_STANDARD_SAMPLES_PY3)
def session_py36(session, sample):
"""Runs py.test for a sample using Python 3.6"""
session.interpreter = 'python3.6'
_session_tests(session, sample)
@nox.parametrize('sample', ALL_SAMPLE_DIRECTORIES)
def session_lint(session, sample):
"""Runs flake8 on the sample."""
session.install('flake8', 'flake8-import-order')
local_names = _determine_local_import_names(sample)
args = FLAKE8_COMMON_ARGS + [
'--application-import-names', ','.join(local_names),
'.']
session.chdir(sample)
session.run('flake8', *args)
#
# Utility sessions
#
def session_missing_tests(session):
"""Lists all sample directories that do not have tests."""
session.virtualenv = False
print('The following samples do not have tests:')
for sample in set(ALL_SAMPLE_DIRECTORIES) - set(ALL_TESTED_SAMPLES):
print('* {}'.format(sample))
SAMPLES_WITH_GENERATED_READMES = sorted(
list(_collect_dirs('.', suffix='.rst.in')))
@nox.parametrize('sample', SAMPLES_WITH_GENERATED_READMES)
def session_readmegen(session, sample):
"""(Re-)generates the readme for a sample."""
session.install('jinja2', 'pyyaml')
if os.path.exists(os.path.join(sample, 'requirements.txt')):
session.install('-r', os.path.join(sample, 'requirements.txt'))
in_file = os.path.join(sample, 'README.rst.in')
session.run('python', 'scripts/readme-gen/readme_gen.py', in_file)
def session_check_requirements(session):
"""Checks for out of date requirements and optionally updates them.
This is intentionally not parametric, as it's desired to never have two
samples with differing versions of dependencies.
"""
session.install('-r', 'testing/requirements.txt')
if 'update' in session.posargs:
command = 'update-requirements'
else:
command = 'check-requirements'
reqfiles = list(_list_files('.', 'requirements*.txt'))
for reqfile in reqfiles:
session.run('gcp-devrel-py-tools', command, reqfile)
| 32.003356
| 93
| 0.692146
|
65b1fa2c340cfd6c38c993ae9d70d5ac21556d53
| 7,652
|
py
|
Python
|
bin/archipelago-encode-trees.py
|
jeetsukumaran/archipelago
|
2007eb8bdfdc9a7c810af881102a7e3b63d155d3
|
[
"BSD-3-Clause"
] | 1
|
2015-06-29T09:13:32.000Z
|
2015-06-29T09:13:32.000Z
|
bin/archipelago-encode-trees.py
|
jeetsukumaran/archipelago
|
2007eb8bdfdc9a7c810af881102a7e3b63d155d3
|
[
"BSD-3-Clause"
] | null | null | null |
bin/archipelago-encode-trees.py
|
jeetsukumaran/archipelago
|
2007eb8bdfdc9a7c810af881102a7e3b63d155d3
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
import os
import sys
import argparse
try:
from StringIO import StringIO # Python 2 legacy support: StringIO in this module is the one needed (not io)
except ImportError:
from io import StringIO # Python 3
import dendropy
import archipelago
from archipelago import model
def symbols_desc(symbols):
return ", ".join(["'{}'".format(s) for s in symbols])
def validate_and_set_taxon_data(
data,
data_description,
allowed_symbols,
attr_name,
char_separator,
_log):
allowed_symbols = set(allowed_symbols)
for taxon in data:
seq = data[taxon]
seq_symbols = seq.symbols_as_list()
seq_symbol_set = set(seq_symbols)
diff_symbols = seq_symbol_set - allowed_symbols
if diff_symbols:
sys.stderr.write("ERROR: Following symbols are not allowed in {} data:\n {}\nAllowed symbols are:\n {}\n".format(
data_description,
symbols_desc(diff_symbols),
symbols_desc(allowed_symbols)))
setattr(taxon, attr_name, char_separator.join(seq_symbols))
def read_taxon_data(
trees,
data_description,
data_path,
data_format,
_log):
pre_data_taxa = set([t.label for t in trees.taxon_namespace])
pre_data_taxa_desc = ",".join(["'{}'".format(t) for t in pre_data_taxa])
data_path = os.path.expanduser(os.path.expandvars(data_path))
data = dendropy.StandardCharacterMatrix.get(
path=data_path,
schema=data_format,
taxon_namespace=trees.taxon_namespace)
post_data_taxa = set([t.label for t in trees.taxon_namespace])
if pre_data_taxa != post_data_taxa:
diff_taxa = post_data_taxa - pre_data_taxa
diff_taxa_desc = ",".join(["'{}'".format(t) for t in diff_taxa])
sys.stderr.write("ERROR: Following taxon names defined in {} data:\n {}\nbut not in phylogeny:\n {}\n".format(
data_description,
diff_taxa_desc,
pre_data_taxa_desc))
sys.exit(1)
seq_length = None
num_seqs = 0
for taxon in data:
num_seqs += 1
pre_data_taxa.remove(taxon.label)
seq = data[taxon]
if seq_length is None:
seq_length = len(seq)
elif seq_length != len(seq):
sys.stderr.write("Expecting sequence length of {} for {} data, but found {} for taxon '{}'\n".format(
seq_length,
data_description,
len(seq),
taxon.label))
if pre_data_taxa:
diff_taxa_desc = ",".join(["'{}'".format(t) for t in pre_data_taxa])
sys.stderr.write("ERROR: Following taxon names defined in trees but not in {} data:\n {}\n".format(
data_description,
diff_taxa_desc,
))
sys.exit(1)
_log("{} characters read for {} taxa for {} data from: {}".format(
seq_length,
num_seqs,
data_description,
data_path))
return data
def main():
parser = argparse.ArgumentParser(
description="{} Data Encoder".format(archipelago.description())
)
parser.add_argument("-p", "--phylogeny",
metavar="PHYLOGENY-FILE",
help="Path to file defining the phylogeny.")
parser.add_argument("-g", "--geography",
metavar="GEOGRAPHY-FILE",
help="Path to file defining the geography.")
parser.add_argument("-t", "--traits",
metavar="TRAITS-FILE",
help="Path to file defining the traits.")
parser.add_argument("-P", "--phylogeny-format",
choices=("newick", "nexus"),
default="newick",
help="Format of the phylogeny file (default: '%(default)s').")
parser.add_argument("--preserve-underscores",
action="store_true",
default=False,
help="Do not convert unquoted underscores to blanks/spaces in labels of phylogeny.")
parser.add_argument("-G", "--geography-format",
choices=("phylip", "nexus",),
default="phylip",
help="Format of the geography file (default: '%(default)s').")
parser.add_argument("-T", "--traits-format",
choices=("phylip", "nexus",),
default="phylip",
help="Format of the traits file (default: '%(default)s').")
parser.add_argument("-O", "--output-format",
choices=("newick", "nexus"),
default="newick",
help="Format of the output data (default: '%(default)s').")
parser.add_argument("-q", "--quiet",
action="store_true",
default=False,
help="Run in silent mode.")
args = parser.parse_args()
if args.phylogeny is None:
sys.exit("Must specify path to phylogeny file using '-p'/'--phylogeny' option.")
if args.geography is None:
sys.exit("Must specify path to geography file using '-g'/'--geography' option.")
# if args.traits is None:
# sys.exit("Must specify path to traits file using '-t'/'--traits' option.")
if args.quiet:
_log = lambda x: None
else:
_log = lambda x: sys.stderr.write("-archipelago- {}\n".format(x))
taxon_namespace = dendropy.TaxonNamespace()
trees_path = os.path.expanduser(os.path.expandvars(args.phylogeny))
trees = dendropy.TreeList.get(
path=trees_path,
schema=args.phylogeny_format,
taxon_namespace=taxon_namespace,
preserve_underscores=args.preserve_underscores,
)
_log("{} tree(s), {} taxa read from: '{}'".format(len(trees), len(taxon_namespace), trees_path))
geography_path = os.path.expanduser(os.path.expandvars(args.geography))
geography_data = read_taxon_data(
trees=trees,
data_description="geography",
data_path=geography_path,
data_format=args.geography_format,
_log=_log,
)
validate_and_set_taxon_data(
data=geography_data,
data_description="geography",
allowed_symbols=("0", "1"),
attr_name="geography_data_string",
char_separator="",
_log=_log)
if args.traits is not None:
traits_path = os.path.expanduser(os.path.expandvars(args.traits))
traits_data = read_taxon_data(
trees=trees,
data_description="traits",
data_path=traits_path,
data_format=args.traits_format,
_log=_log,
)
validate_and_set_taxon_data(
data=traits_data,
data_description="traits",
allowed_symbols="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
attr_name="traits_data_string",
char_separator=model.Lineage._TRAITS_SEPARATOR,
_log=_log)
else:
_log("No traits data specified")
for taxon in trees.taxon_namespace:
taxon.traits_data_string = model.Lineage._NULL_TRAITS
for taxon in trees.taxon_namespace:
taxon.old_label = taxon.label
taxon.label = "{idx}{sep}{traits}{sep}{areas}".format(
idx=taxon.label,
sep=model.Lineage._LABEL_COMPONENTS_SEPARATOR,
traits=taxon.traits_data_string,
areas=taxon.geography_data_string,
)
out = sys.stdout
trees.write(
file=out,
schema=args.output_format,
unquoted_underscores=args.preserve_underscores)
if __name__ == "__main__":
main()
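# --- Hedged usage note (editorial addition) ---
# A typical invocation of this script, using placeholder filenames rather than
# files shipped with the project: it reads a phylogeny plus a 0/1 geography
# matrix (and optionally a traits matrix), appends the encoded trait/area data
# to each taxon label, and writes the encoded trees to stdout, e.g.
#
#   archipelago-encode-trees.py \
#       --phylogeny trees.newick --phylogeny-format newick \
#       --geography geography.phylip --geography-format phylip \
#       --traits traits.phylip --traits-format phylip \
#       --output-format nexus > encoded-trees.nexus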
| 37.881188
| 131
| 0.59919
|
7fdbdf636badac9e78a8a5890f62b3e9a01cd1f4
| 2,256
|
py
|
Python
|
journalpdfscraper/BMJSoupScraper.py
|
PeterMorrison1/JournalPDFScraper
|
ca30112653da9a53c9be5dc742e1409d94f71708
|
[
"MIT"
] | null | null | null |
journalpdfscraper/BMJSoupScraper.py
|
PeterMorrison1/JournalPDFScraper
|
ca30112653da9a53c9be5dc742e1409d94f71708
|
[
"MIT"
] | null | null | null |
journalpdfscraper/BMJSoupScraper.py
|
PeterMorrison1/JournalPDFScraper
|
ca30112653da9a53c9be5dc742e1409d94f71708
|
[
"MIT"
] | null | null | null |
from selenium.webdriver.common.keys import Keys
from journalpdfscraper.BaseSoup import BaseSoup
class BMJSoupScraper(BaseSoup):
def __init__(self, url):
super().__init__(url)
def __get_article_button_element__(self, class_string='icon-open-access'):
"""Finds the element on the page for the button to click to get the pdf
Returns:
element: None if no element is found (or on timeout), otherwise the driver element
"""
element = super().__find_element_by_class__(class_string)
return element
def can_parse_url(self, url):
"""Determines if the url can be parsed by the specific scraper
Args:
url (string): the url to scraped
Returns:
element: None if no element is found (or on timeout), otherwise the driver element
"""
super().__launch_journal_page__(url)
element = self.__get_article_button_element__()
if element is not None:
return element
else:
return None
def find_pdf_url(self, url):
"""Will find the free PDF url and return it
Args:
url (string): the url to scrape
Returns:
String: None if the article is pay-walled, the url is invalid, or the scraper is outdated; otherwise the pdf url
"""
super().__launch_journal_page__(url)
element = self.__get_article_button_element__(class_string='article-pdf-download')
if element is not None:
return super().__get_pdf_url__(element, url, '/')
else:
return None
def find_journal_url(self, url):
"""Will find the free journal url and return it. Otherwise returns None. This is different than the pdf as it does not send the PDF page, only check if it is free.
Args:
url (string): the url of the journal page
Returns:
String: None if the article is pay-walled, the url is invalid, or the scraper is outdated; otherwise the journal url
"""
super().__launch_journal_page__(url)
element = self.__get_article_button_element__()
if element is not None:
return url
else:
return None
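# --- Hedged usage sketch (editorial addition) ---
# Shows how the scraper above is intended to be driven. It assumes BaseSoup
# wires up a working Selenium driver; the URL below is a placeholder, not a
# tested endpoint, and nothing runs unless this file is executed directly.
if __name__ == "__main__":
    article_url = "https://www.bmj.com/content/some/article"  # placeholder
    scraper = BMJSoupScraper(article_url)
    if scraper.can_parse_url(article_url) is not None:
        print("Free PDF:", scraper.find_pdf_url(article_url))
    else:
        print("Article appears pay-walled, or the page layout has changed.")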
| 32.695652
| 171
| 0.630762
|
4a9977bc5abf7fb9e9471545f55fc07bb6feb6b4
| 3,979
|
py
|
Python
|
brat_multitask/iterators/balanced_bucket_iterator.py
|
jzbjyb/cmu-multinlp
|
c418aa4772f3266b6d2b2c5f02589f39df22a6d0
|
[
"BSD-3-Clause"
] | 80
|
2020-05-03T01:51:25.000Z
|
2022-03-27T03:41:18.000Z
|
brat_multitask/iterators/balanced_bucket_iterator.py
|
jzbjyb/cmu-multinlp
|
c418aa4772f3266b6d2b2c5f02589f39df22a6d0
|
[
"BSD-3-Clause"
] | 9
|
2020-05-06T22:11:07.000Z
|
2022-03-30T14:37:54.000Z
|
brat_multitask/iterators/balanced_bucket_iterator.py
|
jzbjyb/cmu-multinlp
|
c418aa4772f3266b6d2b2c5f02589f39df22a6d0
|
[
"BSD-3-Clause"
] | 9
|
2020-05-06T21:08:57.000Z
|
2021-09-28T09:30:54.000Z
|
import logging
from typing import List, Tuple, Iterable, Dict
from collections import defaultdict
from overrides import overrides
from allennlp.common.util import lazy_groups_of
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.data.iterators.bucket_iterator import sort_by_padding
from .filter_bucket_iterator import FilterBucketIterator
import numpy as np
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def split_by_task(instance_li: List[Instance], task_namespace: str) -> Dict[str, List[Instance]]:
result = {}
for inst in instance_li:
task = inst[task_namespace].label
if task not in result:
result[task] = []
result[task].append(inst)
return result
def interleave_by_task(inst_li_by_task: Dict[str, List[Instance]], num_per_task: Dict[str, int]):
task_len_li = [(k, len(inst_li_by_task[k])) for k in inst_li_by_task]
num_inst = np.sum([l[1] for l in task_len_li])
ideal_dist = np.array([l[1] / num_inst for l in task_len_li])
task_ind = np.zeros_like(ideal_dist, dtype=int)
result = []
total = 0
while total < num_inst:
task = np.argmax(ideal_dist - task_ind / (np.sum(task_ind) + 1e-5))
task_name = task_len_li[task][0]
li = inst_li_by_task[task_name][task_ind[task]:task_ind[task] + num_per_task[task_name]]
result.extend(li)
task_ind[task] += len(li)
total += len(li)
for i, tl in enumerate(task_ind):
assert len(inst_li_by_task[task_len_li[i][0]]) == tl, 'task interleave failure'
return result
@DataIterator.register('balanced_bucket')
class BalancedBucketIterator(FilterBucketIterator):
def __init__(self,
sorting_keys: List[Tuple[str, str]],
task_namespace: str = 'task_labels',
# number of samples to draw successively for each task during interleaving
num_interleave_per_task: Dict[str, int] = None,
padding_noise: float = 0.1,
biggest_batch_first: bool = False,
batch_size: int = 32,
instances_per_epoch: int = None,
max_instances_in_memory: int = None,
cache_instances: bool = False,
track_epoch: bool = False,
maximum_samples_per_batch: Tuple[str, int] = None) -> None:
'''
Use with multitask learning to make the number of samples
from different tasks balanced in a batch.
'''
super().__init__(sorting_keys,
padding_noise=padding_noise,
biggest_batch_first=biggest_batch_first,
batch_size=batch_size,
instances_per_epoch=instances_per_epoch,
max_instances_in_memory=max_instances_in_memory,
cache_instances=cache_instances,
track_epoch=track_epoch,
maximum_samples_per_batch=maximum_samples_per_batch)
self._task_namespace = task_namespace
if num_interleave_per_task is None:
num_interleave_per_task = defaultdict(lambda: 1)
self._num_interleave_per_task = num_interleave_per_task
@overrides
def _instance_list_to_batch(self, instances: List[Instance]) -> Iterable[List[Instance]]:
# split instances by tasks
inst_li_by_task = split_by_task(instances, task_namespace=self._task_namespace)
inst_li_by_task = dict((k, sort_by_padding(
inst_li_by_task[k], self._sorting_keys, self.vocab, self._padding_noise)) for k in inst_li_by_task)
# interleave instances from different tasks uniformly
instance_list = interleave_by_task(inst_li_by_task, self._num_interleave_per_task)
# create batches
yield from lazy_groups_of(iter(instance_list), self._batch_size)
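# --- Illustrative sketch (editorial addition, not part of the original module) ---
# interleave_by_task() only needs a dict of per-task lists, so it can be
# exercised without building AllenNLP Instances. The task names and draw
# sizes below are made up for the demo; items are mixed roughly in proportion
# to each task's share of the data.
def _demo_interleave():
    inst_li_by_task = {
        'ner': ['ner_%d' % i for i in range(4)],
        'srl': ['srl_%d' % i for i in range(2)],
    }
    num_per_task = {'ner': 1, 'srl': 1}
    mixed = interleave_by_task(inst_li_by_task, num_per_task)
    # Every instance is kept exactly once, just reordered.
    assert sorted(mixed) == sorted(inst_li_by_task['ner'] + inst_li_by_task['srl'])
    print(mixed)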
| 42.329787
| 111
| 0.661975
|
7f592a7a4c08772f17e05594f6d76c5297dd81c4
| 890
|
py
|
Python
|
rolemanagement/__init__.py
|
emshov74/SinbadCogs
|
ebf7b44436a8db877151836c6fcb394445fb3c5b
|
[
"Apache-2.0"
] | 78
|
2017-08-08T04:36:11.000Z
|
2022-01-16T19:44:28.000Z
|
rolemanagement/__init__.py
|
emshov74/SinbadCogs
|
ebf7b44436a8db877151836c6fcb394445fb3c5b
|
[
"Apache-2.0"
] | 193
|
2017-03-22T06:43:48.000Z
|
2021-08-05T17:58:23.000Z
|
rolemanagement/__init__.py
|
emshov74/SinbadCogs
|
ebf7b44436a8db877151836c6fcb394445fb3c5b
|
[
"Apache-2.0"
] | 101
|
2017-03-23T07:34:21.000Z
|
2022-03-14T09:06:29.000Z
|
# Copyright 2017-present Michael Hall
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import RoleManagement
__red_end_user_data_statement__ = (
"This cog does not persistently store end user data. "
"This cog does store discord IDs as needed for operation."
)
async def setup(bot):
cog = RoleManagement(bot)
bot.add_cog(cog)
cog.init()
| 31.785714
| 76
| 0.730337
|
ce33764b5eef55cfa114cbe004e42f8c6afb595d
| 5,182
|
py
|
Python
|
model.py
|
JagjitBhatia-UIC/3D-ResNets-PyTorch
|
d02078b5042cbfed7c81fae711901d019fe97c7d
|
[
"MIT"
] | null | null | null |
model.py
|
JagjitBhatia-UIC/3D-ResNets-PyTorch
|
d02078b5042cbfed7c81fae711901d019fe97c7d
|
[
"MIT"
] | null | null | null |
model.py
|
JagjitBhatia-UIC/3D-ResNets-PyTorch
|
d02078b5042cbfed7c81fae711901d019fe97c7d
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from models import resnet, resnet2p1d, pre_act_resnet, wide_resnet, resnext, densenet
def get_module_name(name):
name = name.split('.')
if name[0] == 'module':
i = 1
else:
i = 0
if name[i] == 'features':
i += 1
return name[i]
def get_fine_tuning_parameters(model, ft_begin_module):
if not ft_begin_module:
return model.parameters()
parameters = []
add_flag = False
for k, v in model.named_parameters():
if ft_begin_module == get_module_name(k):
add_flag = True
if add_flag:
parameters.append({'params': v})
return parameters
def generate_model(opt):
assert opt.model in [
'resnet', 'resnet2p1d', 'preresnet', 'wideresnet', 'resnext', 'densenet'
]
if opt.model == 'resnet':
model = resnet.generate_model(model_depth=opt.model_depth,
n_classes=opt.n_classes,
n_input_channels=opt.n_input_channels,
shortcut_type=opt.resnet_shortcut,
conv1_t_size=opt.conv1_t_size,
conv1_t_stride=opt.conv1_t_stride,
no_max_pool=opt.no_max_pool,
widen_factor=opt.resnet_widen_factor)
elif opt.model == 'resnet2p1d':
model = resnet2p1d.generate_model(model_depth=opt.model_depth,
n_classes=opt.n_classes,
n_input_channels=opt.n_input_channels,
shortcut_type=opt.resnet_shortcut,
conv1_t_size=opt.conv1_t_size,
conv1_t_stride=opt.conv1_t_stride,
no_max_pool=opt.no_max_pool,
widen_factor=opt.resnet_widen_factor)
elif opt.model == 'wideresnet':
model = wide_resnet.generate_model(
model_depth=opt.model_depth,
k=opt.wide_resnet_k,
n_classes=opt.n_classes,
n_input_channels=opt.n_input_channels,
shortcut_type=opt.resnet_shortcut,
conv1_t_size=opt.conv1_t_size,
conv1_t_stride=opt.conv1_t_stride,
no_max_pool=opt.no_max_pool)
elif opt.model == 'resnext':
model = resnext.generate_model(model_depth=opt.model_depth,
cardinality=opt.resnext_cardinality,
n_classes=opt.n_classes,
n_input_channels=opt.n_input_channels,
shortcut_type=opt.resnet_shortcut,
conv1_t_size=opt.conv1_t_size,
conv1_t_stride=opt.conv1_t_stride,
no_max_pool=opt.no_max_pool)
elif opt.model == 'preresnet':
model = pre_act_resnet.generate_model(
model_depth=opt.model_depth,
n_classes=opt.n_classes,
n_input_channels=opt.n_input_channels,
shortcut_type=opt.resnet_shortcut,
conv1_t_size=opt.conv1_t_size,
conv1_t_stride=opt.conv1_t_stride,
no_max_pool=opt.no_max_pool)
elif opt.model == 'densenet':
model = densenet.generate_model(model_depth=opt.model_depth,
n_classes=opt.n_classes,
n_input_channels=opt.n_input_channels,
conv1_t_size=opt.conv1_t_size,
conv1_t_stride=opt.conv1_t_stride,
no_max_pool=opt.no_max_pool)
return model
def load_pretrained_model(model, pretrain_path, model_name, n_finetune_classes):
if pretrain_path:
print('loading pretrained model {}'.format(pretrain_path))
pretrain = torch.load(pretrain_path, map_location='cpu')
model.load_state_dict(pretrain['state_dict'], strict=False)
tmp_model = model
if model_name == 'densenet':
tmp_model.classifier = nn.Linear(tmp_model.classifier.in_features,
n_finetune_classes)
else:
tmp_model.fc = nn.Linear(tmp_model.fc.in_features,
n_finetune_classes)
return model
def make_data_parallel(model, is_distributed, device):
if is_distributed:
if device.type == 'cuda' and device.index is not None:
torch.cuda.set_device(device)
model.to(device)
model = nn.parallel.DistributedDataParallel(model,
device_ids=[device])
else:
model.to(device)
model = nn.parallel.DistributedDataParallel(model)
elif device.type == 'cuda':
model = nn.DataParallel(model, device_ids=None).cuda()
return model
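# Hedged usage sketch (not part of the original file). The attribute names
# below are taken from the generate_model() branches above; the concrete
# values (depth, class count, shortcut type) are assumptions that must match
# models.resnet in this repository.
if __name__ == '__main__':
    from argparse import Namespace

    _opt = Namespace(model='resnet', model_depth=18, n_classes=400,
                     n_input_channels=3, resnet_shortcut='B',
                     conv1_t_size=7, conv1_t_stride=1, no_max_pool=False,
                     resnet_widen_factor=1.0)
    _model = generate_model(_opt)
    # With pretrain_path=None this is a no-op; pass a checkpoint path to
    # actually load weights and swap the classification head.
    _model = load_pretrained_model(_model, pretrain_path=None,
                                   model_name=_opt.model,
                                   n_finetune_classes=_opt.n_classes)
    _model = make_data_parallel(_model, is_distributed=False,
                                device=torch.device('cpu'))
    print(type(_model).__name__)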
| 40.170543
| 85
| 0.540332
|
ca34a5a5a848fce080d6c64295dfe1a6dc663cf7
| 1,968
|
py
|
Python
|
skl2onnx/operator_converters/__init__.py
|
PossieMP/https-github.com-onnx-sklearn-onnx
|
48c60398d38e6937897d7c1506a8dcfcf28830a2
|
[
"MIT"
] | 1
|
2019-05-06T20:54:02.000Z
|
2019-05-06T20:54:02.000Z
|
skl2onnx/operator_converters/__init__.py
|
PossieMP/https-github.com-onnx-sklearn-onnx
|
48c60398d38e6937897d7c1506a8dcfcf28830a2
|
[
"MIT"
] | null | null | null |
skl2onnx/operator_converters/__init__.py
|
PossieMP/https-github.com-onnx-sklearn-onnx
|
48c60398d38e6937897d7c1506a8dcfcf28830a2
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# To register a converter for scikit-learn operators,
# import associated modules here.
from . import AdaBoost
from . import ArrayFeatureExtractor
from . import Binarizer
from . import CalibratedClassifierCV
from . import Concat
from . import DecisionTree
from . import DictVectorizer
from . import FeatureSelection
from . import Flatten
from . import FunctionTransformer
from . import GradientBoosting
from . import Imputer
from . import KBinsDiscretiser
from . import KMeans
from . import KNN
from . import LabelBinariser
from . import LabelEncoder
from . import LinearClassifier
from . import LinearRegressor
from . import multilayer_perceptron
from . import NaiveBayes
from . import Normalizer
from . import OneHotEncoder
from . import OneVsRestClassifier
from . import PolynomialFeatures
from . import RandomForest
from . import Scaler
from . import sgd_classifier
from . import SVD
from . import SVM
from . import TextVectorizer
from . import TfIdfTransformer
from . import VotingClassifier
from . import ZipMap
__all__ = [
AdaBoost,
ArrayFeatureExtractor,
Binarizer,
CalibratedClassifierCV,
Concat,
DecisionTree,
DictVectorizer,
FeatureSelection,
Flatten,
FunctionTransformer,
GradientBoosting,
Imputer,
KBinsDiscretiser,
KMeans,
KNN,
LabelBinariser,
LabelEncoder,
LinearClassifier,
LinearRegressor,
multilayer_perceptron,
NaiveBayes,
Normalizer,
OneHotEncoder,
OneVsRestClassifier,
PolynomialFeatures,
RandomForest,
Scaler,
sgd_classifier,
SVD,
SVM,
TextVectorizer,
TfIdfTransformer,
VotingClassifier,
ZipMap,
]
| 24.6
| 76
| 0.70122
|
018c90018c6cd4beb5914c060d82b3289e2407ac
| 1,323
|
py
|
Python
|
extensions/interactions/NumberWithUnits/NumberWithUnits.py
|
Tim810306/oppia
|
6f90044d12dbe0979c999265cbe46f267c4c592d
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:53.000Z
|
2022-02-06T13:00:14.000Z
|
extensions/interactions/NumberWithUnits/NumberWithUnits.py
|
Tim810306/oppia
|
6f90044d12dbe0979c999265cbe46f267c4c592d
|
[
"Apache-2.0"
] | 80
|
2020-10-31T09:14:46.000Z
|
2021-01-12T23:38:15.000Z
|
extensions/interactions/NumberWithUnits/NumberWithUnits.py
|
Tim810306/oppia
|
6f90044d12dbe0979c999265cbe46f267c4c592d
|
[
"Apache-2.0"
] | 1
|
2020-12-09T21:33:49.000Z
|
2020-12-09T21:33:49.000Z
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python configuration for NumberWithUnits interaction."""
from __future__ import absolute_import
from __future__ import unicode_literals
from extensions.interactions import base
class NumberWithUnits(base.BaseInteraction):
"""Interaction for number with units."""
name = 'Number With Units'
description = 'Allows learners to enter number with units.'
display_mode = base.DISPLAY_MODE_INLINE
is_trainable = False
_dependency_ids = []
answer_type = 'NumberWithUnits'
instructions = None
narrow_instructions = None
needs_summary = False
can_have_solution = True
show_generic_submit_button = True
_customization_arg_specs = []
_answer_visualization_specs = []
| 33.923077
| 74
| 0.756614
|
c71e14d36de50954ab36ecd9a9a16f246a464a7f
| 340
|
py
|
Python
|
ctci/Chapter-1/String_Compression.py
|
atultherajput/playwithpython
|
7980a750a18b75728a96d42fe9d216a7a79334b6
|
[
"MIT"
] | null | null | null |
ctci/Chapter-1/String_Compression.py
|
atultherajput/playwithpython
|
7980a750a18b75728a96d42fe9d216a7a79334b6
|
[
"MIT"
] | null | null | null |
ctci/Chapter-1/String_Compression.py
|
atultherajput/playwithpython
|
7980a750a18b75728a96d42fe9d216a7a79334b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def StringCompression(string):
'''Count repeated no. of characters'''
count = 0
temp = string[0]
new_string = ''
for x in string+' ':
if temp == x:
count +=1
continue
new_string = new_string+temp+str(count)
temp = x
count = 1
print(new_string)
StringCompression("aabcccccaaa") #Output: a2b1c5a3
| 20
| 51
| 0.673529
|
82ce0563ecffd661e3ebea404b2e55533913731d
| 2,802
|
py
|
Python
|
python/oneflow/test/modules/test_empty.py
|
grybd/oneflow
|
82237ad096a10527591660c09b61444c42917e69
|
[
"Apache-2.0"
] | 3,285
|
2020-07-31T05:51:22.000Z
|
2022-03-31T15:20:16.000Z
|
python/oneflow/test/modules/test_empty.py
|
grybd/oneflow
|
82237ad096a10527591660c09b61444c42917e69
|
[
"Apache-2.0"
] | 2,417
|
2020-07-31T06:28:58.000Z
|
2022-03-31T23:04:14.000Z
|
python/oneflow/test/modules/test_empty.py
|
grybd/oneflow
|
82237ad096a10527591660c09b61444c42917e69
|
[
"Apache-2.0"
] | 520
|
2020-07-31T05:52:42.000Z
|
2022-03-29T02:38:11.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import oneflow as flow
from test_util import GenArgDict
def _test_local_empty(test_case, shape, dtype, device, requires_grad):
x = flow.empty(
shape,
dtype=dtype,
device=flow.device(device),
requires_grad=requires_grad if dtype == flow.float32 else False,
)
test_case.assertFalse(x.is_consistent)
test_case.assertEqual(x.shape, flow.Size(shape))
test_case.assertEqual(x.dtype, dtype)
test_case.assertEqual(x.device, flow.device(device))
if dtype == flow.float32:
test_case.assertEqual(x.requires_grad, requires_grad)
def _test_consistent_empty(test_case, shape, dtype, placement, sbp, requires_grad):
placement = flow.placement(placement, {0: [0]})
x = flow.empty(
shape,
dtype=dtype,
placement=placement,
sbp=sbp,
requires_grad=requires_grad if dtype == flow.float32 else False,
)
test_case.assertTrue(x.is_consistent)
test_case.assertEqual(x.shape, flow.Size(shape))
test_case.assertEqual(x.dtype, dtype)
test_case.assertEqual(x.placement, placement)
test_case.assertEqual(x.sbp[0], sbp)
if dtype == flow.float32:
test_case.assertEqual(x.requires_grad, requires_grad)
@flow.unittest.skip_unless_1n1d()
class TestEmptyOp(flow.unittest.TestCase):
def test_local_empty(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 3, 4, 5)]
arg_dict["dtype"] = [flow.float32, flow.int32]
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["requires_grad"] = [True, False]
for arg in GenArgDict(arg_dict):
_test_local_empty(test_case, **arg)
def test_consistent_empty(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 3, 4, 5)]
arg_dict["dtype"] = [flow.float32, flow.int32]
arg_dict["placement"] = ["cpu", "cuda"]
arg_dict["sbp"] = [flow.sbp.broadcast]
arg_dict["requires_grad"] = [True, False]
for arg in GenArgDict(arg_dict):
_test_consistent_empty(test_case, **arg)
if __name__ == "__main__":
unittest.main()
| 34.170732
| 83
| 0.683797
|
85489c964a35638be0b4f7e9398e0af56c941617
| 2,559
|
py
|
Python
|
hmkit/autoapi/properties/value/charging/departure_time.py
|
highmobility/hmkit-python
|
2ac06ed021b57014f5290eaece19a9399d52df48
|
[
"MIT"
] | 1
|
2021-08-01T20:35:57.000Z
|
2021-08-01T20:35:57.000Z
|
hmkit/autoapi/properties/value/charging/departure_time.py
|
highmobility/hmkit-python
|
2ac06ed021b57014f5290eaece19a9399d52df48
|
[
"MIT"
] | null | null | null |
hmkit/autoapi/properties/value/charging/departure_time.py
|
highmobility/hmkit-python
|
2ac06ed021b57014f5290eaece19a9399d52df48
|
[
"MIT"
] | null | null | null |
"""
The MIT License
Copyright (c) 2014- High-Mobility GmbH (https://high-mobility.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import datetime
import logging
from . import *
from ... import propertyvalue_object
log = logging.getLogger('hmkit.autoapi')
class DepartureTime(propertyvalue_object.PropertyValueObject):
"""
DepartureTime property which includes ActiveState(bool) and Time(hour and min)
"""
def __init__(self, propbytes):
"""
Property bytes will be parsed to get internal data values
:param bytearray propbytes: Property Component Bytes
:rtype: None
"""
if propbytes is not None:
log.debug("DepartureTime property " + " bytes Len:" + str(len(propbytes)) + " bytes: " + str(propbytes))
super().__init__(propbytes)
self.active_state = bool(propbytes[0])
hour = propbytes[1]
minutes = propbytes[2]
self.datetime_time = datetime.time(hour, minutes)
log.debug("DepartureTime property " + "Active_State: " + str(self.active_state) + " ,time: " + str(self.datetime_time))
return
    # need to handle construction with a method instead of __init__
def get_active_state(self):
"""
returns active state
:return: active_state
:rtype: bool
"""
return self.active_state
def get_time(self):
"""
returns time hour and mins
:return: time - hour and mins
:rtype: datetime.time
"""
return self.datetime_time
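# Hedged illustration (not part of the original module): the three property
# bytes parsed in __init__ are laid out as
#
#   byte 0 -> active state flag, byte 1 -> hour, byte 2 -> minutes
#
# so a payload of bytearray([0x01, 0x08, 0x1E]) would yield
# get_active_state() == True and get_time() == datetime.time(8, 30),
# assuming the PropertyValueObject base class accepts the raw bytes as-is.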
| 35.054795
| 132
| 0.69324
|
d28d7716d2116d5751ae02bc5320d002b8f0475c
| 3,473
|
py
|
Python
|
pipeline/engine/migrations/0018_auto_20190729_1041.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | 1
|
2020-10-13T03:04:53.000Z
|
2020-10-13T03:04:53.000Z
|
pipeline/engine/migrations/0018_auto_20190729_1041.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:46:54.000Z
|
2021-06-10T22:54:45.000Z
|
pipeline/engine/migrations/0018_auto_20190729_1041.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.11 on 2019-07-29 02:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('engine', '0017_auto_20190719_1010'),
]
operations = [
migrations.AlterField(
model_name='history',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='history',
name='data',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE,
to='engine.HistoryData'),
),
migrations.AlterField(
model_name='loopactivityhistory',
name='data',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE,
to='engine.HistoryData'),
),
migrations.AlterField(
model_name='historydata',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='nodecelerytask',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='noderelationship',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='processcelerytask',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='pipelineprocess',
name='snapshot',
field=models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.CASCADE,
to='engine.ProcessSnapshot'),
),
migrations.AlterField(
model_name='processsnapshot',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='schedulecelerytask',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='subprocessrelationship',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 40.383721
| 115
| 0.630579
|
5044bdb2f985f9398420568396ab79707e04ae31
| 10,879
|
py
|
Python
|
devstack/components/db.py
|
hagleitn/Openstack-Devstack2
|
88d3effc70c6479bba276856285dcb3974d76261
|
[
"Apache-2.0"
] | 1
|
2015-02-21T05:30:46.000Z
|
2015-02-21T05:30:46.000Z
|
devstack/components/db.py
|
hagleitn/Openstack-Devstack2
|
88d3effc70c6479bba276856285dcb3974d76261
|
[
"Apache-2.0"
] | null | null | null |
devstack/components/db.py
|
hagleitn/Openstack-Devstack2
|
88d3effc70c6479bba276856285dcb3974d76261
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devstack import component as comp
from devstack import exceptions as excp
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
import abc
LOG = logging.getLogger("devstack.components.db")
# Need to reset the pw to blank since these distributions don't seem to
# always reset it when you uninstall the db
RESET_BASE_PW = ''
# Links about how to reset if we fail to set the PW
SQL_RESET_PW_LINKS = [
'https://help.ubuntu.com/community/MysqlPasswordReset',
'http://dev.mysql.com/doc/refman/5.0/en/resetting-permissions.html',
]
# Used as a generic error message
BASE_ERROR = 'Currently we do not know how to [%s] for database type [%s]'
# PW keys we warm up so u won't be prompted later
PASSWORD_PROMPT = 'the database user'
WARMUP_PWS = [('sql', PASSWORD_PROMPT)]
class DBUninstaller(comp.PkgUninstallComponent):
def __init__(self, *args, **kargs):
comp.PkgUninstallComponent.__init__(self, *args, **kargs)
self.runtime = DBRuntime(*args, **kargs)
def warm_configs(self):
for key, prompt in WARMUP_PWS:
self.pw_gen.get_password(key, prompt)
def pre_uninstall(self):
dbtype = self.cfg.get("db", "type")
dbactions = self.distro.get_command(dbtype, quiet=True)
try:
if dbactions:
LOG.info(("Attempting to reset your db password to \"%s\" so"
" that we can set it the next time you install.") % (RESET_BASE_PW))
pwd_cmd = dbactions.get('set_pwd')
if pwd_cmd:
LOG.info("Ensuring your database is started before we operate on it.")
self.runtime.restart()
params = {
'OLD_PASSWORD': self.pw_gen.get_password('sql', PASSWORD_PROMPT),
'NEW_PASSWORD': RESET_BASE_PW,
'USER': self.cfg.getdefaulted("db", "sql_user", 'root'),
}
cmds = [{'cmd': pwd_cmd}]
utils.execute_template(*cmds, params=params, shell=True)
except IOError:
LOG.warn(("Could not reset the database password. You might have to manually "
"reset the password to \"%s\" before the next install") % (RESET_BASE_PW))
LOG.info("To aid in this check out: [%s]", " or ".join(SQL_RESET_PW_LINKS))
class DBInstaller(comp.PkgInstallComponent):
    __metaclass__ = abc.ABCMeta
def __init__(self, *args, **kargs):
comp.PkgInstallComponent.__init__(self, *args, **kargs)
self.runtime = DBRuntime(*args, **kargs)
def _get_param_map(self, config_fn):
# This dictionary will be used for parameter replacement
# In pre-install and post-install sections
host_ip = self.cfg.get('host', 'ip')
out = {
'PASSWORD': self.pw_gen.get_password("sql", PASSWORD_PROMPT),
'BOOT_START': ("%s" % (True)).lower(),
'USER': self.cfg.getdefaulted("db", "sql_user", 'root'),
'SERVICE_HOST': host_ip,
'HOST_IP': host_ip
}
return out
def warm_configs(self):
for key, prompt in WARMUP_PWS:
self.pw_gen.get_password(key, prompt)
@abc.abstractmethod
def _configure_db_confs(self):
pass
def post_install(self):
comp.PkgInstallComponent.post_install(self)
# Fix up the db configs
self._configure_db_confs()
# Extra actions to ensure we are granted access
dbtype = self.cfg.get("db", "type")
dbactions = self.distro.get_command(dbtype, quiet=True)
# Set your password
try:
if dbactions:
pwd_cmd = dbactions.get('set_pwd')
if pwd_cmd:
LOG.info(("Attempting to set your db password"
" just incase it wasn't set previously."))
LOG.info("Ensuring your database is started before we operate on it.")
self.runtime.restart()
params = {
'NEW_PASSWORD': self.pw_gen.get_password("sql", PASSWORD_PROMPT),
'USER': self.cfg.getdefaulted("db", "sql_user", 'root'),
'OLD_PASSWORD': RESET_BASE_PW,
}
cmds = [{'cmd': pwd_cmd}]
utils.execute_template(*cmds, params=params, shell=True)
except IOError:
LOG.warn(("Couldn't set your db password. It might have already been "
"set by a previous process."))
# Ensure access granted
if dbactions:
grant_cmd = dbactions.get('grant_all')
if grant_cmd:
user = self.cfg.getdefaulted("db", "sql_user", 'root')
LOG.info("Updating the DB to give user '%s' full control of all databases." % (user))
LOG.info("Ensuring your database is started before we operate on it.")
self.runtime.restart()
params = {
'PASSWORD': self.pw_gen.get_password("sql", PASSWORD_PROMPT),
'USER': user,
}
cmds = [{'cmd': grant_cmd}]
# Shell seems to be needed here
                # since python escapes this too much...
utils.execute_template(*cmds, params=params, shell=True)
class DBRuntime(comp.EmptyRuntime):
def __init__(self, *args, **kargs):
comp.EmptyRuntime.__init__(self, *args, **kargs)
self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
def _get_run_actions(self, act, exception_cls):
dbtype = self.cfg.get("db", "type")
distro_options = self.distro.get_command(dbtype)
if distro_options is None:
msg = BASE_ERROR % (act, dbtype)
raise NotImplementedError(msg)
return distro_options.get(act)
def start(self):
if self.status() != comp.STATUS_STARTED:
startcmd = self._get_run_actions('start', excp.StartException)
sh.execute(*startcmd,
run_as_root=True,
check_exit_code=True)
LOG.info("Please wait %s seconds while it starts up." % self.wait_time)
sh.sleep(self.wait_time)
return 1
else:
return 0
def stop(self):
if self.status() != comp.STATUS_STOPPED:
stopcmd = self._get_run_actions('stop', excp.StopException)
sh.execute(*stopcmd,
run_as_root=True,
check_exit_code=True)
return 1
else:
return 0
def restart(self):
LOG.info("Restarting your database.")
restartcmd = self._get_run_actions('restart', excp.RestartException)
sh.execute(*restartcmd,
run_as_root=True,
check_exit_code=True)
LOG.info("Please wait %s seconds while it restarts." % self.wait_time)
sh.sleep(self.wait_time)
return 1
def status(self):
statuscmd = self._get_run_actions('status', excp.StatusException)
run_result = sh.execute(*statuscmd,
run_as_root=True,
check_exit_code=False)
if not run_result:
return comp.STATUS_UNKNOWN
(sysout, stderr) = run_result
combined = str(sysout) + str(stderr)
combined = combined.lower()
if combined.find("running") != -1:
return comp.STATUS_STARTED
elif combined.find("stop") != -1 or \
combined.find('unrecognized') != -1:
return comp.STATUS_STOPPED
else:
return comp.STATUS_UNKNOWN
def drop_db(cfg, pw_gen, distro, dbname):
dbtype = cfg.get("db", "type")
dbactions = distro.get_command(dbtype)
if dbactions and dbactions.get('drop_db'):
dropcmd = dbactions.get('drop_db')
params = dict()
params['PASSWORD'] = pw_gen.get_password("sql", PASSWORD_PROMPT)
params['USER'] = cfg.getdefaulted("db", "sql_user", 'root')
params['DB'] = dbname
cmds = list()
cmds.append({
'cmd': dropcmd,
'run_as_root': False,
})
utils.execute_template(*cmds, params=params)
else:
msg = BASE_ERROR % ('drop', dbtype)
raise NotImplementedError(msg)
def create_db(cfg, pw_gen, distro, dbname):
dbtype = cfg.get("db", "type")
dbactions = distro.get_command(dbtype)
if dbactions and dbactions.get('create_db'):
createcmd = dbactions.get('create_db')
params = dict()
params['PASSWORD'] = pw_gen.get_password("sql", PASSWORD_PROMPT)
params['USER'] = cfg.getdefaulted("db", "sql_user", 'root')
params['DB'] = dbname
cmds = list()
cmds.append({
'cmd': createcmd,
'run_as_root': False,
})
utils.execute_template(*cmds, params=params)
else:
msg = BASE_ERROR % ('create', dbtype)
raise NotImplementedError(msg)
def fetch_dbdsn(config, pw_gen, dbname=''):
"""Return the database connection string, including password."""
user = config.get("db", "sql_user")
host = config.get("db", "sql_host")
port = config.get("db", "port")
pw = pw_gen.get_password("sql", PASSWORD_PROMPT)
#form the dsn (from components we have...)
#dsn = "<driver>://<username>:<password>@<host>:<port>/<database>"
if not host:
msg = "Unable to fetch a database dsn - no sql host found"
raise excp.BadParamException(msg)
driver = config.get("db", "type")
if not driver:
msg = "Unable to fetch a database dsn - no db driver type found"
raise excp.BadParamException(msg)
dsn = driver + "://"
if user:
dsn += user
if pw:
dsn += ":" + pw
if user or pw:
dsn += "@"
dsn += host
if port:
dsn += ":" + port
if dbname:
dsn += "/" + dbname
else:
dsn += "/"
LOG.debug("For database [%s] fetched dsn [%s]" % (dbname, dsn))
return dsn
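# Worked example (hedged, illustrative config values only): with type=mysql,
# sql_user=root, sql_host=127.0.0.1, port=3306 and a password of "sqlpass",
# fetch_dbdsn(cfg, pw_gen, dbname='nova') assembles
#
#   mysql://root:sqlpass@127.0.0.1:3306/nova
#
# and "mysql://root:sqlpass@127.0.0.1:3306/" when dbname is left empty.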
| 37.643599
| 101
| 0.583969
|
f48772403a7be2db612f6d06ca03ac993e48ea94
| 58
|
py
|
Python
|
src/aio_net_events/backends/__init__.py
|
ntamas/aio-net-events
|
a4fd2882f151008fb4351643fb5c1e0fe8fa0f5b
|
[
"MIT"
] | 1
|
2020-04-08T21:32:58.000Z
|
2020-04-08T21:32:58.000Z
|
src/aio_net_events/backends/__init__.py
|
ntamas/aio-net-events
|
a4fd2882f151008fb4351643fb5c1e0fe8fa0f5b
|
[
"MIT"
] | 1
|
2020-03-24T16:45:54.000Z
|
2020-03-24T16:45:54.000Z
|
src/aio_net_events/backends/__init__.py
|
ntamas/aio-net-events
|
a4fd2882f151008fb4351643fb5c1e0fe8fa0f5b
|
[
"MIT"
] | null | null | null |
"""Network event detector backends for aio_net_events."""
| 29
| 57
| 0.775862
|
bb62b5b55668e6129f2ba4a2afcf5116acdc39b5
| 1,779
|
py
|
Python
|
examples/gcmc/input.py
|
sinamoeini/mapp4py
|
923ef57ee5bdb6231bec2885c09a58993b6c0f1f
|
[
"MIT"
] | 3
|
2018-06-06T05:43:36.000Z
|
2020-07-18T14:31:37.000Z
|
examples/gcmc/input.py
|
sinamoeini/mapp4py
|
923ef57ee5bdb6231bec2885c09a58993b6c0f1f
|
[
"MIT"
] | null | null | null |
examples/gcmc/input.py
|
sinamoeini/mapp4py
|
923ef57ee5bdb6231bec2885c09a58993b6c0f1f
|
[
"MIT"
] | 7
|
2018-01-16T03:21:20.000Z
|
2020-07-20T19:36:13.000Z
|
#######################################################################################
# [M] = a.m.u
# [L] = Angs
# [U] = [ML^2/T^2] = eV
#
# therefore
# [T] = sqrt(a.m.u/eV)*Angs
#
#
# a.m.u = 1.66053904020e-27 Kg
# Angs = 1.0e-10 m
# eV = 1.60217656535e-19 Kg m^2/s^2
# therefore
# [T] = sqrt(1.66053904020/1.60217656535)*1.0e-14 s
#
# kB = 8.617330350e-5 eV/K
# h = 4.13566766225e-15 eVs
# h = 4.13566766225 * 0.1 * sqrt(1.60217656535/1.66053904020) sqrt(eV*a.m.u)*Angs
#######################################################################################
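# Quick numeric check of the derivation above (hedged, not part of the
# original input script): the internal time unit works out to
#   [T] = sqrt(1.66053904020 / 1.60217656535) * 1.0e-14 s ~= 1.018e-14 s
# which is the same conversion factor folded into `planck` below.
_time_unit_seconds = (1.66053904020 / 1.60217656535) ** 0.5 * 1.0e-14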
import time
import os
import subprocess
proc = subprocess.Popen('rm -rf dumps/*', shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
from math import sqrt
boltz = 8.617330350e-5
planck = 4.13566766225 * 0.1 * sqrt(1.60217656535/1.66053904020)
import mapp
from mapp import md
mapp.pause_slave_out();
################################################################
min=md.min_cg(e_tol=1.0e-8);
min.ntally=10000;
min.ls=mapp.ls_brent();
################################################################
muvt=md.muvt(-2.33,300,0.1,'H',2411895);
muvt.nevery=100;
muvt.nattempts=10000;
muvt.ntally=1000;
muvt.export=md.export_cfg('dumps/dump',10000)
################################################################
sim=md.atoms.import_cfg("configs/Fe.cfg")
sim.add_elem('H',1.007940)
sim.ff_eam_fs("potentials/FeH.eam.fs")
sim.hP=planck
sim.kB=boltz
min.run(sim,500000)
min.H_dof=[[True],[False,False],[False,False,True]]
min.affine=True
min.run(sim,500000)
sim.create_temp(300.0,8569643);
sim.step=0
start = time.time()
muvt.run(sim,50000000);
print "time elapsed: %lf seconds" % (time.time()-start)
| 21.962963
| 87
| 0.510961
|
952c4dcd76ef8b6a8e0a147ee537a916fd423b40
| 295
|
py
|
Python
|
fdp/methods/nstxu/mpts/change_units.py
|
Fusion-Data-Platform/fdp
|
d87a52207238f168ed69b9f96dc8f20f4481366d
|
[
"MIT"
] | 10
|
2015-12-18T22:38:07.000Z
|
2020-03-02T09:15:50.000Z
|
fdp/methods/nstxu/mpts/change_units.py
|
Fusion-Data-Platform/fdp
|
d87a52207238f168ed69b9f96dc8f20f4481366d
|
[
"MIT"
] | 14
|
2015-12-07T16:41:48.000Z
|
2019-01-18T17:48:55.000Z
|
fdp/methods/nstxu/mpts/change_units.py
|
Fusion-Data-Platform/fdp
|
d87a52207238f168ed69b9f96dc8f20f4481366d
|
[
"MIT"
] | 5
|
2016-05-20T17:35:23.000Z
|
2019-01-17T19:00:06.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 11:59:54 2015
@author: ktritz
"""
def change_units(signal, data):
if signal.units == 'cm':
data /= 100.
signal.units = 'm'
if signal.units == 'cm^-3':
data *= 1.e6
signal.units = 'm^-3'
return data
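# Hedged usage sketch (not part of the original module): any object with a
# mutable `units` attribute works here, since change_units only inspects and
# rewrites that attribute while rescaling the data it is given.
if __name__ == '__main__':
    from types import SimpleNamespace

    _signal = SimpleNamespace(units='cm')
    print(change_units(_signal, 250.0), _signal.units)  # -> 2.5 m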
| 17.352941
| 35
| 0.522034
|
961865559cd4c4ac445f2b90a43beba1d4835b45
| 966
|
py
|
Python
|
commands/fix_code_issue.py
|
ikust/omnisharp-sublime
|
423348b4461ba916141cb16105fcab86961709f0
|
[
"MIT"
] | 424
|
2015-01-01T22:54:30.000Z
|
2022-03-16T02:32:02.000Z
|
commands/fix_code_issue.py
|
ikust/omnisharp-sublime
|
423348b4461ba916141cb16105fcab86961709f0
|
[
"MIT"
] | 160
|
2015-01-02T11:28:14.000Z
|
2022-01-27T18:09:35.000Z
|
commands/fix_code_issue.py
|
ikust/omnisharp-sublime
|
423348b4461ba916141cb16105fcab86961709f0
|
[
"MIT"
] | 81
|
2015-01-14T19:24:50.000Z
|
2022-03-12T11:05:22.000Z
|
import os
import sublime
import sublime_plugin
from ..lib import omnisharp
from ..lib import helpers
class OmniSharpFixCodeIssue(sublime_plugin.TextCommand):
data = None
def run(self, edit):
if self.data is None:
omnisharp.get_response(
self.view, '/fixcodeissue', self._handle_fixcodeissue)
else:
self._fixcodeissue(edit)
def _handle_fixcodeissue(self, data):
print('fixcodeissue response is:')
print(data)
if data is None:
return
self.data = data
self.view.run_command('omni_sharp_fix_code_issue')
def _fixcodeissue(self, edit):
print('fixcodeissue is :')
print(self.data)
if self.data != None:
region = sublime.Region(0, self.view.size())
self.view.replace(edit, region, self.data["Text"])
self.data = None
def is_enabled(self):
return helpers.is_csharp(self.view)
| 26.108108
| 69
| 0.622153
|
1364db95adddcf8a51a4d4cf50c1aabb2aca9a93
| 3,726
|
py
|
Python
|
PaperExperiments/XHExp008/parameters.py
|
stefan-c-kremer/TE_World2
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
[
"MIT"
] | null | null | null |
PaperExperiments/XHExp008/parameters.py
|
stefan-c-kremer/TE_World2
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
[
"MIT"
] | null | null | null |
PaperExperiments/XHExp008/parameters.py
|
stefan-c-kremer/TE_World2
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
[
"MIT"
] | null | null | null |
# parameters.py
"""
Exp 08 - {'Initial_genes': '500', 'Host_mutation_rate': '0.03', 'TE_progeny': '0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3', 'TE_Insertion_Distribution': 'Triangle( pmax=0, pzero=3.0/3.0 )', 'Carrying_capacity': '30', 'TE_excision_rate': '0.5', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Triangle( pzero=1.0/3.0, pmax=1 )', 'mutation_effect': '0.01', 'TE_death_rate': '0.005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Triangle( pmax=0, pzero=3.0/3.0 );
Gene_Insertion_Distribution = Triangle( pzero=1.0/3.0, pmax=1 );
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.005;
TE_excision_rate = 0.5; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3 );
Initial_genes = 500;
Append_gene = True; # True: when the initialization routine tries to place
                    # a gene inside another gene, it instead appends it
                    # at the end of the original gene (use this with small
                    # amounts of Junk_BP).
                    # False: when the initialization routine tries to place
                    # a gene inside another gene, try to place it somewhere
                    # else again (don't use this option with small amounts
                    # of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.03;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.01,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.01
);
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.01,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.01
);
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
              # if saved = string, then we open that file and resume a simulation
| 38.8125
| 374
| 0.647343
|
5267c80ad990fd6d9791cab037f0ec1a52e75805
| 737
|
py
|
Python
|
jp.atcoder/abc058/arc071_b/26033791.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc058/arc071_b/26033791.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc058/arc071_b/26033791.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
import typing
import numba as nb
import numpy as np
@nb.njit((nb.i8[:],) * 2, cache=True)
def solve(x: np.ndarray, y: np.ndarray) -> typing.NoReturn:
mod = 10**9 + 7
def calc_sum(x):
n = len(x)
i = np.arange(n - 1)
s = np.sum((i + 1) * x[i + 1] % mod) % mod
s -= np.sum((n - 1 - i) * x[i] % mod) % mod
return s % mod
s = calc_sum(x) * calc_sum(y) % mod
print(s)
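# Note (hedged, derived from calc_sum above): for an array x of length n,
# calc_sum(x) equals the sum over index pairs i < j of (x[j] - x[i]) mod 1e9+7,
# because x[k] is counted +k times as the right endpoint and -(n-1-k) times as
# the left one. When x is sorted ascending (as in this problem's input), that
# is the total pairwise distance, e.g. x = [1, 3, 4] -> (3-1)+(4-1)+(4-3) = 6.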
def main() -> typing.NoReturn:
n, m = map(int, input().split())
x = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
y = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
solve(x, y)
main()
| 19.918919
| 60
| 0.477612
|
7fd45394a0c7973cae0a5758c8eb51a80f7ba6d3
| 7,532
|
py
|
Python
|
examples/train_BCIC_IV_2a_intra-subject_cross-session.py
|
gzoumpourlis/braindecode
|
6bd595a146d0854541ff02b4483c011a394fdf0a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/train_BCIC_IV_2a_intra-subject_cross-session.py
|
gzoumpourlis/braindecode
|
6bd595a146d0854541ff02b4483c011a394fdf0a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/train_BCIC_IV_2a_intra-subject_cross-session.py
|
gzoumpourlis/braindecode
|
6bd595a146d0854541ff02b4483c011a394fdf0a
|
[
"BSD-3-Clause"
] | null | null | null |
from braindecode.datasets.moabb import MOABBDataset
from braindecode.datautil.preprocess import (exponential_moving_standardize, preprocess, Preprocessor)
import numpy as np
from braindecode.datautil.windowers import create_windows_from_events
import torch
from braindecode.util import set_random_seeds
from braindecode.models import * #ShallowFBCSPNet, EEGNetv1
from skorch.callbacks import LRScheduler
from skorch.helper import predefined_split
from braindecode import EEGClassifier
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import pandas as pd
import mne
import moabb
mne.set_log_level(False)
moabb.set_log_level(False)
######################################################################
def train(subject_id):
subject_range = [subject_id]
##### subject_range = [x for x in range(1, 10)]
dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=subject_range)
######################################################################
# Preprocessing
low_cut_hz = 4. # low cut frequency for filtering
high_cut_hz = 38. # high cut frequency for filtering
# Parameters for exponential moving standardization
factor_new = 1e-3
init_block_size = 1000
preprocessors = [
Preprocessor('pick_types', eeg=True, eog=False, meg=False, stim=False), # Keep EEG sensors
Preprocessor(lambda x: x * 1e6), # Convert from V to uV
Preprocessor('filter', l_freq=low_cut_hz, h_freq=high_cut_hz), # Bandpass filter
Preprocessor('set_eeg_reference', ref_channels='average', ch_type='eeg'),
Preprocessor('resample', sfreq=125),
## Preprocessor(exponential_moving_standardize, # Exponential moving standardization
## factor_new=factor_new, init_block_size=init_block_size)
## Preprocessor('pick_channels', ch_names=short_ch_names, ordered=True),
]
# Transform the data
preprocess(dataset, preprocessors)
######################################################################
# Cut Compute Windows
# ~~~~~~~~~~~~~~~~~~~
trial_start_offset_seconds = -0.0
# Extract sampling frequency, check that they are same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])
# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)
# Create windows using braindecode function for this. It needs parameters to define how
# trials should be used.
windows_dataset = create_windows_from_events(
dataset,
# picks=["Fz", "FC3", "FC1", "FCz", "FC2", "FC4", "C5", "C3", "C1", "Cz", "C2", "C4", "C6", "CP3", "CP1", "CPz", "CP2", "CP4", "P1", "Pz", "P2", "POz"],
trial_start_offset_samples=trial_start_offset_samples,
trial_stop_offset_samples=0,
preload=True,
)
######################################################################
# Split dataset into train and valid
splitted = windows_dataset.split('session')
train_set = splitted['session_T']
valid_set = splitted['session_E']
######################################################################
# Create model
cuda = torch.cuda.is_available() # check if GPU is available, if True chooses to use it
device = 'cuda' if cuda else 'cpu'
if cuda:
torch.backends.cudnn.benchmark = True
seed = 20200220 # random seed to make results reproducible
# Set random seed to be able to reproduce results
set_random_seeds(seed=seed, cuda=cuda)
n_classes = 4
# Extract number of chans and time steps from dataset
n_chans = train_set[0][0].shape[0]
input_window_samples = train_set[0][0].shape[1]
model = ShallowFBCSPNet(
n_chans,
n_classes,
input_window_samples=input_window_samples,
final_conv_length='auto')
"""
model = EEGNetv1(
n_chans,
n_classes,
input_window_samples=input_window_samples,
final_conv_length="auto",
pool_mode="mean",
second_kernel_size=(2, 32),
third_kernel_size=(8, 4),
drop_prob=0.25)
"""
"""
model = HybridNet(n_chans, n_classes,
input_window_samples=input_window_samples)
"""
"""
model = TCN(n_chans, n_classes,
n_blocks=6,
n_filters=32,
kernel_size=9,
drop_prob=0.0,
add_log_softmax=True)
"""
"""
model = EEGNetv4(n_chans,
n_classes,
input_window_samples=input_window_samples,
final_conv_length="auto",
pool_mode="mean",
F1=8,
D=2,
F2=16, # usually set to F1*D (?)
kernel_length=64,
third_kernel_size=(8, 4),
drop_prob=0.2)
"""
if cuda:
model.cuda()
######################################################################
# Training
# These values we found good for shallow network:
lr = 0.01 # 0.0625 * 0.01
weight_decay = 0.0005
# For deep4 they should be:
# lr = 1 * 0.01
# weight_decay = 0.5 * 0.001
batch_size = 64
n_epochs = 80
clf = EEGClassifier(
model,
criterion=torch.nn.NLLLoss,
optimizer=torch.optim.SGD, #AdamW,
train_split=predefined_split(valid_set), # using valid_set for validation
optimizer__lr=lr,
optimizer__momentum=0.9,
optimizer__weight_decay=weight_decay,
batch_size=batch_size,
callbacks=[
"accuracy", #("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
],
device=device,
)
# Model training for a specified number of epochs. `y` is None as it is already supplied
# in the dataset.
clf.fit(train_set, y=None, epochs=n_epochs)
results_columns = ['train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy']
df = pd.DataFrame(clf.history[:, results_columns], columns=results_columns,
index=clf.history[:, 'epoch'])
val_accs = df['valid_accuracy'].values
max_val_acc = 100.0 * np.max(val_accs)
return max_val_acc
if __name__=='__main__':
accs = []
for subject_id in range(1,10):
acc = train(subject_id)
accs.append(acc)
accs = np.array(accs)
print('\n\nValidation accuracy: {:.2f} +- {:.2f}\n\n'.format(np.mean(accs), np.std(accs)))
######################################################################
# Plot Results
"""
# Extract loss and accuracy values for plotting from history object
results_columns = ['train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy']
df = pd.DataFrame(clf.history[:, results_columns], columns=results_columns,
index=clf.history[:, 'epoch'])
# get percent of misclass for better visual comparison to loss
df = df.assign(train_misclass=100 - 100 * df.train_accuracy,
valid_misclass=100 - 100 * df.valid_accuracy)
plt.style.use('seaborn')
fig, ax1 = plt.subplots(figsize=(8, 3))
df.loc[:, ['train_loss', 'valid_loss']].plot(
ax=ax1, style=['-', ':'], marker='o', color='tab:blue', legend=False, fontsize=14)
ax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)
ax1.set_ylabel("Loss", color='tab:blue', fontsize=14)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
df.loc[:, ['train_misclass', 'valid_misclass']].plot(
ax=ax2, style=['-', ':'], marker='o', color='tab:red', legend=False)
ax2.tick_params(axis='y', labelcolor='tab:red', labelsize=14)
ax2.set_ylabel("Misclassification Rate [%]", color='tab:red', fontsize=14)
ax2.set_ylim(ax2.get_ylim()[0], 85) # make some room for legend
ax1.set_xlabel("Epoch", fontsize=14)
# where some data has already been plotted to ax
handles = []
handles.append(Line2D([0], [0], color='black', linewidth=1, linestyle='-', label='Train'))
handles.append(Line2D([0], [0], color='black', linewidth=1, linestyle=':', label='Valid'))
plt.legend(handles, [h.get_label() for h in handles], fontsize=14)
plt.tight_layout()
plt.ioff()
plt.show()
"""
| 28.530303
| 154
| 0.668614
|
93d4bd3867cc1901695dd5983d0426eb2cff1e40
| 2,473
|
py
|
Python
|
tools/gcc-arm-none-eabi-5_4-2016q3/arm-none-eabi/lib/fpu/libstdc++.a-gdb.py
|
971586331/rt-thread
|
4799ae65c3ef2b8d11cd3a16c11bbd5cd56c1b5d
|
[
"Apache-2.0"
] | 16
|
2020-09-08T09:58:40.000Z
|
2022-01-24T08:50:45.000Z
|
tools/gcc-arm-none-eabi-5_4-2016q3/arm-none-eabi/lib/fpu/libstdc++.a-gdb.py
|
971586331/rt-thread
|
4799ae65c3ef2b8d11cd3a16c11bbd5cd56c1b5d
|
[
"Apache-2.0"
] | 2
|
2020-11-17T07:45:03.000Z
|
2022-03-17T02:25:53.000Z
|
tools/gcc-arm-none-eabi-5_4-2016q3/arm-none-eabi/lib/fpu/libstdc++.a-gdb.py
|
971586331/rt-thread
|
4799ae65c3ef2b8d11cd3a16c11bbd5cd56c1b5d
|
[
"Apache-2.0"
] | 9
|
2020-08-05T09:48:31.000Z
|
2021-12-28T06:53:00.000Z
|
# -*- python -*-
# Copyright (C) 2009-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home/build/work/GCC-5-build/install-native/share/gcc-arm-none-eabi'
libdir = '/home/build/work/GCC-5-build/install-native/arm-none-eabi/lib/fpu'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
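# Worked example of the relocation logic above (hedged, using the hard-coded
# paths in this file): os.path.commonprefix() yields
# '/home/build/work/GCC-5-build/install-native/', so pythondir becomes
# 'share/gcc-arm-none-eabi', libdir becomes 'arm-none-eabi/lib/fpu' (three
# components, hence dotdots == '../../../'), and the printers are looked up in
#   <dir of libstdc++.a>/../../../share/gcc-arm-none-eabi
# which keeps the toolchain relocatable.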
| 39.887097
| 81
| 0.723817
|
6cf279e439a61bbb4a982f3913635764334a6b5f
| 9,822
|
py
|
Python
|
test/functional/rpc_createmultisig.py
|
Soptq/bitgesell
|
4e31314180d8cadaee5868c4d797208ddac7d392
|
[
"MIT"
] | 12
|
2020-05-14T20:22:20.000Z
|
2021-06-07T19:21:34.000Z
|
test/functional/rpc_createmultisig.py
|
slowriot/bitgesell
|
9b7f9e207323e9863253ad2598068b0ad0b159d7
|
[
"MIT"
] | 55
|
2021-03-24T15:00:42.000Z
|
2022-02-22T10:07:14.000Z
|
test/functional/rpc_createmultisig.py
|
slowriot/bitgesell
|
9b7f9e207323e9863253ad2598068b0ad0b159d7
|
[
"MIT"
] | 35
|
2021-02-03T03:02:04.000Z
|
2021-11-22T07:27:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
import binascii
import decimal
import itertools
import json
import os
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey, ECKey
from test_framework.test_framework import BGLTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.wallet_util import bytes_to_wif
class RpcCreateMultiSigTest(BGLTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
self.pub = []
self.priv = []
node0, node1, node2 = self.nodes
for _ in range(self.nkeys):
k = ECKey()
k.generate()
self.pub.append(k.get_pubkey().get_bytes().hex())
self.priv.append(bytes_to_wif(k.get_bytes(), k.is_compressed))
self.final = node2.getnewaddress()
def run_test(self):
node0, node1, node2 = self.nodes
self.check_addmultisigaddress_errors()
self.log.info('Generating blocks ...')
node0.generate(149)
self.sync_all()
self.moved = 0
for self.nkeys in [3, 5]:
for self.nsigs in [2, 3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
# Test mixed compressed and uncompressed pubkeys
self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']
# decompress pk2
pk_obj = ECPubKey()
pk_obj.set(binascii.unhexlify(pk2))
pk_obj.compressed = False
pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()
node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
wmulti0 = node0.get_wallet_rpc('wmulti0')
# Check all permutations of keys because order matters apparently
for keys in itertools.permutations([pk0, pk1, pk2]):
# Results should be the same as this legacy one
legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'legacy')['address'])
# Generate addresses with the segwit types. These should all make legacy addresses
assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'bech32')['address'])
assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'p2sh-segwit')['address'])
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'bech32')['address'])
assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'p2sh-segwit')['address'])
self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
vectors = json.load(f)
for t in vectors:
key_str = ','.join(t['keys'])
desc = descsum_create('sh(sortedmulti(2,{}))'.format(key_str))
assert_equal(self.nodes[0].deriveaddresses(desc)[0], t['address'])
sorted_key_str = ','.join(t['sorted_keys'])
sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str))
assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address'])
def check_addmultisigaddress_errors(self):
if self.options.descriptors:
return
self.log.info('Check that addmultisigaddress fails when the private keys are missing')
addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
for a in addresses:
# Importing all addresses should not change the result
self.nodes[0].importaddress(a)
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
def checkbalances(self):
node0, node1, node2 = self.nodes
node0.generate(100)
self.sync_all()
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
total = 149 * 200 + (height - 149 - 100) * 100 - decimal.Decimal("0.00038295")
assert bal1 == 0
assert bal2 == self.moved
assert bal0 + bal1 + bal2 == total
def do_multisig(self):
node0, node1, node2 = self.nodes
if 'wmulti' not in node1.listwallets():
try:
node1.loadwallet('wmulti')
except JSONRPCException as e:
path = os.path.join(self.options.tmpdir, "node1", "regtest", "wallets", "wmulti")
if e.error['code'] == -18 and "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path) in e.error['message']:
node1.createwallet(wallet_name='wmulti', disable_private_keys=True)
else:
raise
wmulti = node1.get_wallet_rpc('wmulti')
# Construct the expected descriptor
desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
if self.output_type == 'legacy':
desc = 'sh({})'.format(desc)
elif self.output_type == 'p2sh-segwit':
desc = 'sh(wsh({}))'.format(desc)
elif self.output_type == 'bech32':
desc = 'wsh({})'.format(desc)
desc = descsum_create(desc)
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "rbgl" # actually a bech32 address
# compare against addmultisigaddress
msigw = wmulti.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
assert_equal(desc, drop_origins(msigw['descriptor']))
        # addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd == v["scriptPubKey"]["address"]]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
node0.generate(1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
prevtx_err = dict(prevtxs[0])
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# if witnessScript specified, all ok
prevtx_err["witnessScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# both specified, also ok
prevtx_err["redeemScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript mismatch to witnessScript
prevtx_err["redeemScript"] = "6a" # OP_RETURN
assert_raises_rpc_error(-8, "redeemScript does not correspond to witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript does not match scriptPubKey
del prevtx_err["witnessScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# witnessScript does not match scriptPubKey
prevtx_err["witnessScript"] = prevtx_err["redeemScript"]
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
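        # Sign with nsigs-1 keys first (still incomplete), then add the final key's
        # signature to complete the multisig spend.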
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], 0)
blk = node0.generate(1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
wmulti.unloadwallet()
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
| 44.243243
| 174
| 0.645999
|
56079d39331923815dcd33ff540100fc115daf66
| 9,952
|
py
|
Python
|
methods/seg/fcn_segmentor_test.py
|
gatarelib/PyTorchCV
|
5191d0ddc5c42a4cc8dc5451aa14c263c2f3e77f
|
[
"Apache-2.0"
] | null | null | null |
methods/seg/fcn_segmentor_test.py
|
gatarelib/PyTorchCV
|
5191d0ddc5c42a4cc8dc5451aa14c263c2f3e77f
|
[
"Apache-2.0"
] | null | null | null |
methods/seg/fcn_segmentor_test.py
|
gatarelib/PyTorchCV
|
5191d0ddc5c42a4cc8dc5451aa14c263c2f3e77f
|
[
"Apache-2.0"
] | 1
|
2019-07-23T02:33:07.000Z
|
2019-07-23T02:33:07.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You (youansheng@gmail.com)
# Class Definition for Semantic Segmentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
import torch
from PIL import Image
from datasets.seg_data_loader import SegDataLoader
from datasets.tools.data_transformer import DataTransformer
from methods.tools.blob_helper import BlobHelper
from methods.tools.module_utilizer import ModuleUtilizer
from models.seg_model_manager import SegModelManager
from utils.helpers.file_helper import FileHelper
from utils.helpers.image_helper import ImageHelper
from utils.tools.logger import Logger as Log
from vis.parser.seg_parser import SegParser
from vis.visualizer.seg_visualizer import SegVisualizer
class FCNSegmentorTest(object):
def __init__(self, configer):
self.configer = configer
self.blob_helper = BlobHelper(configer)
self.seg_visualizer = SegVisualizer(configer)
self.seg_parser = SegParser(configer)
self.seg_model_manager = SegModelManager(configer)
self.seg_data_loader = SegDataLoader(configer)
self.module_utilizer = ModuleUtilizer(configer)
self.data_transformer = DataTransformer(configer)
self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
self.seg_net = None
self._init_model()
def _init_model(self):
self.seg_net = self.seg_model_manager.semantic_segmentor()
self.seg_net = self.module_utilizer.load_net(self.seg_net)
self.seg_net.eval()
def __test_img(self, image_path, label_path, vis_path, raw_path):
Log.info('Image Path: {}'.format(image_path))
ori_image = ImageHelper.read_image(image_path,
tool=self.configer.get('data', 'image_tool'),
mode=self.configer.get('data', 'input_mode'))
ori_width, ori_height = ImageHelper.get_size(ori_image)
total_logits = np.zeros((ori_height, ori_width, self.configer.get('data', 'num_classes')), np.float32)
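        # Multi-scale (and optionally mirrored) testing: logits from every scale are resized
        # back to the original resolution and accumulated before the final argmax.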
for scale in self.configer.get('test', 'scale_search'):
image = self.blob_helper.make_input(image=ori_image,
input_size=self.configer.get('test', 'input_size'),
scale=scale)
if self.configer.get('test', 'crop_test'):
crop_size = self.configer.get('test', 'crop_size')
if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
results = self._crop_predict(image, crop_size)
else:
results = self._predict(image)
else:
results = self._predict(image)
results = cv2.resize(results, (ori_width, ori_height), interpolation=cv2.INTER_LINEAR)
total_logits += results
if self.configer.get('test', 'mirror'):
if self.configer.get('data', 'image_tool') == 'cv2':
image = cv2.flip(ori_image, 1)
else:
image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)
image = self.blob_helper.make_input(image, input_size=self.configer.get('test', 'input_size'), scale=1.0)
if self.configer.get('test', 'crop_test'):
crop_size = self.configer.get('test', 'crop_size')
if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
results = self._crop_predict(image, crop_size)
else:
results = self._predict(image)
else:
results = self._predict(image)
results = cv2.resize(results[:, ::-1], (ori_width, ori_height), interpolation=cv2.INTER_LINEAR)
total_logits += results
label_map = np.argmax(total_logits, axis=-1)
label_img = np.array(label_map, dtype=np.uint8)
image_bgr = cv2.cvtColor(np.array(ori_image), cv2.COLOR_RGB2BGR)
image_canvas = self.seg_parser.colorize(label_img, image_canvas=image_bgr)
ImageHelper.save(image_canvas, save_path=vis_path)
ImageHelper.save(ori_image, save_path=raw_path)
if not self.configer.is_empty('data', 'label_list'):
label_img = self.__relabel(label_img)
label_img = Image.fromarray(label_img, 'P')
Log.info('Label Path: {}'.format(label_path))
ImageHelper.save(label_img, label_path)
def _crop_predict(self, image, crop_size):
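        # Sliding-window inference: split the image into overlapping crops, run the network
        # on the whole batch of crops, then sum the per-crop logits back into a full-size
        # score map at their original positions.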
height, width = image.size()[2:]
np_image = image.squeeze(0).permute(1, 2, 0).cpu().numpy()
height_starts = self._decide_intersection(height, crop_size[1])
width_starts = self._decide_intersection(width, crop_size[0])
split_crops = []
for height in height_starts:
for width in width_starts:
image_crop = np_image[height:height + crop_size[1], width:width + crop_size[0]]
split_crops.append(image_crop[np.newaxis, :])
split_crops = np.concatenate(split_crops, axis=0) # (n, crop_image_size, crop_image_size, 3)
inputs = torch.from_numpy(split_crops).permute(0, 3, 1, 2).to(self.device)
with torch.no_grad():
results = self.seg_net.forward(inputs)
results = results[0].permute(0, 2, 3, 1).cpu().numpy()
reassemble = np.zeros((np_image.shape[0], np_image.shape[1], results.shape[-1]), np.float32)
index = 0
for height in height_starts:
for width in width_starts:
reassemble[height:height+crop_size[1], width:width+crop_size[0]] += results[index]
index += 1
return reassemble
def _decide_intersection(self, total_length, crop_length):
stride = int(crop_length * self.configer.get('test', 'crop_stride_ratio')) # set the stride as the paper do
times = (total_length - crop_length) // stride + 1
cropped_starting = []
for i in range(times):
cropped_starting.append(stride*i)
if total_length - cropped_starting[-1] > crop_length:
cropped_starting.append(total_length - crop_length) # must cover the total image
return cropped_starting
def _predict(self, inputs):
with torch.no_grad():
results = self.seg_net.forward(inputs)
results = results[0].squeeze().permute(1, 2, 0).cpu().numpy()
return results
def __relabel(self, label_map):
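        # Map contiguous training ids back to the dataset's original label ids using the
        # configured 'label_list'.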
height, width = label_map.shape
label_dst = np.zeros((height, width), dtype=np.uint8)
for i in range(self.configer.get('data', 'num_classes')):
label_dst[label_map == i] = self.configer.get('data', 'label_list')[i]
label_dst = np.array(label_dst, dtype=np.uint8)
return label_dst
def test(self):
base_dir = os.path.join(self.configer.get('project_dir'),
'val/results/seg', self.configer.get('dataset'))
test_img = self.configer.get('test_img')
test_dir = self.configer.get('test_dir')
if test_img is None and test_dir is None:
            Log.error('Neither test_img nor test_dir is specified.')
exit(1)
if test_img is not None and test_dir is not None:
            Log.error('Specify either test_img or test_dir, not both.')
exit(1)
if test_img is not None:
base_dir = os.path.join(base_dir, 'test_img')
filename = test_img.rstrip().split('/')[-1]
label_path = os.path.join(base_dir, 'label', '{}.png'.format('.'.join(filename.split('.')[:-1])))
raw_path = os.path.join(base_dir, 'raw', filename)
vis_path = os.path.join(base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
FileHelper.make_dirs(label_path, is_file=True)
FileHelper.make_dirs(raw_path, is_file=True)
FileHelper.make_dirs(vis_path, is_file=True)
self.__test_img(test_img, label_path, vis_path, raw_path)
else:
base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
FileHelper.make_dirs(base_dir)
for filename in FileHelper.list_dir(test_dir):
image_path = os.path.join(test_dir, filename)
label_path = os.path.join(base_dir, 'label', '{}.png'.format('.'.join(filename.split('.')[:-1])))
raw_path = os.path.join(base_dir, 'raw', filename)
vis_path = os.path.join(base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
FileHelper.make_dirs(label_path, is_file=True)
FileHelper.make_dirs(raw_path, is_file=True)
FileHelper.make_dirs(vis_path, is_file=True)
self.__test_img(image_path, label_path, vis_path, raw_path)
def debug(self):
base_dir = os.path.join(self.configer.get('project_dir'),
'vis/results/seg', self.configer.get('dataset'), 'debug')
if not os.path.exists(base_dir):
os.makedirs(base_dir)
count = 0
for i, data_dict in enumerate(self.seg_data_loader.get_trainloader()):
inputs = data_dict['img']
targets = data_dict['labelmap']
for j in range(inputs.size(0)):
count = count + 1
if count > 20:
exit(1)
image_bgr = self.blob_helper.tensor2bgr(inputs[j])
label_map = targets[j].numpy()
image_canvas = self.seg_parser.colorize(label_map, image_canvas=image_bgr)
cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
cv2.imshow('main', image_canvas)
cv2.waitKey()
| 44.035398
| 126
| 0.615756
|
3463630712c5f189ef02f1225c22007718b1eec4
| 33,166
|
py
|
Python
|
cvtorchvision/cvtransforms/cvfunctional.py
|
jxw-tmp/opencv_transforms_torchvision
|
4604610a422cfdc0af94a7e3112949121a0c6fe6
|
[
"MIT"
] | null | null | null |
cvtorchvision/cvtransforms/cvfunctional.py
|
jxw-tmp/opencv_transforms_torchvision
|
4604610a422cfdc0af94a7e3112949121a0c6fe6
|
[
"MIT"
] | null | null | null |
cvtorchvision/cvtransforms/cvfunctional.py
|
jxw-tmp/opencv_transforms_torchvision
|
4604610a422cfdc0af94a7e3112949121a0c6fe6
|
[
"MIT"
] | null | null | null |
from __future__ import division
import torch
import math
import random
from PIL import Image
import cv2
import numpy as np
import numbers
import types
import collections
import warnings
import matplotlib.pyplot as plt
from torchvision.transforms import functional
import PIL
INTER_MODE = {'NEAREST': cv2.INTER_NEAREST, 'BILINEAR': cv2.INTER_LINEAR, 'BICUBIC': cv2.INTER_CUBIC}
PAD_MOD = {'constant': cv2.BORDER_CONSTANT,
'edge': cv2.BORDER_REPLICATE,
'reflect': cv2.BORDER_DEFAULT,
'symmetric': cv2.BORDER_REFLECT
}
def imshow(inps, title=None):
"""Imshow for Tensor."""
subwindows = len(inps)
for idx, (inp, name) in enumerate(zip(inps, title)):
inp = inp.numpy().transpose((1, 2, 0))
ax = plt.subplot(1, subwindows, idx+1)
ax.axis('off')
plt.imshow(inp)
ax.set_title(name)
# plt.pause(0.001)
plt.show()
# plt.waitforbuttonpress(-1)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def to_tensor(pic):
"""Converts a numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
Args:
pic (np.ndarray, torch.Tensor): Image to be converted to tensor, (H x W x C[RGB]).
Returns:
Tensor: Converted image.
"""
if _is_numpy_image(pic):
if len(pic.shape) == 2:
pic = cv2.cvtColor(pic, cv2.COLOR_GRAY2RGB)
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor) or img.max() > 1:
return img.float().div(255)
else:
return img
elif _is_tensor_image(pic):
return pic
else:
try:
return to_tensor(np.array(pic))
except Exception:
raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))
def to_cv_image(pic, mode=None):
"""Convert a tensor to an ndarray.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (str): color space and pixel depth of input data (optional)
for example: cv2.COLOR_RGB2BGR.
Returns:
np.array: Image converted to PIL Image.
"""
if not (_is_numpy_image(pic) or _is_tensor_image(pic)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
npimg = pic
if isinstance(pic, torch.FloatTensor):
pic = pic.mul(255).byte()
if torch.is_tensor(pic):
npimg = np.squeeze(np.transpose(pic.numpy(), (1, 2, 0)))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if mode is None:
return npimg
else:
return cv2.cvtColor(npimg, mode)
def normalize(tensor, mean, std):
"""Normalize a tensor image with mean and standard deviation.
See ``Normalize`` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
Returns:
Tensor: Normalized Tensor image.
"""
if _is_tensor_image(tensor):
for t, m, s in zip(tensor, mean, std):
t.sub_(m).div_(s)
return tensor
elif _is_numpy_image(tensor):
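        # For numpy images, the mean is interpreted in [0, 1] and scaled to [0, 255];
        # the std is applied as given.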
return (tensor.astype(np.float32) - 255.0 * np.array(mean))/np.array(std)
else:
raise RuntimeError('Undefined type')
def resize(img, size, interpolation='BILINEAR'):
"""Resize the input CV Image to the given size.
Args:
img (np.ndarray): Image to be resized.
size (tuple or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number, maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
(size * height / width, size)
interpolation (str, optional): Desired interpolation. Default is ``BILINEAR``
Returns:
cv Image: Resized image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
h, w, c = img.shape
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return cv2.resize(img, dsize=(ow, oh), interpolation=INTER_MODE[interpolation])
else:
oh = size
ow = int(size * w / h)
return cv2.resize(img, dsize=(ow, oh), interpolation=INTER_MODE[interpolation])
else:
oh, ow = size
return cv2.resize(img, dsize=(int(ow), int(oh)), interpolation=INTER_MODE[interpolation])
def to_rgb_bgr(pic):
"""Converts a color image stored in BGR sequence to RGB (BGR to RGB)
or stored in RGB sequence to BGR (RGB to BGR).
Args:
pic (np.ndarray, torch.Tensor): Image to be converted, (H x W x 3).
Returns:
Tensor: Converted image.
"""
if _is_numpy_image(pic) or _is_tensor_image(pic):
img = pic[:, :, [2, 1, 0]]
return img
else:
try:
return to_rgb_bgr(np.array(pic))
except Exception:
raise TypeError('pic should be numpy.ndarray or torch.Tensor. Got {}'.format(type(pic)))
def pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):
"""Pad the given CV Image on all sides with speficified padding mode and fill value.
Args:
img (np.ndarray): Image to be padded.
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int, tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
constant: pads with a constant value, this value is specified with fill
edge: pads with the last value on the edge of the image
reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
CV Image: Padded image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
if not isinstance(padding, (numbers.Number, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Padding mode should be either constant, edge, reflect or symmetric'
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, collections.Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, collections.Sequence) and len(padding) == 4:
pad_left, pad_top, pad_right, pad_bottom = padding
if isinstance(fill, numbers.Number):
fill = (fill,) * (2 * len(img.shape) - 3)
if padding_mode == 'constant':
assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \
'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))
img = cv2.copyMakeBorder(src=img, top=pad_top, bottom=pad_bottom, left=pad_left, right=pad_right,
borderType=PAD_MOD[padding_mode], value=fill)
return img
def crop(img, x, y, h, w):
"""Crop the given CV Image.
Args:
img (np.ndarray): Image to be cropped.
x: Upper pixel coordinate.
y: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
Returns:
CV Image: Cropped image.
"""
assert _is_numpy_image(img), 'img should be CV Image. Got {}'.format(type(img))
    assert h > 0 and w > 0, 'h={} and w={} should be greater than 0'.format(h, w)
x1, y1, x2, y2 = round(x), round(y), round(x+h), round(y+w)
try:
check_point1 = img[x1, y1, ...]
check_point2 = img[x2-1, y2-1, ...]
except IndexError:
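        # The requested region extends beyond the image: zero-pad the borders so the
        # crop below is always valid, then shift the crop coordinates accordingly.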
# warnings.warn('crop region is {} but image size is {}'.format((x1, y1, x2, y2), img.shape))
img = cv2.copyMakeBorder(img, - min(0, x1), max(x2 - img.shape[0], 0),
-min(0, y1), max(y2 - img.shape[1], 0), cv2.BORDER_CONSTANT, value=[0, 0, 0])
y2 += -min(0, y1)
y1 += -min(0, y1)
x2 += -min(0, x1)
x1 += -min(0, x1)
finally:
return img[x1:x2, y1:y2, ...].copy()
def center_crop(img, output_size):
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
h, w, _ = img.shape
th, tw = output_size
i = int(round((h - th) * 0.5))
j = int(round((w - tw) * 0.5))
return crop(img, i, j, th, tw)
def resized_crop(img, i, j, h, w, size, interpolation='BILINEAR'):
"""Crop the given CV Image and resize it to desired size. Notably used in RandomResizedCrop.
Args:
img (np.ndarray): Image to be cropped.
i: Upper pixel coordinate.
j: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (str, optional): Desired interpolation. Default is
``BILINEAR``.
Returns:
np.ndarray: Cropped image.
"""
assert _is_numpy_image(img), 'img should be CV Image'
img = crop(img, i, j, h, w)
img = resize(img, size, interpolation)
return img
def hflip(img):
"""Horizontally flip the given PIL Image.
Args:
img (np.ndarray): Image to be flipped.
Returns:
        np.ndarray: Horizontally flipped image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
return cv2.flip(img, 1)
def vflip(img):
"""Vertically flip the given PIL Image.
Args:
img (CV Image): Image to be flipped.
Returns:
        np.ndarray: Vertically flipped image.
"""
if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
return cv2.flip(img, 0)
def five_crop(img, size):
"""Crop the given CV Image into four corners and the central crop.
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
Returns:
tuple: tuple (tl, tr, bl, br, center) corresponding top left,
top right, bottom left, bottom right and center crop.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
h, w, _ = img.shape
crop_h, crop_w = size
if crop_w > w or crop_h > h:
raise ValueError("Requested crop size {} is bigger than input size {}".format(size,
(h, w)))
tl = crop(img, 0, 0, crop_h, crop_w)
tr = crop(img, 0, w - crop_w, crop_h, crop_w)
bl = crop(img, h - crop_h, 0, crop_h, crop_w)
br = crop(img, h - crop_h, w - crop_w, crop_h, crop_w)
center = center_crop(img, (crop_h, crop_w))
return (tl, tr, bl, br, center)
def ten_crop(img, size, vertical_flip=False):
"""Crop the given CV Image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip,
br_flip, center_flip) corresponding top left, top right,
bottom left, bottom right and center crop and same for the
flipped image.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
def adjust_brightness(img, brightness_factor):
"""Adjust brightness of an Image.
Args:
img (np.ndarray): CV Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
any non negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
np.ndarray: Brightness adjusted image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
im = img.astype(np.float32) * brightness_factor
im = im.clip(min=0, max=255)
return im.astype(img.dtype)
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (np.ndarray): CV Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
np.ndarray: Contrast adjusted image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
im = img.astype(np.float32)
mean = round(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY).mean())
im = (1-contrast_factor)*mean + contrast_factor * im
im = im.clip(min=0, max=255)
return im.astype(img.dtype)
def adjust_saturation(img, saturation_factor):
"""Adjust color saturation of an image.
Args:
img (np.ndarray): CV Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a gray image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
np.ndarray: Saturation adjusted image.
"""
if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
im = img.astype(np.float32)
degenerate = cv2.cvtColor(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
im = (1-saturation_factor) * degenerate + saturation_factor * im
im = im.clip(min=0, max=255)
return im.astype(img.dtype)
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See https://en.wikipedia.org/wiki/Hue for more details on Hue.
Args:
img (np.ndarray): CV Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
np.ndarray: Hue adjusted image.
"""
if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
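    # RGB2HSV_FULL maps hue to [0, 255], so adding hue_factor * 255 with uint8
    # wrap-around performs the cyclic hue shift.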
im = img.astype(np.uint8)
hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV_FULL)
hsv[..., 0] += np.uint8(hue_factor * 255)
im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)
return im.astype(img.dtype)
def adjust_gamma(img, gamma, gain=1):
"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
I_out = 255 * gain * ((I_in / 255) ** gamma)
See https://en.wikipedia.org/wiki/Gamma_correction for more details.
Args:
img (np.ndarray): CV Image to be adjusted.
gamma (float): Non negative real number. gamma larger than 1 make the
shadows darker, while gamma smaller than 1 make dark regions
lighter.
gain (float): The constant multiplier.
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
im = img.astype(np.float32)
im = 255. * gain * np.power(im / 255., gamma)
im = im.clip(min=0., max=255.)
return im.astype(img.dtype)
def to_grayscale(img, num_output_channels=1):
"""Convert image to grayscale version of image.
Args:
img (np.ndarray): Image to be converted to grayscale.
Returns:
CV Image: Grayscale version of the image.
if num_output_channels == 1 : returned image is single channel
if num_output_channels == 3 : returned image is 3 channel with r == g == b
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
if num_output_channels == 1:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
elif num_output_channels == 3:
img = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
else:
raise ValueError('num_output_channels should be either 1 or 3')
return img
def rotate(img, angle, resample='BILINEAR', expand=False, center=None):
"""Rotate the image by angle.
Args:
        img (np.ndarray): CV Image to be rotated.
angle ({float, int}): In degrees clockwise order.
resample ({NEAREST, BILINEAR, BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
imgtype = img.dtype
if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
h, w, _ = img.shape
point = center or (w/2, h/2)
M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1)
if expand:
if center is None:
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - point[0]
M[1, 2] += (nH / 2) - point[1]
# perform the actual rotation and return the image
dst = cv2.warpAffine(img, M, (nW, nH))
else:
xx = []
yy = []
for point in (np.array([0, 0, 1]), np.array([w-1, 0, 1]), np.array([w-1, h-1, 1]), np.array([0, h-1, 1])):
target = M@point
xx.append(target[0])
yy.append(target[1])
nh = int(math.ceil(max(yy)) - math.floor(min(yy)))
nw = int(math.ceil(max(xx)) - math.floor(min(xx)))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nw - w)/2
M[1, 2] += (nh - h)/2
dst = cv2.warpAffine(img, M, (nw, nh), flags=INTER_MODE[resample])
else:
dst = cv2.warpAffine(img, M, (w, h), flags=INTER_MODE[resample])
return dst.astype(imgtype)
def affine6(img, anglez=0, shear=0, translate=(0, 0), scale=(1, 1), resample='BILINEAR', fillcolor=(0, 0, 0)):
"""Apply affine transformation on the image keeping image center invariant
Args:
        img (np.ndarray): CV Image to be transformed.
anglez (float): rotation angle in degrees around Z between -180 and 180, clockwise direction.
        shear (float): shear angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float, or tuple): overall scale
resample ({NEAREST, BILINEAR, BICUBIC}, optional):
fillcolor (int or tuple): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
"""
rows, cols, _ = img.shape
centery = rows * 0.5
centerx = cols * 0.5
alpha = math.radians(shear)
beta = math.radians(anglez)
lambda1 = scale[0]
lambda2 = scale[1]
tx = translate[0]
ty = translate[1]
sina = math.sin(alpha)
cosa = math.cos(alpha)
sinb = math.sin(beta)
cosb = math.cos(beta)
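    # Compose scale (lambda1, lambda2), shear (alpha) and rotation (beta) into a single
    # 2x3 affine matrix, keeping the image center fixed before adding the (tx, ty) shift.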
M00 = cosb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) - sinb * (lambda2 - lambda1) * sina * cosa
M01 = - sinb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
M10 = sinb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
M11 = + cosb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + sinb * (lambda2 - lambda1) * sina * cosa
M02 = centerx - M00 * centerx - M01 * centery + tx
M12 = centery - M10 * centerx - M11 * centery + ty
affine_matrix = np.array([[M00, M01, M02], [M10, M11, M12]], dtype=np.float32)
dst_img = cv2.warpAffine(img, affine_matrix, (cols, rows), flags=INTER_MODE[resample],
borderMode=cv2.BORDER_CONSTANT, borderValue=fillcolor)
return dst_img
def affine(img, angle=0, translate=(0, 0), scale=1, shear=0, resample='BILINEAR', fillcolor=(0,0,0)):
"""Apply affine transformation on the image keeping image center invariant
Args:
        img (np.ndarray): CV Image to be transformed.
angle ({float, int}): rotation angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
resample ({NEAREST, BILINEAR, BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
fillcolor (int or tuple): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
"""
if not _is_numpy_image(img):
raise TypeError('img should be CV Image. Got {}'.format(type(img)))
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"Argument translate should be a list or tuple of length 2"
assert scale > 0.0, "Argument scale should be positive"
rows, cols, _ = img.shape
center = (cols * 0.5, rows * 0.5)
angle = math.radians(angle)
shear = math.radians(shear)
M00 = math.cos(angle)*scale
M01 = -math.sin(angle+shear)*scale
M10 = math.sin(angle)*scale
M11 = math.cos(angle+shear)*scale
M02 = center[0] - center[0]*M00 - center[1]*M01 + translate[0]
M12 = center[1] - center[0]*M10 - center[1]*M11 + translate[1]
affine_matrix = np.array([[M00, M01, M02], [M10, M11, M12]], dtype=np.float32)
dst_img = cv2.warpAffine(img, affine_matrix, (cols, rows), flags=INTER_MODE[resample],
borderMode=cv2.BORDER_CONSTANT, borderValue=fillcolor)
return dst_img
def perspective(img, fov=45, anglex=0, angley=0, anglez=0, shear=0,
translate=(0, 0), scale=(1, 1), resample='BILINEAR', fillcolor=(0, 0, 0)):
"""
    This function is partly based on https://blog.csdn.net/dcrmg/article/details/80273818
"""
imgtype = img.dtype
h, w, _ = img.shape
centery = h * 0.5
centerx = w * 0.5
alpha = math.radians(shear)
beta = math.radians(anglez)
lambda1 = scale[0]
lambda2 = scale[1]
tx = translate[0]
ty = translate[1]
sina = math.sin(alpha)
cosa = math.cos(alpha)
sinb = math.sin(beta)
cosb = math.cos(beta)
M00 = cosb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) - sinb * (lambda2 - lambda1) * sina * cosa
M01 = - sinb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
M10 = sinb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
M11 = + cosb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + sinb * (lambda2 - lambda1) * sina * cosa
M02 = centerx - M00 * centerx - M01 * centery + tx
M12 = centery - M10 * centerx - M11 * centery + ty
affine_matrix = np.array([[M00, M01, M02], [M10, M11, M12], [0, 0, 1]], dtype=np.float32)
# -------------------------------------------------------------------------------
z = np.sqrt(w ** 2 + h ** 2) / 2 / np.tan(math.radians(fov / 2))
radx = math.radians(anglex)
rady = math.radians(angley)
sinx = math.sin(radx)
cosx = math.cos(radx)
siny = math.sin(rady)
cosy = math.cos(rady)
r = np.array([[cosy, 0, -siny, 0],
[-siny * sinx, cosx, -sinx * cosy, 0],
[cosx * siny, sinx, cosx * cosy, 0],
[0, 0, 0, 1]])
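    # Rotate the four image corners about the X and Y axes in 3D, then project them back
    # onto the image plane from a virtual viewpoint at distance z (derived from the fov).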
pcenter = np.array([centerx, centery, 0, 0], np.float32)
p1 = np.array([0, 0, 0, 0], np.float32) - pcenter
p2 = np.array([w, 0, 0, 0], np.float32) - pcenter
p3 = np.array([0, h, 0, 0], np.float32) - pcenter
p4 = np.array([w, h, 0, 0], np.float32) - pcenter
dst1 = r.dot(p1)
dst2 = r.dot(p2)
dst3 = r.dot(p3)
dst4 = r.dot(p4)
list_dst = [dst1, dst2, dst3, dst4]
org = np.array([[0, 0],
[w, 0],
[0, h],
[w, h]], np.float32)
dst = np.zeros((4, 2), np.float32)
for i in range(4):
dst[i, 0] = list_dst[i][0] * z / (z - list_dst[i][2]) + pcenter[0]
dst[i, 1] = list_dst[i][1] * z / (z - list_dst[i][2]) + pcenter[1]
perspective_matrix = cv2.getPerspectiveTransform(org, dst)
total_matrix = perspective_matrix @ affine_matrix
result_img = cv2.warpPerspective(img, total_matrix, (w, h), flags=INTER_MODE[resample],
borderMode=cv2.BORDER_CONSTANT, borderValue=fillcolor)
return result_img.astype(imgtype)
def gaussian_noise(img: np.ndarray, mean, std):
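    # Multiplicative (speckle-like) Gaussian noise: each pixel is scaled by (1 + N(mean, std)).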
imgtype = img.dtype
gauss = np.random.normal(mean, std, img.shape).astype(np.float32)
noisy = np.clip((1 + gauss) * img.astype(np.float32), 0, 255)
return noisy.astype(imgtype)
def poisson_noise(img):
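    # Poisson (shot) noise: quantize intensities to a power-of-two number of levels,
    # draw Poisson counts at that scale and rescale back to [0, 255].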
imgtype = img.dtype
img = img.astype(np.float32)/255.0
vals = len(np.unique(img))
vals = 2 ** np.ceil(np.log2(vals))
noisy = 255 * np.clip(np.random.poisson(img.astype(np.float32) * vals) / float(vals), 0, 1)
return noisy.astype(imgtype)
def salt_and_pepper(img, prob=0.01):
''' Adds "Salt & Pepper" noise to an image.
prob: probability (threshold) that controls level of noise
'''
imgtype = img.dtype
rnd = np.random.rand(img.shape[0], img.shape[1])
noisy = img.copy()
noisy[rnd < prob/2] = 0.0
noisy[rnd > 1 - prob/2] = 255.0
return noisy.astype(imgtype)
def cv_transform(img):
# img = resize(img, size=(100, 300))
# img = to_tensor(img)
# img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# img = pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
# img = pad(img, padding=(100, 100, 100, 100), fill=5, padding_mode='symmetric')
# img = crop(img, -40, -20, 1000, 1000)
# img = center_crop(img, (310, 300))
# img = resized_crop(img, -10.3, -20, 330, 220, (500, 500))
# img = hflip(img)
# img = vflip(img)
# tl, tr, bl, br, center = five_crop(img, 100)
# img = adjust_brightness(img, 2.1)
# img = adjust_contrast(img, 1.5)
# img = adjust_saturation(img, 2.3)
# img = adjust_hue(img, 0.5)
# img = adjust_gamma(img, gamma=3, gain=0.1)
# img = rotate(img, 10, resample='BILINEAR', expand=True, center=None)
# img = to_grayscale(img, 3)
# img = affine(img, 10, (0, 0), 1, 0, resample='BICUBIC', fillcolor=(255,255,0))
    # img = gaussian_noise(img)
# img = poisson_noise(img)
img = salt_and_pepper(img)
return to_tensor(img)
def pil_transform(img):
# img = functional.resize(img, size=(100, 300))
# img = functional.to_tensor(img)
# img = functional.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# img = functional.pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
# img = functional.pad(img, padding=(100, 100, 100, 100), padding_mode='symmetric')
# img = functional.crop(img, -40, -20, 1000, 1000)
# img = functional.center_crop(img, (310, 300))
# img = functional.resized_crop(img, -10.3, -20, 330, 220, (500, 500))
# img = functional.hflip(img)
# img = functional.vflip(img)
# tl, tr, bl, br, center = functional.five_crop(img, 100)
# img = functional.adjust_brightness(img, 2.1)
# img = functional.adjust_contrast(img, 1.5)
# img = functional.adjust_saturation(img, 2.3)
# img = functional.adjust_hue(img, 0.5)
# img = functional.adjust_gamma(img, gamma=3, gain=0.1)
# img = functional.rotate(img, 10, resample=PIL.Image.BILINEAR, expand=True, center=None)
# img = functional.to_grayscale(img, 3)
# img = functional.affine(img, 10, (0, 0), 1, 0, resample=PIL.Image.BICUBIC, fillcolor=(255,255,0))
return functional.to_tensor(img)
if __name__ == '__main__':
image_path = '../../cat.jpg'
cvimage = cv2.imread(image_path, cv2.IMREAD_COLOR)
cvimage = cv2.cvtColor(cvimage, cv2.COLOR_BGR2RGB)
cvimage = cv_transform(cvimage)
pilimage = Image.open(image_path).convert('RGB')
pilimage = pil_transform(pilimage)
sub = abs(cvimage - pilimage)
imshow((cvimage, pilimage, sub), ('CV', 'PIL', 'sub'))
# imshow((cvimage, pilimage), ('CV', 'PIL'))
# imshow([pilimage], ('PIL'))
| 37.603175
| 125
| 0.609962
|
815effbfb46ddc355ac2b30413071eb99fc70864
| 1,753
|
py
|
Python
|
ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py
|
ameya-r/ironic-tempest-plugin
|
d3360cf3b6ad8b89b9c80fc806dc5d4ba373dd01
|
[
"Apache-2.0"
] | 9
|
2016-11-20T08:00:27.000Z
|
2019-01-28T22:03:31.000Z
|
ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py
|
ameya-r/ironic-tempest-plugin
|
d3360cf3b6ad8b89b9c80fc806dc5d4ba373dd01
|
[
"Apache-2.0"
] | 2
|
2018-12-07T11:14:14.000Z
|
2022-01-19T10:25:28.000Z
|
ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py
|
ameya-r/ironic-tempest-plugin
|
d3360cf3b6ad8b89b9c80fc806dc5d4ba373dd01
|
[
"Apache-2.0"
] | 7
|
2017-12-11T18:07:47.000Z
|
2021-10-21T05:07:02.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from ironic_tempest_plugin.tests.scenario import \
baremetal_standalone_manager as bsm
LOG = logging.getLogger(__name__)
CONF = config.CONF
class BaremetalRamdiskBootIso(bsm.BaremetalStandaloneScenarioTest):
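    # Prefer Redfish virtual-media boot when the redfish hardware type is enabled;
    # otherwise fall back to the ipmi driver with iPXE boot.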
if 'redfish' in CONF.baremetal.enabled_hardware_types:
driver = 'redfish'
boot_interface = 'redfish-virtual-media'
else:
driver = 'ipmi'
boot_interface = 'ipxe'
delete_node = False
deploy_interface = 'ramdisk'
api_microversion = '1.66'
image_ref = CONF.baremetal.ramdisk_iso_image_ref
wholedisk_image = False
@classmethod
def skip_checks(cls):
super(BaremetalRamdiskBootIso, cls).skip_checks()
if not cls.image_ref:
            raise cls.skipException('Skipping ramdisk ISO booting as '
'no ramdisk_iso_image_ref is defined.')
@decorators.idempotent_id('2859d115-9266-4461-9286-79b146e65dc9')
@utils.services('image', 'network')
def test_ramdisk_boot(self):
self.boot_and_verify_ramdisk_node(self.image_ref, iso=True)
| 35.06
| 75
| 0.727325
|
3e01024cab28256791d4ce424b896dc6c1a82a70
| 3,690
|
py
|
Python
|
ve/unit/test_randomization.py
|
fvutils/py-vsc
|
e30ffae1b750d8182d102b1fe5b1cfdce017a092
|
[
"Apache-2.0"
] | null | null | null |
ve/unit/test_randomization.py
|
fvutils/py-vsc
|
e30ffae1b750d8182d102b1fe5b1cfdce017a092
|
[
"Apache-2.0"
] | null | null | null |
ve/unit/test_randomization.py
|
fvutils/py-vsc
|
e30ffae1b750d8182d102b1fe5b1cfdce017a092
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from vsc.model.coverpoint_model import CoverpointModel
'''
Created on Jul 29, 2019
@author: ballance
'''
from unittest.case import TestCase
import vsc
from vsc_test_case import VscTestCase
class TestRandomization(VscTestCase):
def test_single(self):
@vsc.randobj
class my_s(object):
def __init__(self):
super().__init__()
self.a = vsc.rand_bit_t(16)
self.b = vsc.rand_bit_t(16)
@vsc.constraint
def ab_c(self):
self.a < self.b
my_i = my_s()
for i in range(100):
my_i.randomize()
print("a=" + str(my_i.a) + " (" + bin(my_i.a) + ") b=" + str(my_i.b))
self.assertLess(my_i.a, my_i.b)
def test_simple(self):
@vsc.randobj
class my_s(object):
def __init__(self):
super().__init__()
self.a = vsc.rand_bit_t(16)
self.b = vsc.rand_bit_t(8)
self.c = vsc.rand_bit_t(2)
self.d = vsc.rand_bit_t(1)
self.e = vsc.rand_bit_t(16)
self.f = vsc.rand_bit_t(8)
self.g = vsc.rand_bit_t(2)
self.h = vsc.rand_bit_t(1)
self.i = vsc.rand_bit_t(16)
self.j = vsc.rand_bit_t(8)
self.k = vsc.rand_bit_t(2)
self.l = vsc.rand_bit_t(1)
@vsc.constraint
def ab_c(self):
with vsc.if_then(self.a < self.b):
self.c < self.d
with vsc.else_then():
self.c == self.d
# self.c != self.d
@vsc.covergroup
class my_s_cg(object):
def __init__(self):
self.with_sample(dict(
a=vsc.uint16_t()
))
self.a_cp = vsc.coverpoint(self.a)
v = my_s()
v_cg = my_s_cg()
v_cg_m = v_cg.get_model()
        cp_m: CoverpointModel = v_cg_m.coverpoint_l[0]
for b in cp_m.bin_model_l[0].bin_l:
print("b: " + str(b.target_val_low) + ".." + str(b.target_val_high))
# for i in range(1000):
for i in range(500):
v.randomize()
v_cg.sample(v.a)
print("a=" + str(v.a) + " b=" + str(v.b) + " c=" + str(v.c) + " d=" + str(v.d) + " e=" + str(v.e) + " f=" + str(v.f))
# self.assertGreaterEqual(v_cg.get_coverage(), 70)
print("Coverage: %f" % (v_cg.get_coverage()))
print("cp_m=" + str(cp_m))
for bi in range(cp_m.get_n_bins()):
print("Bin[%d]=%d" % (bi, cp_m.get_bin_hits(bi)))
| 31.271186
| 129
| 0.51626
|
92c0ca8a21348147ca94c28fd27dbec36dd1af80
| 5,553
|
py
|
Python
|
visualization.py
|
Jabb0/FastFlow3D
|
cdc2a547268b85d0c851cf87786d80fcde4e8487
|
[
"MIT"
] | 6
|
2021-10-14T03:30:32.000Z
|
2022-03-25T07:16:03.000Z
|
visualization.py
|
Jabb0/FastFlow3D
|
cdc2a547268b85d0c851cf87786d80fcde4e8487
|
[
"MIT"
] | 2
|
2021-10-08T09:06:24.000Z
|
2022-03-26T10:37:22.000Z
|
visualization.py
|
Jabb0/FastFlow3D
|
cdc2a547268b85d0c851cf87786d80fcde4e8487
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
import yaml
from data.WaymoDataset import WaymoDataset
from data.util import ApplyPillarization, drop_points_function
from utils import str2bool
from visualization.util import predict_and_store_flows, flows_exist
from models.FastFlow3DModelScatter import FastFlow3DModelScatter
# vispy
# if error vispy:
# https://askubuntu.com/questions/308128/failed-to-load-platform-plugin-xcb-while-launching-qt5-app-on-linux-without
# https://gist.github.com/ujjwal96/1dcd57542bdaf3c9d1b0dd526ccd44ff
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('data_directory', type=str)
parser.add_argument('config_file', type=str)
# NOTE: IF MODEL IS NONE IT WILL VISUALIZE GROUND TRUTH DATA
parser.add_argument('--model_path', default=None, type=str)
# start_frame and end_frame allow us just visualize a set of frames
parser.add_argument('--start_frame', default=0, type=int)
parser.add_argument('--end_frame', default=None, type=int)
parser.add_argument('--vis_previous_current', default=False, type=bool)
# If you want online prediction or first predict, store the flows and predict them
# This is suitable for slow systems since it reads the flows then from disk
parser.add_argument('--online', type=str2bool, nargs='?', const=False, default=True)
# If you want to create an automatic video of the visualization
# --video {gt, model}, gt if you want a video of the ground truth or model if you want a video of the model
parser.add_argument('--video', default=None, type=str)
args = parser.parse_args()
waymo_dataset = WaymoDataset(args.data_directory)
if args.end_frame is None:
args.end_frame = len(waymo_dataset)
if args.start_frame < 0 or args.start_frame > len(waymo_dataset):
raise ValueError("Start frame must be greater than 0 and less thant the dataset length")
if args.end_frame < 0 or args.end_frame > len(waymo_dataset):
raise ValueError("End frame must be greater than 0 and less thant the dataset length")
if args.start_frame > args.end_frame:
raise ValueError("Start frame cannot be greater than end frame")
# Load config file (must be downloaded from Weights and Biases), it has the name of config.yaml
with open(args.config_file, 'r') as stream:
try:
config_info = yaml.safe_load(stream)
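            # Weights and Biases stores each hyperparameter as {'value': ...},
            # hence the ['value'] lookups below.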
grid_cell_size = config_info['grid_cell_size']['value']
x_min = config_info['x_min']['value']
y_min = config_info['y_min']['value']
z_min = config_info['z_min']['value']
x_max = config_info['x_max']['value']
y_max = config_info['y_max']['value']
z_max = config_info['z_max']['value']
n_pillars_x = config_info['n_pillars_x']['value']
n_pillars_y = config_info['n_pillars_y']['value']
point_cloud_transform = ApplyPillarization(grid_cell_size=grid_cell_size, x_min=x_min,
y_min=y_min, z_min=z_min, z_max=z_max, n_pillars_x=n_pillars_x)
waymo_dataset.set_point_cloud_transform(point_cloud_transform)
drop_points_function = drop_points_function(x_min=x_min,
x_max=x_max, y_min=y_min, y_max=y_max,
z_min=z_min, z_max=z_max)
waymo_dataset.set_drop_invalid_point_function(drop_points_function)
if "n_points" in config_info.keys():
n_points = config_info['n_points']['value']
if n_points is not None and n_points != 'None':
waymo_dataset.set_n_points(n_points)
if "architecture" in config_info.keys():
architecture = config_info['architecture']['value']
else:
architecture = "FastFlowNet"
if args.model_path is not None and not flows_exist(waymo_dataset):
if architecture == "FastFlowNet":
model = FastFlow3DModelScatter.load_from_checkpoint(args.model_path)
model.eval()
print("DISPLAYING PREDICTED DATA WITH FASTFLOWNET")
elif architecture == "FlowNet":
from models.Flow3DModel import Flow3DModel
model = Flow3DModel.load_from_checkpoint(args.model_path)
model.cuda()
model.eval()
print("DISPLAYING PREDICTED DATA WITH FLOWNET (baseline)")
else:
raise ValueError("no architecture {0} implemented".format(architecture))
else:
model = None
print("DISPLAYING GROUND TRUTH DATA - NO MODEL HAS BEEN LOADED")
except yaml.YAMLError as exc:
print(exc)
exit(1)
if args.online is not True:
# Predict and store into disk
print(f"Predicting and storing {len(waymo_dataset)} frames...")
predict_and_store_flows(model, waymo_dataset, architecture=architecture)
from visualization.laserscanvis import LaserScanVis
vis = LaserScanVis(dataset=waymo_dataset,
start_frame=args.start_frame,
end_frame=args.end_frame,
model=model,
vis_previous_current=args.vis_previous_current,
online=args.online,
video=args.video)
vis.run()
| 46.663866
| 118
| 0.638934
|
f9e4099323cb3b862a1e13cb38b93a612c9254ff
| 230
|
py
|
Python
|
rss_scraper/feeds/enums.py
|
muhammad-mamdouh/rss-scraper
|
a49f09e4ebc147b1de58ed3dce95a76400dac832
|
[
"MIT"
] | null | null | null |
rss_scraper/feeds/enums.py
|
muhammad-mamdouh/rss-scraper
|
a49f09e4ebc147b1de58ed3dce95a76400dac832
|
[
"MIT"
] | null | null | null |
rss_scraper/feeds/enums.py
|
muhammad-mamdouh/rss-scraper
|
a49f09e4ebc147b1de58ed3dce95a76400dac832
|
[
"MIT"
] | null | null | null |
from enum import Enum
from django.db import models
class ItemStatus(models.IntegerChoices):
NEW, READ = range(1, 3)
class FeedParsingErrorCodes(Enum):
IS_GONE = 410
URL_CHANGED = 301
CONTENT_NOT_CHANGED = 304
| 16.428571
| 40
| 0.726087
|
9846261d303995c8cf4d857d7737505b89ab8b81
| 6,254
|
py
|
Python
|
src/einsteinpy/symbolic/weyl.py
|
bibek22/einsteinpy
|
78bf5d942cbb12393852f8e4d7a8426f1ffe6f23
|
[
"MIT"
] | 485
|
2019-02-04T09:15:22.000Z
|
2022-03-19T13:50:17.000Z
|
src/einsteinpy/symbolic/weyl.py
|
bibek22/einsteinpy
|
78bf5d942cbb12393852f8e4d7a8426f1ffe6f23
|
[
"MIT"
] | 570
|
2019-02-02T10:57:27.000Z
|
2022-02-26T16:37:05.000Z
|
src/einsteinpy/symbolic/weyl.py
|
bibek22/einsteinpy
|
78bf5d942cbb12393852f8e4d7a8426f1ffe6f23
|
[
"MIT"
] | 250
|
2019-01-30T14:14:14.000Z
|
2022-02-28T21:18:18.000Z
|
import numpy as np
import sympy
from einsteinpy.symbolic.helpers import _change_name
from einsteinpy.symbolic.ricci import RicciScalar, RicciTensor
from einsteinpy.symbolic.riemann import RiemannCurvatureTensor
from einsteinpy.symbolic.tensor import BaseRelativityTensor, _change_config
class WeylTensor(BaseRelativityTensor):
"""
Class for defining Weyl Tensor
"""
def __init__(self, arr, syms, config="ulll", parent_metric=None, name="WeylTensor"):
"""
Constructor and Initializer
Parameters
----------
arr : ~sympy.tensor.array.dense_ndim_array.ImmutableDenseNDimArray or list
Sympy Array or multi-dimensional list containing Sympy Expressions
syms : tuple or list
Tuple of crucial symbols denoting time-axis, 1st, 2nd, and 3rd axis (t,x1,x2,x3)
config : str
Configuration of contravariant and covariant indices in tensor. 'u' for upper and 'l' for lower indices. Defaults to 'ulll'.
        parent_metric : ~einsteinpy.symbolic.metric.MetricTensor
Corresponding Metric for the Weyl Tensor. Defaults to None.
name : str
Name of the Tensor. Defaults to "WeylTensor"
Raises
------
TypeError
Raised when arr is not a list or sympy Array
TypeError
syms is not a list or tuple
ValueError
config has more or less than 4 indices
"""
super(WeylTensor, self).__init__(
arr=arr, syms=syms, config=config, parent_metric=parent_metric, name=name
)
self._order = 4
if not len(config) == self._order:
raise ValueError("config should be of length {}".format(self._order))
@classmethod
def from_metric(cls, metric):
"""
Get Weyl tensor calculated from a metric tensor
Parameters
----------
metric : ~einsteinpy.symbolic.metric.MetricTensor
            Space-time Metric from which the Weyl Tensor is to be calculated
Raises
------
ValueError
Raised when the dimension of the tensor is less than 3
"""
if metric.dims > 3:
metric_cov = metric.lower_config()
t_riemann = RiemannCurvatureTensor.from_metric(metric)
# Riemann Tensor with covariant indices is needed
t_riemann_cov = t_riemann.change_config("llll", metric=None)
t_ricci = RicciTensor.from_riemann(t_riemann, parent_metric=None)
r_scalar = RicciScalar.from_riccitensor(t_ricci, parent_metric=None)
g = metric_cov
dims = g.dims
# Indexing for resultant Weyl Tensor is iklm
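            # Weyl tensor with all indices lowered, for an n-dimensional metric:
            # C_iklm = R_iklm + (R_im g_kl - R_il g_km + R_kl g_im - R_km g_il) / (n - 2)
            #          + R (g_il g_km - g_im g_kl) / ((n - 1)(n - 2))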
C = np.zeros(shape=(dims, dims, dims, dims), dtype=int).tolist()
for t in range(dims ** 4):
i, k, l, m = (
t % dims,
(int(t / dims)) % (dims),
(int(t / (dims ** 2))) % (dims),
(int(t / (dims ** 3))) % (dims),
)
C[i][k][l][m] = t_riemann_cov[i, k, l, m] + (
(
(
t_ricci[i, m] * g[k, l]
- t_ricci[i, l] * g[k, m]
+ t_ricci[k, l] * g[i, m]
- t_ricci[k, m] * g[i, l]
)
/ (dims - 2)
)
+ (
r_scalar.expr
* (g[i, l] * g[k, m] - g[i, m] * g[k, l])
/ ((dims - 1) * (dims - 2))
)
)
C = sympy.simplify(sympy.Array(C))
return cls(C, metric.syms, config="llll", parent_metric=metric)
if metric.dims == 3:
return cls(
sympy.Array(np.zeros((3, 3, 3, 3), dtype=int)),
metric.syms,
config="llll",
parent_metric=metric,
)
raise ValueError("Dimension of the space/space-time should be 3 or more")
def change_config(self, newconfig="llll", metric=None):
"""
Changes the index configuration(contravariant/covariant)
Parameters
----------
newconfig : str
Specify the new configuration. Defaults to 'llll'
metric : ~einsteinpy.symbolic.metric.MetricTensor or None
Parent metric tensor for changing indices.
Already assumes the value of the metric tensor from which it was initialized if passed with None.
Compulsory if not initialized with 'from_metric'. Defaults to None.
Returns
-------
~einsteinpy.symbolic.weyl.WeylTensor
New tensor with new configuration. Configuration defaults to 'llll'
Raises
------
Exception
Raised when a parent metric could not be found.
"""
if metric is None:
metric = self._parent_metric
if metric is None:
raise Exception("Parent Metric not found, can't do configuration change")
new_tensor = _change_config(self, metric, newconfig)
new_obj = WeylTensor(
new_tensor,
self.syms,
config=newconfig,
parent_metric=metric,
name=_change_name(self.name, context="__" + newconfig),
)
return new_obj
def lorentz_transform(self, transformation_matrix):
"""
Performs a Lorentz transform on the tensor.
Parameters
----------
transformation_matrix : ~sympy.tensor.array.dense_ndim_array.ImmutableDenseNDimArray or list
Sympy Array or multi-dimensional list containing Sympy Expressions
Returns
-------
~einsteinpy.symbolic.weyl.WeylTensor
lorentz transformed tensor(or vector)
"""
t = super(WeylTensor, self).lorentz_transform(transformation_matrix)
return WeylTensor(
t.tensor(),
syms=self.syms,
config=self._config,
parent_metric=None,
name=_change_name(self.name, context="__lt"),
)
| 36.573099
| 136
| 0.550847
|
8d0874b74bf2de8ffc385689980e859600b0c089
| 6,136
|
py
|
Python
|
tools/coresctostandoff.py
|
bepnye/brat
|
28acfb2d3cce20bd4d4ff1a67690e271675841f2
|
[
"CC-BY-3.0"
] | 17
|
2017-09-14T07:21:37.000Z
|
2021-12-07T03:17:05.000Z
|
tools/coresctostandoff.py
|
bepnye/brat
|
28acfb2d3cce20bd4d4ff1a67690e271675841f2
|
[
"CC-BY-3.0"
] | 7
|
2015-04-11T12:57:42.000Z
|
2016-04-08T13:43:44.000Z
|
tools/coresctostandoff.py
|
bepnye/brat
|
28acfb2d3cce20bd4d4ff1a67690e271675841f2
|
[
"CC-BY-3.0"
] | 5
|
2017-09-14T07:21:55.000Z
|
2021-01-27T01:50:19.000Z
|
#!/usr/bin/env python
import sys
import re
try:
import cElementTree as ET
except:
import xml.etree.cElementTree as ET
# tags of elements to exclude from standoff output
# (not used now; anything not explicitly converted is excluded)
EXCLUDED_TAGS = [
# "SP",
# "IT",
# "SB",
# "REF",
# "P",
# "B",
# "TITLE",
# "PAPER",
# "HEADER",
# "DIV",
# "BODY",
# "ABSTRACT",
# "THEAD",
# "TGROUP",
# "TBODY",
# "SUP",
# "EQN",
# "ENTRY",
# "XREF",
# "ROW",
# "EQ-S",
# "text",
# "datasection",
# "s",
# "mode2",
]
EXCLUDED_TAG = { t:True for t in EXCLUDED_TAGS }
# string to use to indicate elided text in output
ELIDED_TEXT_STRING = "[[[...]]]"
# maximum length of text strings printed without elision
MAXIMUM_TEXT_DISPLAY_LENGTH = 1000
# c-style string escaping for just newline, tab and backslash.
# (s.encode('string_escape') does too much for utf-8)
def c_escape(s):
return s.replace('\\', '\\\\').replace('\t','\\t').replace('\n','\\n')
def strip_ns(tag):
# remove namespace spec from tag, if any
return tag if tag[0] != '{' else re.sub(r'\{.*?\}', '', tag)
class Standoff:
def __init__(self, sid, element, start, end, text):
self.sid = sid
self.element = element
self.start = start
self.end = end
self.text = text
def compress_text(self, l):
if len(self.text) >= l:
el = len(ELIDED_TEXT_STRING)
sl = (l-el)/2
self.text = (self.text[:sl]+ELIDED_TEXT_STRING+self.text[-(l-sl-el):])
def tag(self):
return strip_ns(self.element.tag)
def attrib(self):
# remove namespace specs from attribute names, if any
attrib = {}
for a in self.element.attrib:
if a[0] == "{":
an = re.sub(r'\{.*?\}', '', a)
else:
an = a
attrib[an] = self.element.attrib[a]
return attrib
def __str__(self):
return "X%d\t%s %d %d\t%s\t%s" % \
(self.sid, self.tag(), self.start, self.end,
c_escape(self.text.encode("utf-8")),
" ".join(['%s="%s"' % (k.encode("utf-8"), v.encode("utf-8"))
for k,v in self.attrib().items()]))
def txt(s):
return s if s is not None else ""
next_free_so_id = 1
def text_and_standoffs(e, curroff=0, standoffs=None):
global next_free_so_id
if standoffs == None:
standoffs = []
startoff = curroff
# to keep standoffs in element occurrence order, append
# a placeholder before recursing
so = Standoff(next_free_so_id, e, 0, 0, "")
next_free_so_id += 1
standoffs.append(so)
setext, dummy = subelem_text_and_standoffs(e, curroff+len(txt(e.text)), standoffs)
text = txt(e.text) + setext
curroff += len(text)
so.start = startoff
so.end = curroff
so.text = text
return (text, standoffs)
def subelem_text_and_standoffs(e, curroff, standoffs):
startoff = curroff
text = ""
for s in e:
stext, dummy = text_and_standoffs(s, curroff, standoffs)
text += stext
text += txt(s.tail)
curroff = startoff + len(text)
return (text, standoffs)
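# Example of the flattening performed by text_and_standoffs (hypothetical
# input, shown as a sketch): the document <s>Hello <b>world</b>!</s> yields
# the text "Hello world!" plus standoffs in element occurrence order,
# <s> spanning (0, 12) and <b> spanning (6, 11).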
def empty_elements(e, tags=None):
if tags is None or strip_ns(e.tag) in tags:
e.clear()
for c in e:
empty_elements(c, tags)
def add_space(e):
if strip_ns(e.tag) in ('title', ):
e.tail = (e.tail if e.tail is not None else '') + '\n'
for c in e:
add_space(c)
def convert_coresc1(s):
sostrings = []
# create a textbound of the type specified by the "type"
# attribute.
tid = "T%d" % convert_coresc1._idseq
sostrings.append('%s\t%s %d %d\t%s' % \
(tid, s.attrib()['type'], s.start, s.end,
s.text.encode('utf-8')))
# TODO: consider converting "advantage" and "novelty" attributes
convert_coresc1._idseq += 1
return sostrings
convert_coresc1._idseq = 1
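# The strings produced by convert_coresc1 follow the brat standoff textbound
# format "ID<TAB>TYPE START END<TAB>TEXT"; an illustrative (made-up) line:
#   T1<TAB>Met 12 47<TAB>original sentence text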
convert_function = {
'CoreSc1' : convert_coresc1,
'annotationART' : convert_coresc1,
}
def main(argv=[]):
if len(argv) != 4:
print >> sys.stderr, "Usage:", argv[0], "IN-XML OUT-TEXT OUT-SO"
return -1
in_fn, out_txt_fn, out_so_fn = argv[1:]
# "-" for STDIN / STDOUT
if in_fn == "-":
in_fn = "/dev/stdin"
if out_txt_fn == "-":
out_txt_fn = "/dev/stdout"
if out_so_fn == "-":
out_so_fn = "/dev/stdout"
tree = ET.parse(in_fn)
root = tree.getroot()
# remove unannotated, (primarily) non-content elements
empty_elements(root, set(['article-categories',
'copyright-statement', 'license',
'copyright-holder', 'copyright-year',
'journal-meta', 'article-id',
'back',
'fig', 'table-wrap',
'contrib-group',
'aff', 'author-notes',
'pub-date',
'volume', 'issue',
'fpage', 'lpage',
'history'
]))
add_space(root)
text, standoffs = text_and_standoffs(root)
# filter
standoffs = [s for s in standoffs if not s.tag() in EXCLUDED_TAG]
# convert selected elements
converted = []
for s in standoffs:
if s.tag() in convert_function:
converted.extend(convert_function[s.tag()](s))
# else:
# converted.append(s)
standoffs = converted
for so in standoffs:
try:
so.compress_text(MAXIMUM_TEXT_DISPLAY_LENGTH)
except AttributeError:
pass
# open output files
out_txt = open(out_txt_fn, "wt")
out_so = open(out_so_fn, "wt")
out_txt.write(text.encode("utf-8"))
for so in standoffs:
print >> out_so, so
out_txt.close()
out_so.close()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 26.79476 | 86 | 0.538625 |
62fcc369b13216fd0e2be676ec660693c948412c | 4,215 | py | Python | city_scrapers/spiders/chi_ssa_48.py | MAYANK25402/city-scrapers | 08f92ec5b68682a8120eee1a13c4a03fe0335b9e | ["MIT"] | 255 | 2018-03-06T20:12:03.000Z | 2022-03-05T03:06:45.000Z | city_scrapers/spiders/chi_ssa_48.py | MAYANK25402/city-scrapers | 08f92ec5b68682a8120eee1a13c4a03fe0335b9e | ["MIT"] | 514 | 2018-02-02T16:12:50.000Z | 2022-03-21T20:07:35.000Z | city_scrapers/spiders/chi_ssa_48.py | MAYANK25402/city-scrapers | 08f92ec5b68682a8120eee1a13c4a03fe0335b9e | ["MIT"] | 342 | 2018-02-03T04:05:37.000Z | 2022-03-18T16:34:58.000Z |
import re
from datetime import datetime
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
# TODO: Fix
class ChiSsa48Spider(CityScrapersSpider):
name = "chi_ssa_48"
agency = "Chicago Special Service Area #48 Old Town"
timezone = "America/Chicago"
start_urls = ["https://oldtownchicago.org/ssa-48/"]
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
        Change the `_parse_title`, `_parse_start`, etc. methods to fit your scraping
needs.
"""
meeting_year = response.xpath("//div[@class='meeting-dates-block']/h2")
meeting_date = response.xpath("//div[@class='meeting-dates-block']/h5")
meeting_info = response.xpath("//div[@class='meeting-dates-block']/p")
meeting_links = response.xpath("//div[@class='meeting-minutes-block']")
for item_date, item_info in zip(meeting_date, meeting_info):
start_time, end_time = self._parse_start_end(
item_date, item_info, meeting_year
)
meeting = Meeting(
title="Commission",
description="",
classification=COMMISSION,
start=start_time,
end=end_time,
all_day=False,
time_notes="",
location=self._parse_location(item_info),
links=self._parse_links(start_time, meeting_links),
source=self._parse_source(response),
)
meeting["status"] = self._get_status(
meeting, text=meeting_info.xpath(".//text()").get()
)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_start_end(self, date, info, year):
meeting_year = year.xpath("//div[@class='meeting-dates-block']/h2/text()").get()
parse_year = meeting_year.split(" ")
year = parse_year[0]
meeting_date = date.xpath(".//text()").get()
meeting_time = info.xpath(".//text()").get()
parse_time = meeting_time.split("-")
start_time = datetime.strptime(
year + " " + meeting_date + " " + parse_time[0] + " " + parse_time[1][-2:],
"%Y %A %B %d %I:%M %p",
)
end_time = datetime.strptime(
year + " " + meeting_date + " " + parse_time[1], "%Y %A %B %d %I:%M%p"
)
return start_time, end_time
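    # Illustrative parse for _parse_start_end (page content is hypothetical):
    # with a year heading starting "2021", a date string "Tuesday January 12"
    # and a time string "6:00 - 7:30pm", start parses to
    # datetime(2021, 1, 12, 18, 0) and end to datetime(2021, 1, 12, 19, 30).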
def _parse_location(self, info):
"""Parse or generate location."""
element = info.xpath(".//text()").getall()
name = re.sub(r"\s+", " ", element[1]).strip()
"""If the location is not known, the element contains only two strings """
if len(element) > 2:
address = re.sub(r"[\(\)]", "", re.sub(r"\s+", " ", element[2]).strip())
else:
address = name
if "TBD" not in address and "Chicago" not in address:
address += " Chicago, IL"
return {
"address": address,
"name": name,
}
def _parse_links(self, start_time, meeting_links):
"""Parse or generate links."""
links = []
for href in meeting_links.xpath(".//a"):
title = href.xpath("text()").get().strip()
minutes_date = title.split(" ")
            # Verify that the selected link points to meeting minutes
if len(minutes_date) >= 2 and minutes_date[1] != "Minutes":
continue
            # Some meeting minutes dates use a four-digit year (2019) instead of a two-digit year (19)
if minutes_date[2][-3] != "/":
meeting_minutes_date = datetime.strptime(minutes_date[2], "%m/%d/%Y")
else:
meeting_minutes_date = datetime.strptime(minutes_date[2], "%m/%d/%y")
if meeting_minutes_date.date() == start_time.date():
links.append(
{"title": title, "href": href.xpath("@href").get().strip()}
)
return links
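    # Link titles are expected to look like "Meeting Minutes 01/12/2021" or
    # "Meeting Minutes 01/12/21" (hypothetical examples); the check on the
    # third-to-last character selects between the %Y and %y formats above.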
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
| 36.025641 | 88 | 0.559193 |
01f146e0aa9d8cce76dcf4fadb963ca6814f4017 | 110,449 | py | Python | o3seespy/command/element/bearing.py | o3seespy/o3seespy | 4fdd942370df1ac8d454e361f651405717b8584c | ["MIT", "BSD-3-Clause"] | 16 | 2019-10-24T17:58:46.000Z | 2022-03-01T19:48:06.000Z | o3seespy/command/element/bearing.py | o3seespy/o3seespy | 4fdd942370df1ac8d454e361f651405717b8584c | ["MIT", "BSD-3-Clause"] | 5 | 2020-04-17T01:39:27.000Z | 2020-12-18T05:07:58.000Z | o3seespy/command/element/bearing.py | o3seespy/o3seespy | 4fdd942370df1ac8d454e361f651405717b8584c | ["MIT", "BSD-3-Clause"] | 6 | 2020-02-20T02:13:11.000Z | 2021-11-01T19:08:41.000Z |
from o3seespy.command.element.base_element import ElementBase
class ElastomericBearingPlasticity2D(ElementBase):
"""
The ElastomericBearingPlasticity2D Element Class
This command is used to construct an elastomericBearing element object, which is defined by two nodes. The element
can have zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D)
plasticity properties for the shear deformations, and force-deformation behaviors defined by
UniaxialMaterials in the remaining two (2D) or four (3D) directions. By default (sDratio =
0.5) P-Delta moments are equally distributed to the two end-nodes. To avoid the
introduction of artificial viscous damping in the isolation system (sometimes
referred to as "damping leakage in the isolation system"), the bearing
element does not contribute to the Rayleigh damping by default. If
the element has non-zero length, the local x-axis is determined
from the nodal geometry unless the optional x-axis vector is
specified in which case the nodal geometry is ignored and
the user-defined orientation is utilized.
For a two-dimensional problem
"""
op_type = 'elastomericBearingPlasticity'
def __init__(self, osi, ele_nodes, k_init, qd, alpha1, alpha2, mu, p_mat=None, mz_mat=None, do_rayleigh=False, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for ElastomericBearingPlasticity2D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
k_init: float
Initial elastic stiffness in local shear direction
qd: float
Characteristic strength
alpha1: float
Post yield stiffness ratio of linear hardening component
alpha2: float
Post yield stiffness ratio of non-linear hardening component
mu: float
Exponent of non-linear hardening component
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [0, 1]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> o3.element.ElastomericBearingPlasticity2D(osi, ele_nodes=ele_nodes, k_init=1.0, qd=1.0, alpha1=1.0, alpha2=1.0,
>>> mu=1.0, p_mat=p_mat, mz_mat=mz_mat)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.k_init = float(k_init)
self.qd = float(qd)
self.alpha1 = float(alpha1)
self.alpha2 = float(alpha2)
self.mu = float(mu)
self.p_mat = p_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.k_init, self.qd, self.alpha1, self.alpha2, self.mu]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
class ElastomericBearingPlasticity3D(ElementBase):
"""
The ElastomericBearingPlasticity3D Element Class
This command is used to construct an elastomericBearing element object, which is defined by two nodes. The element
can have zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D)
plasticity properties for the shear deformations, and force-deformation behaviors defined by
UniaxialMaterials in the remaining two (2D) or four (3D) directions. By default (sDratio =
0.5) P-Delta moments are equally distributed to the two end-nodes. To avoid the
introduction of artificial viscous damping in the isolation system (sometimes
referred to as "damping leakage in the isolation system"), the bearing
element does not contribute to the Rayleigh damping by default. If
the element has non-zero length, the local x-axis is determined
from the nodal geometry unless the optional x-axis vector is
specified in which case the nodal geometry is ignored and
the user-defined orientation is utilized.
For a three-dimensional problem
"""
op_type = 'elastomericBearingPlasticity'
def __init__(self, osi, ele_nodes, k_init, qd, alpha1, alpha2, mu, p_mat=None, t_mat=None, my_mat=None, mz_mat=None, do_rayleigh=False, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for ElastomericBearingPlasticity3D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
k_init: float
Initial elastic stiffness in local shear direction
qd: float
Characteristic strength
alpha1: float
Post yield stiffness ratio of linear hardening component
alpha2: float
Post yield stiffness ratio of non-linear hardening component
mu: float
Exponent of non-linear hardening component
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
t_mat: obj, optional
Object associated with previously-defined uniaxial_material in torsional direction
my_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local y-axis
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3, ndf=6)
>>> coords = [[0, 0, 0], [0, 1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> t_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> my_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> orient_vals = [1, 0, 0]
>>> o3.element.ElastomericBearingPlasticity3D(osi, ele_nodes=ele_nodes, k_init=1.0, qd=1.0, alpha1=1.0, alpha2=1.0,
>>> mu=1.0, p_mat=p_mat, t_mat=t_mat, my_mat=my_mat, mz_mat=mz_mat,
>>> orient=orient_vals)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.k_init = float(k_init)
self.qd = float(qd)
self.alpha1 = float(alpha1)
self.alpha2 = float(alpha2)
self.mu = float(mu)
self.p_mat = p_mat
self.t_mat = t_mat
self.my_mat = my_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.k_init, self.qd, self.alpha1, self.alpha2, self.mu]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 't_mat') is not None:
self._parameters += ['-T', self.t_mat.tag]
if getattr(self, 'my_mat') is not None:
self._parameters += ['-My', self.my_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
class ElastomericBearingBoucWen2D(ElementBase):
"""
The ElastomericBearingBoucWen2D Element Class
This command is used to construct an elastomericBearing element object, which is defined by two nodes. The element
can have zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D)
plasticity properties for the shear deformations, and force-deformation behaviors defined by
UniaxialMaterials in the remaining two (2D) or four (3D) directions. By default (sDratio =
0.5) P-Delta moments are equally distributed to the two end-nodes. To avoid the
introduction of artificial viscous damping in the isolation system (sometimes
referred to as "damping leakage in the isolation system"), the bearing
element does not contribute to the Rayleigh damping by default. If
the element has non-zero length, the local x-axis is determined
from the nodal geometry unless the optional x-axis vector is
specified in which case the nodal geometry is ignored and
the user-defined orientation is utilized.
For a two-dimensional problem
"""
op_type = 'elastomericBearingBoucWen'
def __init__(self, osi, ele_nodes, k_init, qd, alpha1, alpha2, mu, eta, beta, gamma, p_mat=None, mz_mat=None,
orient_vals: list = None, shear_dist: float = None, do_rayleigh=False, mass: float = None):
"""
Initial method for ElastomericBearingBoucWen2D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
k_init: float
Initial elastic stiffness in local shear direction
qd: float
Characteristic strength
alpha1: float
Post yield stiffness ratio of linear hardening component
alpha2: float
Post yield stiffness ratio of non-linear hardening component
mu: float
Exponent of non-linear hardening component
eta: float
Yielding exponent (sharpness of hysteresis loop corners) (default = 1.0)
beta: float
First hysteretic shape parameter (default = 0.5)
gamma: float
Second hysteretic shape parameter (default = 0.5)
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
orient_vals: list, optional
Vector components in global coordinates defining local x-axis , vector components in global coordinates
defining local y-axis
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
mass: float, optional
Element mass (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> orient_vals = [1, 0, 0, 1, 0, 1]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> o3.element.ElastomericBearingBoucWen2D(osi, ele_nodes=ele_nodes, k_init=1.0, qd=1.0, alpha1=1.0, alpha2=1.0,
>>> mu=1.0, eta=1.0, beta=1.0, gamma=1.0, p_mat=p_mat, mz_mat=mz_mat,
>>> orient_vals=orient_vals, shear_dist=1.0, do_rayleigh=False, mass=1.0)
"""
self.osi = osi
self.ele_nodes = [x.tag for x in ele_nodes]
self.k_init = float(k_init)
self.qd = float(qd)
self.alpha1 = float(alpha1)
self.alpha2 = float(alpha2)
self.mu = float(mu)
self.eta = float(eta)
self.beta = float(beta)
self.gamma = float(gamma)
self.p_mat = p_mat
self.mz_mat = mz_mat
self.orient_vals = orient_vals
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
self.do_rayleigh = do_rayleigh
if mass is None:
self.mass = None
else:
self.mass = float(mass)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_nodes, self.k_init, self.qd, self.alpha1, self.alpha2,
self.mu, self.eta, self.beta, self.gamma]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'orient_vals') is not None:
self._parameters += ['-orient', *self.orient_vals]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
try:
self.to_process(osi)
except ValueError:
self._parameters[0] = 'ElastomericBearingBoucWen'
self.to_process(osi)
class ElastomericBearingBoucWen3D(ElementBase):
"""
The ElastomericBearingBoucWen3D Element Class
This command is used to construct an elastomericBearing element object, which is defined by two nodes. The element
can have zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D)
plasticity properties for the shear deformations, and force-deformation behaviors defined by
UniaxialMaterials in the remaining two (2D) or four (3D) directions. By default (sDratio =
0.5) P-Delta moments are equally distributed to the two end-nodes. To avoid the
introduction of artificial viscous damping in the isolation system (sometimes
referred to as "damping leakage in the isolation system"), the bearing
element does not contribute to the Rayleigh damping by default. If
the element has non-zero length, the local x-axis is determined
from the nodal geometry unless the optional x-axis vector is
specified in which case the nodal geometry is ignored and
the user-defined orientation is utilized.
For a three-dimensional problem
"""
op_type = 'elastomericBearingBoucWen'
def __init__(self, osi, ele_nodes, k_init, qd, alpha1, alpha2, mu, eta, beta, gamma, p_mat=None, t_mat=None,
my_mat=None, mz_mat=None, orient_vals: list = None, shear_dist: float = None, do_rayleigh=False,
mass: float = None):
"""
Initial method for ElastomericBearingBoucWen3D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
k_init: float
Initial elastic stiffness in local shear direction
qd: float
Characteristic strength
alpha1: float
Post yield stiffness ratio of linear hardening component
alpha2: float
Post yield stiffness ratio of non-linear hardening component
mu: float
Exponent of non-linear hardening component
eta: float
Yielding exponent (sharpness of hysteresis loop corners) (default = 1.0)
beta: float
First hysteretic shape parameter (default = 0.5)
gamma: float
Second hysteretic shape parameter (default = 0.5)
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
t_mat: obj, optional
Object associated with previously-defined uniaxial_material in torsional direction
my_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local y-axis
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
orient_vals: list, optional
Vector components in global coordinates defining local x-axis , vector components in global coordinates
defining local y-axis
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
mass: float, optional
Element mass (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3, ndf=6)
>>> coords = [[0, 0, 0], [0, 1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> orient_vals = [1, 0, 0]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> t_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> my_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> o3.element.ElastomericBearingBoucWen3D(osi, ele_nodes=ele_nodes, k_init=1.0, qd=1.0, alpha1=1.0, alpha2=1.0,
>>> mu=1.0, eta=1.0, beta=1.0, gamma=1.0, p_mat=p_mat, t_mat=t_mat,
>>> my_mat=my_mat, mz_mat=mz_mat, orient_vals=orient_vals,
>>> shear_dist=1.0, do_rayleigh=False, mass=1.0)
"""
self.osi = osi
self.ele_nodes = [x.tag for x in ele_nodes]
self.k_init = float(k_init)
self.qd = float(qd)
self.alpha1 = float(alpha1)
self.alpha2 = float(alpha2)
self.mu = float(mu)
self.eta = float(eta)
self.beta = float(beta)
self.gamma = float(gamma)
self.p_mat = p_mat
self.t_mat = t_mat
self.my_mat = my_mat
self.mz_mat = mz_mat
self.orient_vals = orient_vals
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
self.do_rayleigh = do_rayleigh
if mass is None:
self.mass = None
else:
self.mass = float(mass)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_nodes, self.k_init, self.qd, self.alpha1, self.alpha2,
self.mu, self.eta, self.beta, self.gamma]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 't_mat') is not None:
self._parameters += ['-T', self.t_mat.tag]
if getattr(self, 'my_mat') is not None:
self._parameters += ['-My', self.my_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'orient_vals') is not None:
self._parameters += ['-orient', *self.orient_vals]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
try:
self.to_process(osi)
except ValueError:
self._parameters[0] = 'ElastomericBearingBoucWen'
self.to_process(osi)
class FlatSliderBearing2D(ElementBase):
"""
The FlatSliderBearing2D Element Class
This command is used to construct a flatSliderBearing element object, which is defined by two nodes. The iNode
represents the flat sliding surface and the jNode represents the slider. The element can have zero length or the
appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D) friction properties for the
shear deformations, and force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D)
or four (3D) directions. To capture the uplift behavior of the bearing, the user-specified
UniaxialMaterial in the axial direction is modified for no-tension behavior. By default
(sDratio = 0.0) P-Delta moments are entirely transferred to the flat sliding surface
(iNode). It is important to note that rotations of the flat sliding surface
(rotations at the iNode) affect the shear behavior of the bearing. To
avoid the introduction of artificial viscous damping in the
isolation system (sometimes referred to as "damping
leakage in the isolation system"), the bearing
element does not contribute to the Rayleigh
damping by default. If the element has
non-zero length, the local x-axis is
determined from the nodal geometry
unless the optional x-axis vector
is specified in which case the
nodal geometry is ignored and the user-defined orientation is utilized.
For a two-dimensional problem
"""
op_type = 'flatSliderBearing'
def __init__(self, osi, ele_nodes, frn_mdl, k_init, p_mat=None, mz_mat=None, do_rayleigh=False, max_iter: int=None, tol: float=None, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for FlatSliderBearing2D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn_mdl: obj
Object associated with previously-defined frictionmodel
k_init: float
Initial elastic stiffness in local shear direction
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
max_iter: int, optional
Maximum number of iterations to undertake to satisfy element equilibrium (optional, default = 20)
tol: float, optional
Convergence tolerance to satisfy element equilibrium (optional, default = 1e-8)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> frn1 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> o3.element.FlatSliderBearing2D(osi, ele_nodes=ele_nodes, frn_mdl=frn1, k_init=1.0, p_mat=p_mat, mz_mat=mz_mat,
>>> do_rayleigh=False, max_iter=1, tol=1.0, orient=None, mass=1.0, shear_dist=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn_mdl = frn_mdl
self.k_init = float(k_init)
self.p_mat = p_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
if max_iter is None:
self.max_iter = None
else:
self.max_iter = int(max_iter)
if tol is None:
self.tol = None
else:
self.tol = float(tol)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn_mdl.tag, self.k_init]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'max_iter') is not None:
self._parameters += ['-iter', self.max_iter]
if getattr(self, 'tol') is not None:
if getattr(self, 'max_iter') is None:
raise ValueError('Cannot set: tol and not: max_iter')
self._parameters += [self.tol]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
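    # Note on the optional-argument pattern above (values illustrative):
    # max_iter=20 with tol=1e-8 appends ['-iter', 20, 1e-8] to the parameter
    # list, while tol without max_iter raises ValueError because the '-iter'
    # flag carries both values.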
class FlatSliderBearing3D(ElementBase):
"""
The FlatSliderBearing3D Element Class
This command is used to construct a flatSliderBearing element object, which is defined by two nodes. The iNode
represents the flat sliding surface and the jNode represents the slider. The element can have zero length or the
appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D) friction properties for the
shear deformations, and force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D)
or four (3D) directions. To capture the uplift behavior of the bearing, the user-specified
UniaxialMaterial in the axial direction is modified for no-tension behavior. By default
(sDratio = 0.0) P-Delta moments are entirely transferred to the flat sliding surface
(iNode). It is important to note that rotations of the flat sliding surface
(rotations at the iNode) affect the shear behavior of the bearing. To
avoid the introduction of artificial viscous damping in the
isolation system (sometimes referred to as "damping
leakage in the isolation system"), the bearing
element does not contribute to the Rayleigh
damping by default. If the element has
non-zero length, the local x-axis is
determined from the nodal geometry
unless the optional x-axis vector
is specified in which case the
nodal geometry is ignored and the user-defined orientation is utilized.
For a three-dimensional problem
"""
op_type = 'flatSliderBearing'
def __init__(self, osi, ele_nodes, frn_mdl, k_init, p_mat=None, t_mat=None, my_mat=None, mz_mat=None, do_rayleigh=False, max_iter=None, tol: float=None, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for FlatSliderBearing3D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn_mdl: obj
Object associated with previously-defined frictionmodel
k_init: float
Initial elastic stiffness in local shear direction
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
t_mat: obj, optional
Object associated with previously-defined uniaxial_material in torsional direction
my_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local y-axis
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
max_iter: None, optional
tol: float, optional
Convergence tolerance to satisfy element equilibrium (optional, default = 1e-8)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3, ndf=6)
>>> coords = [[0, 0, 0], [0, 1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> t_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> my_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> frn1 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> orient_vals = [1, 0, 0]
>>> o3.element.FlatSliderBearing3D(osi, ele_nodes=ele_nodes, frn_mdl=frn1, k_init=1.0, p_mat=p_mat, t_mat=t_mat,
>>> my_mat=my_mat, mz_mat=mz_mat, do_rayleigh=False, max_iter=None, tol=None,
>>> mass=1.0, shear_dist=1.0, orient=orient_vals)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn_mdl = frn_mdl
self.k_init = float(k_init)
self.p_mat = p_mat
self.t_mat = t_mat
self.my_mat = my_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
self.max_iter = max_iter
if tol is None:
self.tol = None
else:
self.tol = float(tol)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn_mdl.tag, self.k_init]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 't_mat') is not None:
self._parameters += ['-T', self.t_mat.tag]
if getattr(self, 'my_mat') is not None:
self._parameters += ['-My', self.my_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'max_iter') is not None:
self._parameters += ['-iter', self.max_iter]
if getattr(self, 'tol') is not None:
if getattr(self, 'max_iter') is None:
raise ValueError('Cannot set: tol and not: max_iter')
self._parameters += [self.tol]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
class SingleFPBearing2D(ElementBase):
"""
The SingleFPBearing2D Element Class
This command is used to construct a singleFPBearing element object, which is defined by two nodes. The iNode
represents the concave sliding surface and the jNode represents the articulated slider. The element can have
zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D) friction
properties (with post-yield stiffening due to the concave sliding surface) for the shear deformations, and
force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D) or four (3D)
directions. To capture the uplift behavior of the bearing, the user-specified UniaxialMaterial
in the axial direction is modified for no-tension behavior. By default (sDratio = 0.0)
P-Delta moments are entirely transferred to the concave sliding surface (iNode). It
is important to note that rotations of the concave sliding surface (rotations at
the iNode) affect the shear behavior of the bearing. To avoid the introduction
of artificial viscous damping in the isolation system (sometimes referred to
as "damping leakage in the isolation system"), the bearing element does not
contribute to the Rayleigh damping by default. If the element has non-zero
length, the local x-axis is determined from the nodal geometry unless the
optional x-axis vector is specified in which case the nodal geometry is
ignored and the user-defined orientation is utilized.
For a two-dimensional problem
"""
op_type = 'singleFPBearing'
def __init__(self, osi, ele_nodes, frn_mdl, reff, k_init, p_mat=None, mz_mat=None, do_rayleigh=False, max_iter: int=None, tol: float=None, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for SingleFPBearing2D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn_mdl: obj
Object associated with previously-defined frictionmodel
reff: float
Effective radius of concave sliding surface
k_init: float
Initial elastic stiffness in local shear direction
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
max_iter: int, optional
Maximum number of iterations to undertake to satisfy element equilibrium (optional, default = 20)
tol: float, optional
Convergence tolerance to satisfy element equilibrium (optional, default = 1e-8)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> frn1 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> o3.element.SingleFPBearing2D(osi, ele_nodes=ele_nodes, frn_mdl=frn1, reff=1.0, k_init=1.0, p_mat=p_mat,
>>> mz_mat=mz_mat, do_rayleigh=False, max_iter=1, tol=1.0, orient=None,
>>> mass=1.0, shear_dist=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn_mdl = frn_mdl
self.reff = float(reff)
self.k_init = float(k_init)
self.p_mat = p_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
if max_iter is None:
self.max_iter = None
else:
self.max_iter = int(max_iter)
if tol is None:
self.tol = None
else:
self.tol = float(tol)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn_mdl.tag, self.reff, self.k_init]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'max_iter') is not None:
self._parameters += ['-iter', self.max_iter]
if getattr(self, 'tol') is not None:
if getattr(self, 'max_iter') is None:
raise ValueError('Cannot set: tol and not: max_iter')
self._parameters += [self.tol]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
class SingleFPBearing3D(ElementBase):
"""
The SingleFPBearing3D Element Class
This command is used to construct a singleFPBearing element object, which is defined by two nodes. The iNode
represents the concave sliding surface and the jNode represents the articulated slider. The element can have
zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D) friction
properties (with post-yield stiffening due to the concave sliding surface) for the shear deformations, and
force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D) or four (3D)
directions. To capture the uplift behavior of the bearing, the user-specified UniaxialMaterial
in the axial direction is modified for no-tension behavior. By default (sDratio = 0.0)
P-Delta moments are entirely transferred to the concave sliding surface (iNode). It
is important to note that rotations of the concave sliding surface (rotations at
the iNode) affect the shear behavior of the bearing. To avoid the introduction
of artificial viscous damping in the isolation system (sometimes referred to
as "damping leakage in the isolation system"), the bearing element does not
contribute to the Rayleigh damping by default. If the element has non-zero
length, the local x-axis is determined from the nodal geometry unless the
optional x-axis vector is specified in which case the nodal geometry is
ignored and the user-defined orientation is utilized.
For a three-dimensional problem
"""
op_type = 'singleFPBearing'
def __init__(self, osi, ele_nodes, frn_mdl, reff, k_init, p_mat=None, t_mat=None, my_mat=None, mz_mat=None, do_rayleigh=False, max_iter: int=None, tol: float=None, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for SingleFPBearing3D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn_mdl: obj
Object associated with previously-defined frictionmodel
reff: float
Effective radius of concave sliding surface
k_init: float
Initial elastic stiffness in local shear direction
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
t_mat: obj, optional
Object associated with previously-defined uniaxial_material in torsional direction
my_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local y axis
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
max_iter: int, optional
Maximum number of iterations to undertake to satisfy element equilibrium (optional, default = 20)
tol: float, optional
Convergence tolerance to satisfy element equilibrium (optional, default = 1e-8)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3, ndf=6)
>>> coords = [[0, 0, 0], [0, 1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> t_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> my_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> frn1 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> orient_vals = [1, 0, 0]
>>> o3.element.SingleFPBearing3D(osi, ele_nodes=ele_nodes, frn_mdl=frn1, reff=1.0, k_init=1.0, p_mat=p_mat, t_mat=t_mat,
>>> my_mat=my_mat, mz_mat=mz_mat, do_rayleigh=False, max_iter=None, tol=None,
>>> orient=orient_vals, mass=1.0, shear_dist=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn_mdl = frn_mdl
self.reff = float(reff)
self.k_init = float(k_init)
self.p_mat = p_mat
self.t_mat = t_mat
self.my_mat = my_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
if max_iter is None:
self.max_iter = None
else:
self.max_iter = int(max_iter)
if tol is None:
self.tol = None
else:
self.tol = float(tol)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn_mdl.tag, self.reff, self.k_init]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 't_mat') is not None:
self._parameters += ['-T', self.t_mat.tag]
if getattr(self, 'my_mat') is not None:
self._parameters += ['-My', self.my_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'max_iter') is not None:
self._parameters += ['-iter', self.max_iter]
if getattr(self, 'tol') is not None:
if getattr(self, 'max_iter') is None:
raise ValueError('Cannot set: tol and not: max_iter')
self._parameters += [self.tol]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
class TFP(ElementBase):
"""
The TFP Element Class
This command is used to construct a Triple Friction Pendulum Bearing element object, which is defined by two nodes.
The element can have zero length or the appropriate bearing height. The bearing has unidirectional (2D) or coupled
(3D) friction properties (with post-yield stiffening due to the concave sliding surface) for the shear
deformations, and force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D)
or four (3D) directions. To capture the uplift behavior of the bearing, the user-specified
UniaxialMaterial in the axial direction is modified for no-tension behavior. P-Delta
moments are entirely transferred to the concave sliding surface (iNode). It is
important to note that rotations of the concave sliding surface (rotations at
the iNode) affect the shear behavior of the bearing. If the element has
non-zero length, the local x-axis is determined from the nodal
geometry unless the optional x-axis vector is specified in
which case the nodal geometry is ignored and the user-defined orientation is utilized.
"""
op_type = 'TFP'
def __init__(self, osi, ele_nodes, r1, r2, r3, r4, db1, db2, db3, db4, d1, d2, d3, d4, mu1, mu2, mu3, mu4, h1, h2,
h3, h4, h0, col_load, big_k=None):
"""
Initial method for TFP
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
r1: float
Radius of inner bottom sliding surface
r2: float
Radius of inner top sliding surface
r3: float
Radius of outer bottom sliding surface
r4: float
Radius of outer top sliding surface
db1: float
Diameter of inner bottom sliding surface
db2: float
Diameter of inner top sliding surface
db3: float
Diameter of outer bottom sliding surface
db4: float
Diameter of outer top sliding surface
d1: float
Diameter of inner slider
d2: float
Diameter of inner slider
d3: float
Diameter of outer bottom slider
d4: float
Diameter of outer top slider
mu1: float
Friction coefficient of inner bottom sliding surface
mu2: float
Friction coefficient of inner top sliding surface
mu3: float
Friction coefficient of outer bottom sliding surface
mu4: float
Friction coefficient of outer top sliding surface
h1: float
Height from inner bottom sliding surface to center of bearing
h2: float
Height from inner top sliding surface to center of bearing
h3: float
Height from outer bottom sliding surface to center of bearing
h4: float
Height from inner top sliding surface to center of bearing
h0: float
Total height of bearing
col_load: float
            Initial axial load on bearing (only used for the first time step; subsequent loads come from the model)
big_k: float
            Optional, stiffness of spring in vertical direction (dof 2 if ndm = 2, dof 3 if ndm = 3) (default=1.0e15)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> o3.element.TFP(osi, ele_nodes=ele_nodes,
>>> r1=1.0, r2=1.0, r3=1.0, r4=1.0,
>>> db1=1.0, db2=1.0, db3=1.0, db4=1.0,
>>> d1=1.0, d2=1.0, d3=1.0, d4=1.0,
>>> mu1=0.3, mu2=0.4, mu3=0.5, mu4=0.5,
>>> h1=1.0, h2=1.0, h3=1.0, h4=1.0,
>>> h0=1.0, col_load=1.0, big_k=None)
"""
self.osi = osi
self.ele_nodes = [x.tag for x in ele_nodes]
self.r1 = float(r1)
self.r2 = float(r2)
self.r3 = float(r3)
self.r4 = float(r4)
self.db1 = float(db1)
self.db2 = float(db2)
self.db3 = float(db3)
self.db4 = float(db4)
self.d1 = float(d1)
self.d2 = float(d2)
self.d3 = float(d3)
self.d4 = float(d4)
self.mu1 = float(mu1)
self.mu2 = float(mu2)
self.mu3 = float(mu3)
self.mu4 = float(mu4)
self.h1 = float(h1)
self.h2 = float(h2)
self.h3 = float(h3)
self.h4 = float(h4)
self.h0 = float(h0)
self.col_load = float(col_load)
if big_k is not None:
self.big_k = float(big_k)
else:
self.big_k = None
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_nodes, self.r1, self.r2, self.r3, self.r4, self.db1,
self.db2, self.db3, self.db4, self.d1, self.d2, self.d3, self.d4, self.mu1, self.mu2,
self.mu3, self.mu4, self.h1, self.h2, self.h3, self.h4, self.h0, self.col_load]
if getattr(self, 'big_k') is not None:
self._parameters += [self.big_k]
self.to_process(osi)
class TripleFrictionPendulum(ElementBase):
"""
The TripleFrictionPendulum Element Class
"""
op_type = 'TripleFrictionPendulum'
def __init__(self, osi, ele_nodes, frn1, frn2, frn3, vert_mat, rot_z_mat, rot_x_mat, rot_y_mat, l1, l2, l3, d1, d2, d3, big_w, uy, kvt, min_fv, tol):
"""
Initial method for TripleFrictionPendulum
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn1: obj
= objects associated with previously-defined frictionmodels at the three sliding interfaces
frn2: obj
= objects associated with previously-defined frictionmodels at the three sliding interfaces
frn3: obj
= objects associated with previously-defined frictionmodels at the three sliding interfaces
vert_mat: obj
= pre-defined material object for compression behavior of the bearing
rot_z_mat: obj
= pre-defined material objects for rotational behavior about 3-axis, 1-axis and 2-axis, respectively.
rot_x_mat: obj
= pre-defined material objects for rotational behavior about 3-axis, 1-axis and 2-axis, respectively.
rot_y_mat: obj
= pre-defined material objects for rotational behavior about 3-axis, 1-axis and 2-axis, respectively.
l1: float
= effective radii. li = r_i - h_i (see figure 1)
l2: float
= effective radii. li = r_i - h_i (see figure 1)
l3: float
= effective radii. li = r_i - h_i (see figure 1)
        d1: float
            = displacement limits of pendulums (figure 1). displacement limit of the bearing is 2 ``d1`` +
            ``d2`` + ``d3`` + ``l1`` * ``d3`` / ``l3`` - ``l1`` * ``d2`` / ``l2``
        d2: float
            = displacement limits of pendulums (figure 1). displacement limit of the bearing is 2 ``d1`` +
            ``d2`` + ``d3`` + ``l1`` * ``d3`` / ``l3`` - ``l1`` * ``d2`` / ``l2``
        d3: float
            = displacement limits of pendulums (figure 1). displacement limit of the bearing is 2 ``d1`` +
            ``d2`` + ``d3`` + ``l1`` * ``d3`` / ``l3`` - ``l1`` * ``d2`` / ``l2``
big_w: float
= axial force used for the first trial of the first analysis step.
uy: float
            = lateral displacement where sliding of the bearing starts. recommended value = 0.25 to 1 mm. a smaller
            value may cause convergence problems.
kvt: float
= tension stiffness k_vt of the bearing.
min_fv: None
tol: float
= relative tolerance for checking the convergence of the element. recommended value = 1.e-10 to 1.e-3.
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> frn1 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> frn2 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> frn3 = o3.friction_model.Coulomb(osi, mu=1.0)
>>> vert_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> rot_z_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> rot_x_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> rot_y_mat = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> o3.element.TripleFrictionPendulum(osi, ele_nodes=ele_nodes, frn1=frn1, frn2=frn2, frn3=frn3, vert_mat=vert_mat,
>>> rot_z_mat=rot_z_mat, rot_x_mat=rot_x_mat, rot_y_mat=rot_y_mat, l1=1.0, l2=1.0,
>>> l3=1.0, d1=1.0, d2=1.0, d3=1.0, big_w=1.0, uy=1.0, kvt=1.0, min_fv=None, tol=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn1 = frn1
self.frn2 = frn2
self.frn3 = frn3
self.vert_mat = vert_mat
self.rot_z_mat = rot_z_mat
self.rot_x_mat = rot_x_mat
self.rot_y_mat = rot_y_mat
self.l1 = float(l1)
self.l2 = float(l2)
self.l3 = float(l3)
self.d1 = float(d1)
self.d2 = float(d2)
self.d3 = float(d3)
self.big_w = float(big_w)
self.uy = float(uy)
self.kvt = float(kvt)
self.min_fv = min_fv
self.tol = float(tol)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn1.tag, self.frn2.tag, self.frn3.tag, self.vert_mat.tag, self.rot_z_mat.tag, self.rot_x_mat.tag, self.rot_y_mat.tag, self.l1, self.l2, self.l3, self.d1, self.d2, self.d3, self.big_w, self.uy, self.kvt, self.min_fv, self.tol]
self.to_process(osi)
class MultipleShearSpring(ElementBase):
"""
The MultipleShearSpring Element Class
This command is used to construct a multipleShearSpring (MSS) element object, which is defined by two nodes. This
element consists of a series of identical shear springs arranged radially to represent the isotropic behavior in the
local y-z plane.
"""
op_type = 'multipleShearSpring'
def __init__(self, osi, ele_nodes, n_spring, mat=None, lim: float=None, mass: float=None, orient=None):
"""
Initial method for MultipleShearSpring
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
n_spring: int
Number of springs
mat: obj, optional
Object associated with previously-defined uniaxial_material object
lim: float, optional
Minimum deformation to calculate equivalent coefficient (see note 1)
mass: float, optional
Element mass
orient: None, optional
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3, ndf=6)
>>> coords = [[0, 0, 0], [1, 0, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> mat = o3.uniaxial_material.Elastic(osi, 1.0)
>>> o3.element.MultipleShearSpring(osi, ele_nodes=ele_nodes, n_spring=1, mat=mat, lim=1.0, mass=1.0, orient=None)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.n_spring = int(n_spring)
self.mat = mat
if lim is None:
self.lim = None
else:
self.lim = float(lim)
if mass is None:
self.mass = None
else:
self.mass = float(mass)
self.orient = orient
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.n_spring]
if getattr(self, 'mat') is not None:
self._parameters += ['-mat', self.mat.tag]
if getattr(self, 'lim') is not None:
self._parameters += ['-lim', self.lim]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
self.to_process(osi)
class KikuchiBearingadjustPDOutput(ElementBase):
"""
The KikuchiBearingadjustPDOutput Element Class
This command is used to construct a KikuchiBearing element object, which is defined by two nodes. This element
consists of multiple shear spring model (MSS) and multiple normal spring model (MNS).
"""
op_type = 'KikuchiBearing'
def __init__(self, osi, ele_nodes, total_rubber, ci, cj, shape: float=None, size: float=None, total_height: float=None, n_mss: int=None, mat_mss=None, lim_disp: float=None, n_mns: int=None, mat_mns=None, lamb: float=None, no_pd_input=False, no_tilt=False, orient=None, mass: float=None):
"""
Initial method for KikuchiBearingadjustPDOutput
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
total_rubber: float
Total rubber thickness
ci: float
P-delta moment adjustment for reaction force (default: ``ci`` =0.5, ``cj`` =0.5)
cj: float
P-delta moment adjustment for reaction force (default: ``ci`` =0.5, ``cj`` =0.5)
shape: float, optional
Following shapes are available: round, square
size: float, optional
Diameter (round shape), length of edge (square shape)
total_height: float, optional
Total height of the bearing (default: distance between inode and jnode)
n_mss: int, optional
Number of springs in mss = nmss
mat_mss: obj, optional
Material object for the MSS
lim_disp: float, optional
Minimum deformation to calculate equivalent coefficient of mss (see note 1)
n_mns: int, optional
Number of springs in mns = nmns*nmns (for round and square shape)
mat_mns: obj, optional
Material object for the MNS
lamb: float, optional
Parameter to calculate compression modulus distribution on mns (see note 2)
no_pd_input: bool
Not consider p-delta moment
no_tilt: bool
Not consider tilt of rigid link
orient: None, optional
mass: float, optional
Element mass
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> mat_mss = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mat_mns = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> o3.element.KikuchiBearingadjustPDOutput(osi, ele_nodes=ele_nodes, shape=1.0, size=1.0, total_rubber=1.0, total_height=1.0, n_mss=1, mat_mss=mat_mss, lim_disp=1.0, n_mns=1, mat_mns=mat_mns, lamb=1.0, no_pd_input=False, no_tilt=False, ci=1.0, cj=1.0, orient=[0.0, 0.0], mass=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
if shape is None:
self.shape = None
else:
self.shape = float(shape)
if size is None:
self.size = None
else:
self.size = float(size)
self.total_rubber = float(total_rubber)
if total_height is None:
self.total_height = None
else:
self.total_height = float(total_height)
if n_mss is None:
self.n_mss = None
else:
self.n_mss = int(n_mss)
self.mat_mss = mat_mss
if lim_disp is None:
self.lim_disp = None
else:
self.lim_disp = float(lim_disp)
if n_mns is None:
self.n_mns = None
else:
self.n_mns = int(n_mns)
self.mat_mns = mat_mns
if lamb is None:
self.lamb = None
else:
self.lamb = float(lamb)
self.no_pd_input = no_pd_input
self.no_tilt = no_tilt
self.ci = float(ci)
self.cj = float(cj)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.total_rubber, '-adjustPDOutput', self.ci, self.cj]
if getattr(self, 'shape') is not None:
self._parameters += ['-shape', self.shape]
if getattr(self, 'size') is not None:
self._parameters += ['-size', self.size]
if getattr(self, 'total_height') is not None:
self._parameters += ['-totalHeight', self.total_height]
if getattr(self, 'n_mss') is not None:
self._parameters += ['-nMSS', self.n_mss]
if getattr(self, 'mat_mss') is not None:
self._parameters += ['-matMSS', self.mat_mss.tag]
if getattr(self, 'lim_disp') is not None:
self._parameters += ['-limDisp', self.lim_disp]
if getattr(self, 'n_mns') is not None:
self._parameters += ['-nMNS', self.n_mns]
if getattr(self, 'mat_mns') is not None:
self._parameters += ['-matMNS', self.mat_mns.tag]
if getattr(self, 'lamb') is not None:
self._parameters += ['-lambda', self.lamb]
if getattr(self, 'no_pd_input'):
self._parameters += ['-noPDInput']
if getattr(self, 'no_tilt'):
self._parameters += ['-noTilt']
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
self.to_process(osi)
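# Illustrative sketch (not part of the generated wrapper): for a minimal call such as
# KikuchiBearingadjustPDOutput(osi, ele_nodes, total_rubber=0.2, ci=0.5, cj=0.5, n_mss=8, mat_mss=mat,
# n_mns=30, mat_mns=mat), the constructor above assembles a parameter list of the form
# ['KikuchiBearing', tag, node_i, node_j, 0.2, '-adjustPDOutput', 0.5, 0.5,
#  '-nMSS', 8, '-matMSS', mat.tag, '-nMNS', 30, '-matMNS', mat.tag]
# before calling to_process(osi); keyword arguments left as None simply omit their flag.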
class KikuchiBearingdoBalance(ElementBase):
"""
The KikuchiBearingdoBalance Element Class
This command is used to construct a KikuchiBearing element object, which is defined by two nodes. This element
consists of multiple shear spring model (MSS) and multiple normal spring model (MNS).
"""
op_type = 'KikuchiBearing'
def __init__(self, osi, ele_nodes, total_rubber, lim_fo, lim_fi, n_iter, shape: float=None, size: float=None, total_height: float=None, n_mss: int=None, mat_mss=None, lim_disp: float=None, n_mns: int=None, mat_mns=None, lamb: float=None, no_pd_input=False, no_tilt=False, orient=None, mass: float=None):
"""
Initial method for KikuchiBearingdoBalance
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
total_rubber: float
Total rubber thickness
lim_fo: float
Tolerance of the external unbalanced force ( ``limfo``)
lim_fi: float
Tolerance of the internal unbalanced force ( ``limfi``)
n_iter: float
Number of iterations to get rid of the internal unbalanced force ( ``niter``)
shape: float, optional
Following shapes are available: round, square
size: float, optional
Diameter (round shape), length of edge (square shape)
total_height: float, optional
Total height of the bearing (default: distance between inode and jnode)
n_mss: int, optional
Number of springs in mss = nmss
mat_mss: obj, optional
Material object for the MSS
lim_disp: float, optional
Minimum deformation to calculate equivalent coefficient of mss (see note 1)
n_mns: int, optional
Number of springs in mns = nmns*nmns (for round and square shape)
mat_mns: obj, optional
Material object for the MNS
lamb: float, optional
Parameter to calculate compression modulus distribution on mns (see note 2)
no_pd_input: bool
Not consider p-delta moment
no_tilt: bool
Not consider tilt of rigid link
orient: None, optional
mass: float, optional
Element mass
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> mat_mss = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> mat_mns = o3.uniaxial_material.Elastic(osi, e_mod=1.0, eta=0.0, eneg=None)
>>> o3.element.KikuchiBearingdoBalance(osi, ele_nodes=ele_nodes, shape=1.0, size=1.0, total_rubber=1.0, total_height=1.0, n_mss=1, mat_mss=mat_mss, lim_disp=1.0, n_mns=1, mat_mns=mat_mns, lamb=1.0, no_pd_input=False, no_tilt=False, lim_fo=1.0, lim_fi=1.0, n_iter=1.0, orient=[0.0, 0.0], mass=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
if shape is None:
self.shape = None
else:
self.shape = float(shape)
if size is None:
self.size = None
else:
self.size = float(size)
self.total_rubber = float(total_rubber)
if total_height is None:
self.total_height = None
else:
self.total_height = float(total_height)
if n_mss is None:
self.n_mss = None
else:
self.n_mss = int(n_mss)
self.mat_mss = mat_mss
if lim_disp is None:
self.lim_disp = None
else:
self.lim_disp = float(lim_disp)
if n_mns is None:
self.n_mns = None
else:
self.n_mns = int(n_mns)
self.mat_mns = mat_mns
if lamb is None:
self.lamb = None
else:
self.lamb = float(lamb)
self.no_pd_input = no_pd_input
self.no_tilt = no_tilt
self.lim_fo = float(lim_fo)
self.lim_fi = float(lim_fi)
self.n_iter = float(n_iter)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.total_rubber, '-doBalance', self.lim_fo, self.lim_fi, self.n_iter]
if getattr(self, 'shape') is not None:
self._parameters += ['-shape', self.shape]
if getattr(self, 'size') is not None:
self._parameters += ['-size', self.size]
if getattr(self, 'total_height') is not None:
self._parameters += ['-totalHeight', self.total_height]
if getattr(self, 'n_mss') is not None:
self._parameters += ['-nMSS', self.n_mss]
if getattr(self, 'mat_mss') is not None:
self._parameters += ['-matMSS', self.mat_mss.tag]
if getattr(self, 'lim_disp') is not None:
self._parameters += ['-limDisp', self.lim_disp]
if getattr(self, 'n_mns') is not None:
self._parameters += ['-nMNS', self.n_mns]
if getattr(self, 'mat_mns') is not None:
self._parameters += ['-matMNS', self.mat_mns.tag]
if getattr(self, 'lamb') is not None:
self._parameters += ['-lambda', self.lamb]
if getattr(self, 'no_pd_input'):
self._parameters += ['-noPDInput']
if getattr(self, 'no_tilt'):
self._parameters += ['-noTilt']
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
self.to_process(osi)
class YamamotoBiaxialHDRcoRS(ElementBase):
"""
The YamamotoBiaxialHDRcoRS Element Class
This command is used to construct a YamamotoBiaxialHDR element object, which is defined by two nodes. This element
can be used to represent the isotropic behavior of high-damping rubber bearing in the local y-z plane.
"""
op_type = 'YamamotoBiaxialHDR'
def __init__(self, osi, ele_nodes, tp, d_do, d_di, hr, cr, cs, orient: list=None, mass: float=None):
"""
Initial method for YamamotoBiaxialHDRcoRS
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
tp: int
Compound type = 1 : X0.6R manufactured by Bridgestone Corporation.
d_do: float
Outer diameter [m]
d_di: float
Bore diameter [m]
hr: float
Total thickness of rubber layer [m]
cr: float
Coefficients for shear stress components of tau_r and tau_s
cs: float
Coefficients for shear stress components of tau_r and tau_s
orient: list, optional
mass: float, optional
Element mass [kg]
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> o3.element.YamamotoBiaxialHDRcoRS(osi, ele_nodes=ele_nodes, tp=1, d_do=1.0, d_di=1.0, hr=1.0, cr=1.0, cs=1.0, orient=[0.0, 0.0], mass=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.tp = int(tp)
self.d_do = float(d_do)
self.d_di = float(d_di)
self.hr = float(hr)
self.cr = float(cr)
self.cs = float(cs)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.tp, self.d_do, self.d_di, self.hr, '-coRS', self.cr, self.cs]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
self.to_process(osi)
class ElastomericX(ElementBase):
"""
The ElastomericX Element Class
This command is used to construct an ElastomericX bearing element object in three-dimension. The 3D continuum
geometry of an elastomeric bearing is modeled as a 2-node, 12 DOF discrete element. This element extends the
formulation of Elastomeric_Bearing_(Bouc-Wen)_Element element. However, instead of the user providing
material models as input arguments, it only requires geometric and material properties of an
elastomeric bearing as arguments. The material models in six direction are formulated
within the element from input arguments. The time-dependent values of mechanical
properties (e.g., shear stiffness, buckling load capacity) can also be recorded
using the "parameters" recorder.
For 3D problem
"""
op_type = 'ElastomericX'
def __init__(self, osi, ele_nodes, fy, alpha, gr, kbulk, d1, d2, ts, tr, n, x1, x2, x3, y1, y2, y3, kc, phi_m, ac, s_dratio, m, cd, tc, tag1, tag2, tag3, tag4):
"""
Initial method for ElastomericX
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
fy: float
Yield strength
alpha: float
Post-yield stiffness ratio
gr: float
Shear modulus of elastomeric bearing
kbulk: float
Bulk modulus of rubber
d1: float
Internal diameter
d2: float
Outer diameter (excluding cover thickness)
ts: float
Single steel shim layer thickness
tr: float
Single rubber layer thickness
n: int
Number of rubber layers
x1: float
Vector components in global coordinates defining local x-axis
x2: float
Vector components in global coordinates defining local x-axis
x3: float
Vector components in global coordinates defining local x-axis
y1: float
Vector components in global coordinates defining local y-axis
y2: float
Vector components in global coordinates defining local y-axis
y3: float
Vector components in global coordinates defining local y-axis
kc: float
Cavitation parameter (optional, default = 10.0)
phi_m: float
Damage parameter (optional, default = 0.5)
ac: float
Strength reduction parameter (optional, default = 1.0)
s_dratio: float
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
m: float
Element mass (optional, default = 0.0)
cd: float
Viscous damping parameter (optional, default = 0.0)
tc: float
Cover thickness (optional, default = 0.0)
tag1: float
Object to include cavitation and post-cavitation (optional, default = 0)
tag2: float
Object to include buckling load variation (optional, default = 0)
tag3: float
Object to include horizontal stiffness variation (optional, default = 0)
tag4: float
Object to include vertical stiffness variation (optional, default = 0)
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> o3.element.ElastomericX(osi, ele_nodes=ele_nodes, fy=1.0, alpha=1.0, gr=1.0, kbulk=1.0, d1=1.0, d2=1.0, ts=1.0,
>>> tr=1.0, n=1, x1=1.0, x2=1.0, x3=1.0, y1=1.0, y2=1.0, y3=1.0, kc=1.0, phi_m=1.0, ac=1.0,
>>> s_dratio=1.0, m=1.0, cd=1.0, tc=1.0, tag1=0, tag2=0, tag3=0, tag4=0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.fy = float(fy)
self.alpha = float(alpha)
self.gr = float(gr)
self.kbulk = float(kbulk)
self.d1 = float(d1)
self.d2 = float(d2)
self.ts = float(ts)
self.tr = float(tr)
self.n = int(n)
self.x1 = float(x1)
self.x2 = float(x2)
self.x3 = float(x3)
self.y1 = float(y1)
self.y2 = float(y2)
self.y3 = float(y3)
self.kc = float(kc)
self.phi_m = float(phi_m)
self.ac = float(ac)
self.s_dratio = float(s_dratio)
self.m = float(m)
self.cd = float(cd)
self.tc = float(tc)
self.tag1 = float(tag1)
self.tag2 = float(tag2)
self.tag3 = float(tag3)
self.tag4 = float(tag4)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.fy, self.alpha, self.gr, self.kbulk, self.d1, self.d2, self.ts, self.tr, self.n, self.x1, self.x2, self.x3, self.y1, self.y2, self.y3, self.kc, self.phi_m, self.ac, self.s_dratio, self.m, self.cd, self.tc, self.tag1, self.tag2, self.tag3, self.tag4]
self.to_process(osi)
class LeadRubberX(ElementBase):
"""
The LeadRubberX Element Class
This command is used to construct a LeadRubberX bearing element object in three-dimension. The 3D continuum geometry
of a lead rubber bearing is modeled as a 2-node, 12 DOF discrete element. It extends the formulation of ElastomericX by
including strength degradation in lead rubber bearing due to heating of the lead-core. The LeadRubberX element
requires only the geometric and material properties of an elastomeric bearing as arguments. The material
models in six direction are formulated within the element from input arguments. The time-dependent
values of mechanical properties (e.g., shear stiffness, buckling load capacity, temperature in
the lead-core, yield strength) can also be recorded using the "parameters" recorder.
"""
op_type = 'LeadRubberX'
def __init__(self, osi, ele_nodes, fy, alpha, gr, kbulk, d1, d2, ts, tr, n, x1, x2, x3, y1, y2, y3, kc, phi_m, ac, s_dratio, m, cd, tc, q_l, c_l, k_s, a_s, tag1, tag2, tag3, tag4, tag5):
"""
Initial method for LeadRubberX
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
fy: float
Yield strength
alpha: float
Post-yield stiffness ratio
gr: float
Shear modulus of elastomeric bearing
kbulk: float
Bulk modulus of rubber
d1: float
Internal diameter
d2: float
Outer diameter (excluding cover thickness)
ts: float
Single steel shim layer thickness
tr: float
Single rubber layer thickness
n: int
Number of rubber layers
x1: float
Vector components in global coordinates defining local x-axis
x2: float
Vector components in global coordinates defining local x-axis
x3: float
Vector components in global coordinates defining local x-axis
y1: float
Vector components in global coordinates defining local y-axis
y2: float
Vector components in global coordinates defining local y-axis
y3: float
Vector components in global coordinates defining local y-axis
kc: float
Cavitation parameter (optional, default = 10.0)
phi_m: float
Damage parameter (optional, default = 0.5)
ac: float
Strength reduction parameter (optional, default = 1.0)
s_dratio: float
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
m: float
Element mass (optional, default = 0.0)
cd: float
Viscous damping parameter (optional, default = 0.0)
tc: float
Cover thickness (optional, default = 0.0)
q_l: float
Density of lead (optional, default = 11200 kg/m3)
c_l: float
Specific heat of lead (optional, default = 130 n-m/kg oc)
k_s: float
Thermal conductivity of steel (optional, default = 50 w/m oc)
a_s: float
Thermal diffusivity of steel (optional, default = 1.41e-05 m2/s)
tag1: int
Object to include cavitation and post-cavitation (optional, default = 0)
tag2: int
Object to include buckling load variation (optional, default = 0)
tag3: int
Object to include horizontal stiffness variation (optional, default = 0)
tag4: int
Object to include vertical stiffness variation (optional, default = 0)
tag5: int
Object to include strength degradation in shear due to heating of lead core (optional, default = 0)
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> o3.element.LeadRubberX(osi, ele_nodes=ele_nodes, fy=1.0, alpha=1.0, gr=1.0, kbulk=1.0, d1=1.0, d2=1.0, ts=1.0,
>>> tr=1.0, n=1, x1=1.0, x2=1.0, x3=1.0, y1=1.0, y2=1.0, y3=1.0, kc=1.0, phi_m=1.0, ac=1.0,
>>> s_dratio=1.0, m=1.0, cd=1.0, tc=1.0, q_l=1.0, c_l=1.0, k_s=1.0, a_s=1.0,
>>> tag1=1, tag2=1, tag3=1, tag4=1, tag5=1)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.fy = float(fy)
self.alpha = float(alpha)
self.gr = float(gr)
self.kbulk = float(kbulk)
self.d1 = float(d1)
self.d2 = float(d2)
self.ts = float(ts)
self.tr = float(tr)
self.n = int(n)
self.x1 = float(x1)
self.x2 = float(x2)
self.x3 = float(x3)
self.y1 = float(y1)
self.y2 = float(y2)
self.y3 = float(y3)
self.kc = float(kc)
self.phi_m = float(phi_m)
self.ac = float(ac)
self.s_dratio = float(s_dratio)
self.m = float(m)
self.cd = float(cd)
self.tc = float(tc)
self.q_l = float(q_l)
self.c_l = float(c_l)
self.k_s = float(k_s)
self.a_s = float(a_s)
self.tag1 = int(tag1)
self.tag2 = int(tag2)
self.tag3 = int(tag3)
self.tag4 = int(tag4)
self.tag5 = int(tag5)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.fy, self.alpha, self.gr, self.kbulk, self.d1, self.d2, self.ts, self.tr, self.n, self.x1, self.x2, self.x3, self.y1, self.y2, self.y3, self.kc, self.phi_m, self.ac, self.s_dratio, self.m, self.cd, self.tc, self.q_l, self.c_l, self.k_s, self.a_s, self.tag1, self.tag2, self.tag3, self.tag4, self.tag5]
self.to_process(osi)
class HDR(ElementBase):
"""
The HDR Element Class
For 3D problem
"""
op_type = 'HDR'
def __init__(self, osi, ele_nodes, gr, kbulk, d1, d2, ts, tr, n, a1, a2, a3, b1, b2, b3, c1, c2, c3, c4, x1, x2, x3, y1, y2, y3, kc, phi_m, ac, s_dratio, m, tc):
"""
Initial method for HDR
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
gr: float
Shear modulus of elastomeric bearing
kbulk: float
Bulk modulus of rubber
d1: float
Internal diameter
d2: float
Outer diameter (excluding cover thickness)
ts: float
Single steel shim layer thickness
tr: float
Single rubber layer thickness
n: int
Number of rubber layers
a1: float
Parameters of the grant model
a2: float
Parameters of the grant model
a3: float
Parameters of the grant model
b1: float
Parameters of the grant model
b2: float
Parameters of the grant model
b3: float
Parameters of the grant model
c1: float
Parameters of the grant model
c2: float
Parameters of the grant model
c3: float
Parameters of the grant model
c4: float
Parameters of the grant model
x1: float
Vector components in global coordinates defining local x-axis
x2: float
Vector components in global coordinates defining local x-axis
x3: float
Vector components in global coordinates defining local x-axis
y1: float
Vector components in global coordinates defining local y-axis
y2: float
Vector components in global coordinates defining local y-axis
y3: float
Vector components in global coordinates defining local y-axis
kc: float
Cavitation parameter (optional, default = 10.0)
phi_m: float
Damage parameter (optional, default = 0.5)
ac: float
Strength reduction parameter (optional, default = 1.0)
s_dratio: float
Shear distance from inode as a fraction of the element length (optional, default = 0.5)
m: float
Element mass (optional, default = 0.0)
tc: float
Cover thickness (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> o3.element.HDR(osi, ele_nodes=ele_nodes, gr=1.0, kbulk=1.0, d1=1.0, d2=1.0, ts=1.0, tr=1.0, n=1, a1=1.0, a2=1.0, a3=1.0, b1=1.0, b2=1.0, b3=1.0, c1=1.0, c2=1.0, c3=1.0, c4=1.0, x1=1.0, x2=1.0, x3=1.0, y1=1.0, y2=1.0, y3=1.0, kc=1.0, phi_m=1.0, ac=1.0, s_dratio=1.0, m=1.0, tc=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.gr = float(gr)
self.kbulk = float(kbulk)
self.d1 = float(d1)
self.d2 = float(d2)
self.ts = float(ts)
self.tr = float(tr)
self.n = int(n)
self.a1 = float(a1)
self.a2 = float(a2)
self.a3 = float(a3)
self.b1 = float(b1)
self.b2 = float(b2)
self.b3 = float(b3)
self.c1 = float(c1)
self.c2 = float(c2)
self.c3 = float(c3)
self.c4 = float(c4)
self.x1 = float(x1)
self.x2 = float(x2)
self.x3 = float(x3)
self.y1 = float(y1)
self.y2 = float(y2)
self.y3 = float(y3)
self.kc = float(kc)
self.phi_m = float(phi_m)
self.ac = float(ac)
self.s_dratio = float(s_dratio)
self.m = float(m)
self.tc = float(tc)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.gr, self.kbulk, self.d1, self.d2, self.ts, self.tr, self.n, self.a1, self.a2, self.a3, self.b1, self.b2, self.b3, self.c1, self.c2, self.c3, self.c4, self.x1, self.x2, self.x3, self.y1, self.y2, self.y3, self.kc, self.phi_m, self.ac, self.s_dratio, self.m, self.tc]
self.to_process(osi)
class RJWatsonEqsBearing2D(ElementBase):
"""
The RJWatsonEqsBearing2D Element Class
This command is used to construct a RJWatsonEqsBearing element object, which is defined by two nodes. The iNode
represents the masonry plate and the jNode represents the sliding surface plate. The element can have zero length
or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D) friction properties (with
post-yield stiffening due to the mass-energy-regulator (MER) springs) for the shear deformations, and
force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D) or four (3D)
directions. To capture the uplift behavior of the bearing, the user-specified UniaxialMaterial
in the axial direction is modified for no-tension behavior. By default (sDratio = 1.0)
P-Delta moments are entirely transferred to the sliding surface (jNode). It is
important to note that rotations of the sliding surface (rotations at the
jNode) affect the shear behavior of the bearing. To avoid the
introduction of artificial viscous damping in the isolation
system (sometimes referred to as "damping leakage in the
isolation system"), the bearing element does not
contribute to the Rayleigh damping by default.
If the element has non-zero length, the local
x-axis is determined from the nodal geometry
unless the optional x-axis vector is
specified in which case the nodal
geometry is ignored and the user-defined orientation is utilized.
For a two-dimensional problem
"""
op_type = 'RJWatsonEqsBearing'
def __init__(self, osi, ele_nodes, frn_mdl, k_init, p_mat=None, vy_mat=None, mz_mat=None, do_rayleigh=False, max_iter: int=None, tol: float=None, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for RJWatsonEqsBearing2D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn_mdl: obj
Object associated with previously-defined frictionmodel
k_init: float
Initial stiffness of sliding friction component in local shear direction
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
vy_mat: obj, optional
Object associated with previously-defined uniaxial_material in shear direction along local y-axis (mer
spring behavior not including friction)
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
max_iter: int, optional
Maximum number of iterations to undertake to satisfy element equilibrium (optional, default = 20)
tol: float, optional
Convergence tolerance to satisfy element equilibrium (optional, default = 1e-8)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [0, 1]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> p_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> vy_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> frn_mdl = o3.friction_model.Coulomb(osi, mu=1.0)
>>> o3.element.RJWatsonEqsBearing2D(osi, ele_nodes=ele_nodes, frn_mdl=frn_mdl, k_init=1.0, p_mat=p_mat, vy_mat=vy_mat,
>>> mz_mat=mz_mat, do_rayleigh=False, max_iter=1, tol=1.0, orient=None, mass=1.0,
>>> shear_dist=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn_mdl = frn_mdl
self.k_init = float(k_init)
self.p_mat = p_mat
self.vy_mat = vy_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
if max_iter is None:
self.max_iter = None
else:
self.max_iter = int(max_iter)
if tol is None:
self.tol = None
else:
self.tol = float(tol)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn_mdl.tag, self.k_init]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 'vy_mat') is not None:
self._parameters += ['-Vy', self.vy_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'max_iter') is not None:
self._parameters += ['-iter', self.max_iter]
if getattr(self, 'tol') is not None:
if getattr(self, 'max_iter') is None:
raise ValueError('Cannot set: tol and not: max_iter')
self._parameters += [self.tol]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
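# Note on the optional iteration settings (sketch based on the parameter assembly above): tol can
# only be supplied together with max_iter, otherwise a ValueError is raised, because the two are
# emitted as one group; e.g. max_iter=20, tol=1e-8 contributes ['-iter', 20, 1e-08] to
# self._parameters after the optional '-P'/'-Vy'/'-Mz' material tags and the '-doRayleigh' flag.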
class RJWatsonEqsBearing3D(ElementBase):
"""
The RJWatsonEqsBearing3D Element Class
This command is used to construct a RJWatsonEqsBearing element object, which is defined by two nodes. The iNode
represents the masonry plate and the jNode represents the sliding surface plate. The element can have zero length
or the appropriate bearing height. The bearing has unidirectional (2D) or coupled (3D) friction properties (with
post-yield stiffening due to the mass-energy-regulator (MER) springs) for the shear deformations, and
force-deformation behaviors defined by UniaxialMaterials in the remaining two (2D) or four (3D)
directions. To capture the uplift behavior of the bearing, the user-specified UniaxialMaterial
in the axial direction is modified for no-tension behavior. By default (sDratio = 1.0)
P-Delta moments are entirely transferred to the sliding surface (jNode). It is
important to note that rotations of the sliding surface (rotations at the
jNode) affect the shear behavior of the bearing. To avoid the
introduction of artificial viscous damping in the isolation
system (sometimes referred to as "damping leakage in the
isolation system"), the bearing element does not
contribute to the Rayleigh damping by default.
If the element has non-zero length, the local
x-axis is determined from the nodal geometry
unless the optional x-axis vector is
specified in which case the nodal
geometry is ignored and the user-defined orientation is utilized.
For a three-dimensional problem
"""
op_type = 'RJWatsonEqsBearing'
def __init__(self, osi, ele_nodes, frn_mdl, k_init, p_mat=None, vy_mat=None, vz_mat=None, t_mat=None, my_mat=None, mz_mat=None, do_rayleigh=False, max_iter: int=None, tol: float=None, orient=None, mass: float=None, shear_dist: float=None):
"""
Initial method for RJWatsonEqsBearing3D
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
frn_mdl: obj
Object associated with previously-defined frictionmodel
k_init: float
Initial stiffness of sliding friction component in local shear direction
p_mat: obj, optional
Object associated with previously-defined uniaxial_material in axial direction
vy_mat: obj, optional
Object associated with previously-defined uniaxial_material in shear direction along local y-axis (mer
spring behavior not including friction)
vz_mat: obj, optional
Object associated with previously-defined uniaxial_material in shear direction along local z-axis (mer
spring behavior not including friction)
t_mat: obj, optional
Object associated with previously-defined uniaxial_material in torsional direction
my_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local y-axis
mz_mat: obj, optional
Object associated with previously-defined uniaxial_material in moment direction around local z-axis
do_rayleigh: bool
To include rayleigh damping from the bearing (optional, default = no rayleigh damping contribution)
max_iter: int, optional
Maximum number of iterations to undertake to satisfy element equilibrium (optional, default = 20)
tol: float, optional
Convergence tolerance to satisfy element equilibrium (optional, default = 1e-8)
orient: None, optional
mass: float, optional
Element mass (optional, default = 0.0)
shear_dist: float, optional
Shear distance from inode as a fraction of the element length (optional, default = 0.0)
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3, ndf=6)
>>> coords = [[0, 0, 0], [0, 1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(2)]
>>> p_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> vy_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> vz_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> t_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> my_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> mz_mat = o3.uniaxial_material.Elastic(osi, 1, 1)
>>> orient_vals = [1, 0, 0]
>>> frn_mdl = o3.friction_model.Coulomb(osi, mu=1.0)
>>> o3.element.RJWatsonEqsBearing3D(osi, ele_nodes=ele_nodes, frn_mdl=frn_mdl, k_init=1.0, p_mat=p_mat,
>>> vy_mat=vy_mat, vz_mat=vz_mat, t_mat=t_mat, my_mat=my_mat, mz_mat=mz_mat,
>>> do_rayleigh=False, max_iter=1, tol=1.0, orient=orient_vals, mass=1.0, shear_dist=1.0)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.frn_mdl = frn_mdl
self.k_init = float(k_init)
self.p_mat = p_mat
self.vy_mat = vy_mat
self.vz_mat = vz_mat
self.t_mat = t_mat
self.my_mat = my_mat
self.mz_mat = mz_mat
self.do_rayleigh = do_rayleigh
if max_iter is None:
self.max_iter = None
else:
self.max_iter = int(max_iter)
if tol is None:
self.tol = None
else:
self.tol = float(tol)
self.orient = orient
if mass is None:
self.mass = None
else:
self.mass = float(mass)
if shear_dist is None:
self.shear_dist = None
else:
self.shear_dist = float(shear_dist)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.frn_mdl.tag, self.k_init]
if getattr(self, 'p_mat') is not None:
self._parameters += ['-P', self.p_mat.tag]
if getattr(self, 'vy_mat') is not None:
self._parameters += ['-Vy', self.vy_mat.tag]
if getattr(self, 'vz_mat') is not None:
self._parameters += ['-Vz', self.vz_mat.tag]
if getattr(self, 't_mat') is not None:
self._parameters += ['-T', self.t_mat.tag]
if getattr(self, 'my_mat') is not None:
self._parameters += ['-My', self.my_mat.tag]
if getattr(self, 'mz_mat') is not None:
self._parameters += ['-Mz', self.mz_mat.tag]
if getattr(self, 'do_rayleigh'):
self._parameters += ['-doRayleigh']
if getattr(self, 'max_iter') is not None:
self._parameters += ['-iter', self.max_iter]
if getattr(self, 'tol') is not None:
if getattr(self, 'max_iter') is None:
raise ValueError('Cannot set: tol and not: max_iter')
self._parameters += [self.tol]
if getattr(self, 'orient') is not None:
self._parameters += ['-orient', *self.orient]
if getattr(self, 'mass') is not None:
self._parameters += ['-mass', self.mass]
if getattr(self, 'shear_dist') is not None:
self._parameters += ['-shearDist', self.shear_dist]
self.to_process(osi)
class FPBearingPTV(ElementBase):
"""
The FPBearingPTV Element Class
The FPBearingPTV command creates a single Friction Pendulum bearing element, which is capable of accounting for the
changes in the coefficient of friction at the sliding surface with instantaneous values of the sliding velocity, axial
pressure and temperature at the sliding surface. The constitutive modelling is similar to the existing
singleFPBearing element, otherwise. The FPBearingPTV element has been verified and validated in
accordance with the ASME guidelines, details of which are presented in Chapter 4 of Kumar et al. (2015a).
"""
op_type = 'FPBearingPTV'
def __init__(self, osi, ele_nodes, mu_ref, is_pressure_dependent, p_ref, is_temperature_dependent, diffusivity, conductivity, is_velocity_dependent, rate_parameter, reffective_fp, radius__contact, k_initial, the_material_a, the_material_b, the_material_c, the_material_d, x1, x2, x3, y1, y2, y3, shear_dist, do_rayleigh, mass, max_iter, tol, unit):
"""
Initial method for FPBearingPTV
Parameters
----------
osi: o3seespy.OpenSeesInstance
ele_nodes: list
A list of two element nodes
mu_ref: float
Reference coefficient of friction
is_pressure_dependent: int
1 if the coefficient of friction is a function of instantaneous axial pressure
p_ref: float
Reference axial pressure (the bearing pressure under static loads)
is_temperature_dependent: int
1 if the coefficient of friction is a function of instantaneous temperature at the sliding surface
diffusivity: float
Thermal diffusivity of steel
conductivity: float
Thermal conductivity of steel
is_velocity_dependent: int
1 if the coefficient of friction is a function of instantaneous velocity at the sliding surface
rate_parameter: float
The exponent that determines the shape of the coefficient of friction vs. sliding velocity curve
reffective_fp: float
Effective radius of curvature of the sliding surface of the fpbearing
radius__contact: float
Radius of contact area at the sliding surface
k_initial: float
Lateral stiffness of the sliding bearing before sliding begins
the_material_a: int
Object for the uniaxial material in the axial direction
the_material_b: int
Object for the uniaxial material in the torsional direction
the_material_c: int
Object for the uniaxial material for rocking about local y axis
the_material_d: int
Object for the uniaxial material for rocking about local z axis
x1: float
Vector components to define local x axis
x2: float
Vector components to define local x axis
x3: float
Vector components to define local x axis
y1: float
Vector components to define local y axis
y2: float
Vector components to define local y axis
y3: float
Vector components to define local y axis
shear_dist: float
Shear distance from inode as a fraction of the length of the element
do_rayleigh: int
To include rayleigh damping from the bearing
mass: float
Element mass
max_iter: int
Maximum number of iterations to satisfy the equilibrium of element
tol: float
Convergence tolerance to satisfy the equilibrium of the element
unit: int
Integer to identify the unit system from the list below. * ``1``: N, m, s, C * ``2``: kN, m, s, C * ``3``: N, mm, s, C
* ``4``: kN, mm, s, C * ``5``: lb, in, s, C * ``6``: kip, in, s, C * ``7``: lb, ft, s, C * ``8``: kip, ft, s, C
Examples
--------
>>> import o3seespy as o3
>>> # Example is currently not working
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> coords = [[0, 0], [1, 0]]
>>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(len(coords))]
>>> o3.element.FPBearingPTV(osi, ele_nodes=ele_nodes, mu_ref=1.0, is_pressure_dependent=1, p_ref=1.0, is_temperature_dependent=1, diffusivity=1.0, conductivity=1.0, is_velocity_dependent=1, rate_parameter=1.0, reffective_fp=1.0, radius__contact=1.0, k_initial=1.0, the_material_a=1, the_material_b=1, the_material_c=1, the_material_d=1, x1=1.0, x2=1.0, x3=1.0, y1=1.0, y2=1.0, y3=1.0, shear_dist=1.0, do_rayleigh=1, mass=1.0, max_iter=1, tol=1.0, unit=1)
"""
self.osi = osi
self.ele_node_tags = [x.tag for x in ele_nodes]
self.ele_nodes = ele_nodes
self.mu_ref = float(mu_ref)
self.is_pressure_dependent = int(is_pressure_dependent)
self.p_ref = float(p_ref)
self.is_temperature_dependent = int(is_temperature_dependent)
self.diffusivity = float(diffusivity)
self.conductivity = float(conductivity)
self.is_velocity_dependent = int(is_velocity_dependent)
self.rate_parameter = float(rate_parameter)
self.reffective_fp = float(reffective_fp)
self.radius__contact = float(radius__contact)
self.k_initial = float(k_initial)
self.the_material_a = int(the_material_a)
self.the_material_b = int(the_material_b)
self.the_material_c = int(the_material_c)
self.the_material_d = int(the_material_d)
self.x1 = float(x1)
self.x2 = float(x2)
self.x3 = float(x3)
self.y1 = float(y1)
self.y2 = float(y2)
self.y3 = float(y3)
self.shear_dist = float(shear_dist)
self.do_rayleigh = int(do_rayleigh)
self.mass = float(mass)
self.max_iter = int(max_iter)
self.tol = float(tol)
self.unit = int(unit)
osi.n_ele += 1
self._tag = osi.n_ele
self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.mu_ref, self.is_pressure_dependent, self.p_ref, self.is_temperature_dependent, self.diffusivity, self.conductivity, self.is_velocity_dependent, self.rate_parameter, self.reffective_fp, self.radius__contact, self.k_initial, self.the_material_a, self.the_material_b, self.the_material_c, self.the_material_d, self.x1, self.x2, self.x3, self.y1, self.y2, self.y3, self.shear_dist, self.do_rayleigh, self.mass, self.max_iter, self.tol, self.unit]
self.to_process(osi)
| 45.677833
| 521
| 0.614492
|
45b1ee03736280cebcc40ed2096786f738993b8b
| 3,311
|
py
|
Python
|
frameworks/hdfs/tests/test_tls.py
|
jorgelopez1/hdfs
|
892589180438b90486ec7530d2a63c218b20e79f
|
[
"Apache-2.0"
] | null | null | null |
frameworks/hdfs/tests/test_tls.py
|
jorgelopez1/hdfs
|
892589180438b90486ec7530d2a63c218b20e79f
|
[
"Apache-2.0"
] | null | null | null |
frameworks/hdfs/tests/test_tls.py
|
jorgelopez1/hdfs
|
892589180438b90486ec7530d2a63c218b20e79f
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import sdk_cmd
import sdk_install
import sdk_hosts
import sdk_plan
import sdk_security
import sdk_utils
import shakedown
from tests import config
from tests.config import (
DEFAULT_TASK_COUNT,
PACKAGE_NAME,
SERVICE_NAME,
)
DEFAULT_JOURNAL_NODE_TLS_PORT = 8481
DEFAULT_NAME_NODE_TLS_PORT = 9003
DEFAULT_DATA_NODE_TLS_PORT = 9006
@pytest.fixture(scope='module')
def service_account():
"""
Creates service account with `hdfs` name and yields the name.
"""
name = SERVICE_NAME
sdk_security.create_service_account(
service_account_name=name, service_account_secret=name)
# TODO(mh): Fine-grained permissions need to be addressed in DCOS-16475
sdk_cmd.run_cli(
"security org groups add_user superusers {name}".format(name=name))
yield name
sdk_security.delete_service_account(
service_account_name=name, service_account_secret=name)
@pytest.fixture(scope='module')
def hdfs_service_tls(service_account):
try:
sdk_install.install(
PACKAGE_NAME,
service_name=SERVICE_NAME,
expected_running_tasks=DEFAULT_TASK_COUNT,
additional_options={
"service": {
"service_account_secret": service_account,
"service_account": service_account,
"tls": {
"enabled": True,
}
}
}
)
sdk_plan.wait_for_completed_deployment(SERVICE_NAME)
# Wait for service health check to pass
shakedown.service_healthy(SERVICE_NAME)
except Exception as error:
try:
sdk_install.uninstall(PACKAGE_NAME, SERVICE_NAME)
except Exception:
pass
raise error
yield
sdk_install.uninstall(PACKAGE_NAME, SERVICE_NAME)
@pytest.mark.tls
@pytest.mark.sanity
@sdk_utils.dcos_1_10_or_higher
@sdk_utils.dcos_ee_only
def test_healthy(hdfs_service_tls):
config.check_healthy(service_name=config.SERVICE_NAME)
@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.data_integrity
@sdk_utils.dcos_1_10_or_higher
@sdk_utils.dcos_ee_only
def test_write_and_read_data_over_tls(hdfs_service_tls):
config.write_data_to_hdfs(config.SERVICE_NAME, config.TEST_FILE_1_NAME)
config.read_data_from_hdfs(config.SERVICE_NAME, config.TEST_FILE_1_NAME)
@pytest.mark.tls
@pytest.mark.sanity
@sdk_utils.dcos_1_10_or_higher
@sdk_utils.dcos_ee_only
@pytest.mark.parametrize("node_type,port", [
('journal', DEFAULT_JOURNAL_NODE_TLS_PORT),
('name', DEFAULT_NAME_NODE_TLS_PORT),
('data', DEFAULT_DATA_NODE_TLS_PORT),
])
def test_verify_https_ports(node_type, port, hdfs_service_tls):
"""
Verify that the HTTPS port is open on name, journal and data node types.
"""
host = sdk_hosts.autoip_host(
config.SERVICE_NAME, "{}-0-node".format(node_type), port)
exit_status, output = shakedown.run_command_on_master(
_curl_https_get_code(host))
assert exit_status
assert output == '200'
def _curl_https_get_code(host):
"""
Create a curl command for a given host that outputs HTTP status code.
"""
return (
'/opt/mesosphere/bin/curl '
'-s -o /dev/null -w "%{{http_code}}" '
'https://{host}'
).format(host=host)
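# For illustration only (the host below is a made-up placeholder), the command built above expands to:
#   /opt/mesosphere/bin/curl -s -o /dev/null -w "%{http_code}" https://name-0-node.hdfs.example:9003
# so test_verify_https_ports simply asserts that the TLS endpoint answers with HTTP status 200.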
| 26.918699
| 77
| 0.693144
|
ffaa3f5c0e98f247f46a60629d7d1351f6447702
| 1,568
|
py
|
Python
|
docs/examples/nrelcsm/example.py
|
ptrbortolotti/WISDEM
|
2b7e44716d022e2f62140073dd078c5deeb8bf0a
|
[
"Apache-2.0"
] | 1
|
2020-06-02T14:58:28.000Z
|
2020-06-02T14:58:28.000Z
|
docs/examples/nrelcsm/example.py
|
ptrbortolotti/WISDEM
|
2b7e44716d022e2f62140073dd078c5deeb8bf0a
|
[
"Apache-2.0"
] | 5
|
2016-05-27T07:50:55.000Z
|
2020-05-03T21:28:22.000Z
|
docs/examples/nrelcsm/example.py
|
ptrbortolotti/WISDEM
|
2b7e44716d022e2f62140073dd078c5deeb8bf0a
|
[
"Apache-2.0"
] | 2
|
2018-12-27T06:14:57.000Z
|
2021-04-19T18:39:36.000Z
|
# 1 ---------
from NREL_CSM.config import *
from NREL_CSM.csm import csm
# 1 ---------
# 2 ---------
#Default Cost and Scaling Model inputs for 5 MW turbine (onshore)
ppi.curr_yr = 2009
ppi.curr_mon = 12
hubHeight=90.0
ratedPower=5000.0
maxTipSpd=80.0
rotorDiam=126.0
dtDesign=1
nblades = 3
altitude=0.0
thrustCoeff=0.50
seaDepth=20.0
crane=True
advancedBlade = True
advancedBedplate = 0
advancedTower = False
year = 2009
month = 12
maxCp=0.488
maxTipSpdRatio = 7.525
cutInWS = 3.0
cutOutWS = 25.0
airDensity = 0.0
shearExp=0.1
ws50m=8.02
weibullK=2.15
soilingLosses = 0.0
arrayLosses = 0.10
availability = 0.941
fcr = 0.12
taxrate = 0.4
discountrate = 0.07
constructiontime = 1
projlifetime = 20
turbineNum = 100
# 2 ----------
# 3 ----------
csmtest = csm(dtDesign)
csmtest.compute(hubHeight, ratedPower, maxTipSpd, rotorDiam, dtDesign, nblades, altitude, thrustCoeff, seaDepth, crane, advancedBlade, advancedBedplate, advancedTower, year, month, maxCp, maxTipSpdRatio, cutInWS, cutOutWS, \
airDensity, shearExp, ws50m, weibullK, soilingLosses, arrayLosses, availability, fcr, taxrate, discountrate, constructiontime, projlifetime, turbineNum)
# 3 ----------
# 4 ----------
print "LCOE %9.8f" % (csmtest.fin.LCOE)
print "COE %9.8f"%(csmtest.fin.COE)
print "AEP %9.5f"%(csmtest.aep.aep / 1000.0)
print "BOS %9.5f"%(csmtest.bos.cost / 1000.0)
print "TCC %9.5f"%(csmtest.turb.cost / 1000.0)
print "OM %9.5f"%(csmtest.om.cost / 1000.0)
print "LRC %9.5f"%(csmtest.om.lrc / 1000.0)
print "LLC %9.5f"%(csmtest.om.llc / 1000.0)
# 4 ---------
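# Note: the print statements above use Python 2 syntax, matching the rest of this example.
# A minimal Python 3 sketch of the same reporting block (same attributes, print() function) would be:
# print("LCOE %9.8f" % csmtest.fin.LCOE)
# print("COE %9.8f" % csmtest.fin.COE)
# print("AEP %9.5f" % (csmtest.aep.aep / 1000.0))
# ... and so on for the remaining quantities.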
| 24.123077
| 225
| 0.684949
|
dc8bd6768180bffa0efdbe8d2fce656a4a308149
| 23,150
|
py
|
Python
|
xldlib/objects/abstract/dataframe.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
xldlib/objects/abstract/dataframe.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
xldlib/objects/abstract/dataframe.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
'''
Objects/Abstract/dataframe
________________________
Custom data holders for dataframe-like mapping types.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
import operator as op
import weakref
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
from collections import Mapping
import six
from xldlib.definitions import get_ident, ZIP
from xldlib.general import mapping, sequence
from xldlib import resources
# TRANSFORM RECIPE
# ----------------
class TransformableDict(dict):
'''
Dict subclass that transforms lookup keys (using the 'A'/'1' suffixes)
without overriding any parent-class hooks.
'''
_suffixes = ['A', '1']
def __init__(self, transform):
'''
Sets the method by which to transform the keys using a
column dictionary.
:
transform -- tuple with "mode", "table", and "digit" for
the transform mode (comparative, quantitative, normal),
table, for the keys, and digit for the boolean.
'''
self.transform = transform
# TRANSFORMERS
# ------------
def key_transform(self, key, index=None):
'''Public method for internal __keytransform__'''
return self.__keytransform__(key, index)
def __keytransform__(self, key, index=None):
'''Transforms key in other to key in dict'''
if isinstance(key, six.string_types):
return self._transform_string(key)
elif isinstance(key, tuple):
return self._transform_tuple(key)
def _transform_string(self, key):
'''Transforms string-based key in other to key in dict'''
# transforms if no suffix for the key
if key in self:
return key
else:
for suffix in self._suffixes:
newkey = '{0} {1}'.format(key, suffix)
if newkey in self:
return newkey
else:
return key
def _transform_tuple(self, key):
'''Transforms tuple-based key in other to key in dict'''
if key in self:
return key
else:
last = key[-1]
for suffix in self._suffixes:
newkey = key[:-1] + ('{0} {1}'.format(last, suffix),)
if newkey in self:
return newkey
else:
return key
# REVERSE TRANSFORMERS
# --------------------
def reverse_transform(self, other, key):
'''Public method for internal __reverse_transform__'''
return self.__reverse_transform__(other, key)
def __reverse_transform__(self, other, key):
'''Transforms key in self to key in other'''
if isinstance(key, six.string_types):
return self._reverse_transform_string(other, key)
elif isinstance(key, tuple):
return self._reverse_transform_tuple(other, key)
def _reverse_transform_string(self, other, key):
'''Transforms string-based key in self to key in other'''
if key in other:
return key
else:
for suffix in self._suffixes:
ending = ' {}'.format(suffix)
if key.endswith(ending):
newkey = key[: -len(ending)]
if newkey in other:
return newkey
else:
return key
def _reverse_transform_tuple(self, other, key):
'''Transforms tuple-based key in self to key in other'''
if key in other:
return key
else:
last = key[-1]
for suffix in self._suffixes:
ending = ' {}'.format(suffix)
if last.endswith(ending):
newkey = key[:-1] + (last[: -len(ending)],)
if newkey in other:
return newkey
else:
return key
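# Behaviour sketch for the suffix-based key transform (illustrative values only):
# d = TransformableDict({})
# d['Area A'] = 1.0                              # stored key carries the ' A' suffix
# d.key_transform('Area')      -> 'Area A'       # bare key falls back to the suffixed key
# d.key_transform('Area A')    -> 'Area A'       # exact matches pass through unchanged
# d.reverse_transform({'Area': 2.0}, 'Area A') -> 'Area'   # maps back onto a plain mapping's key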
# ORDERED TRANSFORMABLE
# ---------------------
class OrderedTransform(TransformableDict):
'''
Dictionary that remembers insertion order
An inherited dict maps keys to values.
The inherited dict provides __getitem__, __len__, __contains__, and get.
The remaining methods are order-aware.
Big-O running times for all methods are the
same as for regular dictionaries.
'''
index = None
def __init__(self, *args, **kwds):
'''
Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
# call base class, pop kwds so can still use keyworded values
try:
self.transform = kwds.pop('transform')
except KeyError:
self.transform = {}
TransformableDict.__init__(self, self.transform)
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
if len(args) != 0 or len(kwds) != 0:
self.__update(*args, **kwds)
# MAGIC
def __getitem__(self, key, index=None, dict_getitem=dict.__getitem__):
return dict_getitem(self, self.__keytransform__(key, index))
def __setitem__(self, key, value, index=None,
dict_setitem=dict.__setitem__):
'''
od.__setitem__(i, y) <==> od[i]=y
Setting a new item creates a new link which goes at the end of
the linked list, and the inherited dictionary is updated with
the new key/value pair.
'''
key = self.__keytransform__(key, index)
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
# convert the value to an ExtendableList
if not isinstance(value, sequence.ExtendableList):
value = sequence.ExtendableList(value, blank='')
# store the value against the transformed key
dict_setitem(self, key, value)
def __delitem__(self, key, index=None, dict_delitem=dict.__delitem__):
'''
od.__delitem__(y) <==> del od[y]
Deleting an existing item uses self.__map to find the link which
is then removed by updating the links in the predecessor and
successor nodes.
'''
key = self.__keytransform__(key, index)
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
# ITERATORS
def __iter__(self):
'''od.__iter__() <==> iter(od)'''
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'''od.__reversed__() <==> reversed(od)'''
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
# DEL
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.values():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''
od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order
if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
# HELPERS
def keys(self):
'''od.keys() -> list of keys in od'''
return list(self)
def values(self):
'''od.values() -> list of values in od'''
return [self[key] for key in self]
def items(self):
'''od.items() -> list of (key, value) pairs in od'''
return [(key, self[key]) for key in self]
def iterkeys(self):
'''od.iterkeys() -> an iterator over the keys in od'''
return iter(self)
def itervalues(self):
'''od.itervalues -> an iterator over the values in od'''
for k in self:
yield self[k]
def iteritems(self):
'''od.iteritems -> an iterator over the (key, value) items in od'''
for k in self:
yield (k, self[k])
update = mapping.update_setitem
# let subclasses override update without breaking __init__
__update = update
__marker = object()
def pop(self, key, default=__marker):
'''
od.pop(k[,d]) -> v, remove specified key and return the
corresponding value.
If key is not found, d is returned if given, otherwise
KeyError is raised.
'''
key = self.__keytransform__(key)
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'''
od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k
not in od.
'''
key = self.__keytransform__(key)
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'''od.__repr__() <==> repr(od)'''
call_key = id(self), get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
finally:
del _repr_running[call_key]
def __reduce__(self):
'''Return state information for pickling'''
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
# ensure all instance attributes removed and reset
for k in vars(OrderedTransform()):
inst_dict.pop(k, None)
# make sure instance attribute which should carry over is set
inst_dict['transform'] = self.transform
return (self.__class__, (items,), inst_dict)
def copy(self):
'''od.copy() -> a shallow copy of od'''
return self.__class__(self, transform=self.transform)
@classmethod
def fromkeys(cls, iterable, value=None):
'''
OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''
od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, TransformableDict):
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
'''
od.viewkeys() -> a set-like object providing a view on od's
keys.
'''
return KeysView(self)
def viewvalues(self):
'''
od.viewvalues() -> an object providing a view on od's
values.
'''
return ValuesView(self)
def viewitems(self):
'''
od.viewitems() -> a set-like object providing a view on od's
items.
'''
return ItemsView(self)
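# Behaviour sketch (illustrative; ExtendableList comes from xldlib.general.sequence):
# od = OrderedTransform(transform={})
# od['Mass'] = [1.0, 2.0]        # plain sequences are wrapped as ExtendableList(value, blank='')
# od['Area A'] = [3.0]
# list(od)        -> ['Mass', 'Area A']    # insertion order is remembered
# od['Area']      -> od['Area A']          # lookups fall back to the ' A' suffixed key
# od.popitem()    -> ('Area A', [3.0])     # LIFO by default, FIFO with last=False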
# LOCATION INDEXING
# -----------------
class _LocationIndexer(object):
'''Custom Indexer used to provide 2D indexing for the DataFrameDict'''
def __init__(self, obj):
super(_LocationIndexer, self).__init__()
self.obj = weakref.proxy(obj)
# MAGIC
def __setitem__(self, key, value):
'''Pandas like indexing from the dict object'''
if type(key) is tuple:
return self._setitem_tuple(key, value)
else:
return self._setitem_axis(key, value, axis=0)
def __getitem__(self, key):
'''Pandas like indexing from the dict object'''
if type(key) is tuple:
return self._getitem_tuple(key)
else:
return self._getitem_axis(key, axis=0)
# SETTERS
def _setitem_tuple(self, key, value):
'''
Expands the indexer and raises an exception if an improper
indexer is included. The key can either be length 1 or 2,
comprising an (index,) or (index, column) pair.
Sets row or col/row value(s).
:
key -- tupled key corresponding to a column/row or
multiindexed column
value -- value to set to cell
'''
if len(key) > 2:
raise IndexError("Too many indexers")
elif len(key) < 1:
raise IndexError("No identified indexers")
if len(key) == 1:
# regular index passed as tuple (return all values at row)
key = key[0]
self._setitem_axis(key, value)
else:
# passed (row, column) pair
row, column = key
self.obj.__getitem__(column, index=None).__setitem__(row, value)
# set remaining values to keep dimensions similar
for newkey in self.obj:
                if newkey != column:
self.obj.__getitem__(newkey).setdefault(row)
def _setitem_axis(self, index, value, axis=0):
'''
Sets an item along an axis based on either a row or index
key. Adds each value in a list sequentially to the row if
indexing a row.
:
index -- row to set the item
value -- new value to set in row
'''
if axis == 0:
# along rows
# can't use pure duck typing here -- str is sequencable
if isinstance(value, (list, tuple)):
# check if len same
                assert len(value) == len(self.obj)
for key, val in ZIP(self.obj, value):
self.obj[key].__setitem__(index, val)
elif isinstance(value, Mapping):
for key in self.obj:
# reverse transform the key, in case suffix added
key = self.obj.reverse_transform(value, key)
# add series sequentially to df
try:
sublist = self.obj[key]
# need to set the item here, have default
val = value.get(key, sublist.blank)
sublist.__setitem__(index, val)
except KeyError:
pass
else:
for key in self.obj:
self.obj[key].__setitem__(index, value)
elif axis == 1:
# along columns
self.obj.__setitem__(index, value, index=None)
# GETTERS
def _getitem_tuple(self, key):
'''
Expands the indexer and raises an exception if an improper
indexer is included. The key can either be length 1 or 2,
comprising an (index,) or (index, column) pair.
Returns row or col/row value(s).
'''
if len(key) > 2:
raise IndexError("Too many indexers")
elif len(key) < 1:
raise IndexError("No identified indexers")
# regular index passed as tuple (return all values at row)
if len(key) == 1:
key = key[0]
return self._getitem_axis(key)
# passed (row, column) pair
else:
row, column = key
return self.obj.__getitem__(column, index=None).__getitem__(row)
def _getitem_axis(self, key, axis=0):
'''
Grabs an item along an axis based on a key, which is either
an row or column key. While indexing by rows, the key is an
integer axis for the row, and it returns a list.
Indexing by column is equivalent to dict.__getitem__(key).
'''
if axis == 0:
# along rows
return [v.__getitem__(key) for v in self.obj.values()]
elif axis == 1:
# along columns
return self.obj.__getitem__(key, index=None)
# DATAFRAMES
# ----------
class DataFrameDict(OrderedTransform):
'''
Creates an indexable, locable, sortable and concatenable
dictionary that acts like a dataframe but with faster indexing.
'''
def __init__(self, *args, **kwds):
'''Sets the location indexer(s).'''
# grab column parameter to initialize blank index
if 'columns' in kwds:
columns = kwds.pop('columns')
OrderedTransform.__init__(self, *args, **kwds)
try:
self.set_columns(columns)
except NameError:
pass
# set location indexers
self.loc = _LocationIndexer(self)
self.columns = self
# PUBLIC FUNCTIONS
def sort(self, **kwds):
'''Sorts the dataframe'''
columns = kwds.get('columns', [])
# 0 for sorting list indexes, 1 for columns (keys)
axis = kwds.get('axis', 0)
# grabs order keys
ascending = kwds.get('ascending', True)
        # sorting by specific columns is only meaningful along rows (axis=0)
if columns != [] and axis == 1:
# error message from Pandas
raise ValueError("When sorting by column, axis must be 0 (rows)")
# ensure sort order if designated for all columns same length
if isinstance(ascending, (list, tuple)):
if len(ascending) != len(columns):
raise IndexError("Column dimensions do not match "
"sort order dimensions.")
if axis == 0:
self.sort_index(columns, ascending)
elif axis == 1:
self.sort_columns(ascending)
def concat(self, other):
'''Concats other DataFrameDict to this one'''
# grab key0 to determine length
self_length = len(self[self.get_column()])
other_length = len(other[next(iter(other))])
# add keys currently in dict
for key in self:
try:
self[key] += other[key]
except KeyError:
self[key] += [float('nan')]*other_length
# add new keys
for key in other:
if key not in self:
self[key] = [float('nan')]*self_length + other[key]
def sort_index(self, columns, ascending):
'''
Sorts each list based on values within the index.
Uses the zip builtin for this effect.
:
columns -- list of columns for sort priority
ascending -- bool or list of bools for sort order
self = [('a', [2, 1, 3]), ('b', [3, 4, 2])]
sort_index(['a'], True)->[('a', [1, 2, 3]), ('b', [4, 3, 2])]
'''
# assert all columns in dict (including with mask)
missing = [i for i in columns if i not in self]
if missing:
            raise KeyError("sort keys missing from columns", ', '.join(missing))
# grab sort keys and then generate zip
sort_keys = list(columns) + [i for i in self if i not in set(columns)]
zipped = list(zip(*[self[i] for i in sort_keys]))
# need to set sort depending on bool/list
if isinstance(ascending, bool):
zipped.sort(reverse=not ascending)
else:
# negative slice to sort lower ranked columns first
for index in range(len(ascending))[::-1]:
asc = ascending[index]
zipped.sort(key=op.itemgetter(index), reverse=not asc)
# now need to set to values
values = list(zip(*zipped))
for index, key in enumerate(sort_keys):
value = values[index]
self[key] = value
def sort_columns(self, ascending):
'''
Sorts the column order, by resetting OrderedTransform.__root.
Just simply replaces OrderedTransform.__root to the proper
form.
:
ascending -- bool or list of bools for sort order
sort_columns(True)->[('a', [...]), ('b', [...])]
'''
# grab keys
keys = sorted(self, reverse=not ascending)
self._change_root(keys)
def rename(self, columns=None, **kwds):
'''
Renames a given key and sets it in the exact same position
within the root.
'''
self._change_root(list(self), mask=columns)
# SETTERS
def set_columns(self, columns, length=0):
'''Sets the columns to initialize the DataFrame'''
for column in columns:
default = sequence.ExtendableList(blank='')
default += [float('nan')] * length
self.setdefault(column, default)
def set_value(self, index=None, value=' '):
'''Sets a value in the dataframe, default a row spacer'''
column = self.get_column()
if index is None:
index = self.get_last_index()
self.loc[index, column] = value
def set_header(self):
'''Sets the dataframe header'''
string = 'xlDiscoverer v{0}'.format(resources.BUILD)
self.set_value(index=0, value=string)
# HELPERS
def _change_root(self, keys, **kwds):
'''
Changes the root order or names based on a key list, with
optional keywords to change the column names.
'''
tmp = {k: self.pop(k) for k in keys}
# blank ordered root and reset
root = self._OrderedTransform__root
root[:] = [root, root, None]
# reset root using same protocol
mask = kwds.get("mask", {})
for key in keys:
new_key = mask.get(key, key)
self[new_key] = tmp[key]
# GETTERS
def get_last_index(self):
'''Grabs the last index in the current instance'''
column = self.get_column()
return len(self[column])
def get_column(self, index=0):
'''Grabs the column from index in the dataframe dict'''
itr = iter(self)
while index:
next(itr)
index -= 1
return next(itr)
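# A minimal usage sketch for the DataFrameDict above (the helper name below is
# illustrative only and not used elsewhere). It assumes the module's own imports
# (sequence, op, resources) resolve exactly as they do for the class definitions,
# so it shows the intended calling pattern rather than a verified script.
def _dataframe_dict_sketch():
    df = DataFrameDict(columns=['a', 'b'])
    # whole-row assignment through the pandas-like ``loc`` indexer
    df.loc[0] = {'a': 2, 'b': 3}
    df.loc[1] = {'a': 1, 'b': 4}
    # (row, column) cell assignment and access
    df.loc[1, 'b'] = 5
    value = df.loc[1, 'a']                  # expected: 1
    # sort every column by the values in column 'a'
    df.sort(columns=['a'], ascending=True)
    return df, value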
| 30.621693
| 79
| 0.553089
|
e9f0473d1aedaa081d08f5ff15ad579b8014577f
| 6,613
|
py
|
Python
|
python/tests/api/expressions/test_inclusive_metrics_evaluator.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 2,161
|
2020-05-28T01:20:01.000Z
|
2022-03-31T14:48:04.000Z
|
python/tests/api/expressions/test_inclusive_metrics_evaluator.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 3,096
|
2020-05-27T20:57:13.000Z
|
2022-03-31T22:55:42.000Z
|
python/tests/api/expressions/test_inclusive_metrics_evaluator.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 879
|
2020-05-28T01:20:01.000Z
|
2022-03-31T12:48:48.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from iceberg.api.expressions import (Expressions,
InclusiveMetricsEvaluator)
from iceberg.exceptions import ValidationException
from pytest import raises
def test_all_nulls(schema, file):
# Should skip: no non-null value in all null column
assert not InclusiveMetricsEvaluator(schema, Expressions.not_null("all_nulls")).eval(file)
# Should read: column with some nulls contains a non-null value
assert InclusiveMetricsEvaluator(schema, Expressions.not_null("some_nulls")).eval(file)
# Should read: non-null column contains a non-null value
assert InclusiveMetricsEvaluator(schema, Expressions.not_null("no_nulls")).eval(file)
def test_no_nulls(schema, file):
# Should read: at least one null value in all null column
assert InclusiveMetricsEvaluator(schema, Expressions.is_null("all_nulls")).eval(file)
# Should read: column with some nulls contains a null value
assert InclusiveMetricsEvaluator(schema, Expressions.is_null("some_nulls")).eval(file)
# Should skip: non-null column contains no null values
assert not InclusiveMetricsEvaluator(schema, Expressions.is_null("no_nulls")).eval(file)
def test_required_column(schema, file):
assert InclusiveMetricsEvaluator(schema, Expressions.not_null("required")).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.is_null("required")).eval(file)
def test_missing_column(schema, file):
with raises(RuntimeError):
InclusiveMetricsEvaluator(schema, Expressions.less_than("missing", 5)).eval(file)
def test_missing_stats(schema, missing_stats, missing_stats_exprs):
assert InclusiveMetricsEvaluator(schema, missing_stats_exprs).eval(missing_stats)
def test_zero_record_file(schema, empty, zero_rows_exprs):
assert not InclusiveMetricsEvaluator(schema, zero_rows_exprs).eval(empty)
def test_not(schema, file):
assert InclusiveMetricsEvaluator(schema, Expressions.not_(Expressions.less_than("id", 5))).eval(file)
assert not InclusiveMetricsEvaluator(schema,
Expressions.not_(Expressions.greater_than("id", 5))).eval(file)
def test_and(schema, file):
assert not InclusiveMetricsEvaluator(schema,
Expressions.and_(Expressions.less_than("id", 5),
Expressions.greater_than_or_equal("id", 0))).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.and_(Expressions.greater_than("id", 5),
Expressions.less_than_or_equal("id", 30))).eval(file)
def test_or(schema, file):
assert not InclusiveMetricsEvaluator(schema,
Expressions.or_(Expressions.less_than("id", 5),
Expressions.greater_than_or_equal("id", 80))).eval(file)
assert InclusiveMetricsEvaluator(schema,
Expressions.or_(Expressions.less_than("id", 5),
Expressions.greater_than_or_equal("id", 60))).eval(file)
def test_integer_lt(schema, file):
assert not InclusiveMetricsEvaluator(schema, Expressions.less_than("id", 5)).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.less_than("id", 30)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.less_than("id", 31)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.less_than("id", 79)).eval(file)
def test_integer_gt(schema, file):
assert not InclusiveMetricsEvaluator(schema, Expressions.greater_than("id", 85)).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.greater_than("id", 79)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.greater_than("id", 78)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.greater_than("id", 75)).eval(file)
def test_integer_gt_eq(schema, file):
assert not InclusiveMetricsEvaluator(schema, Expressions.greater_than_or_equal("id", 85)).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.greater_than_or_equal("id", 80)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.greater_than_or_equal("id", 79)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.greater_than_or_equal("id", 75)).eval(file)
def test_integer_eq(schema, file):
assert not InclusiveMetricsEvaluator(schema, Expressions.equal("id", 5)).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.equal("id", 29)).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.equal("id", 80)).eval(file)
assert not InclusiveMetricsEvaluator(schema, Expressions.equal("id", 85)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.equal("id", 30)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.equal("id", 75)).eval(file)
assert InclusiveMetricsEvaluator(schema, Expressions.equal("id", 79)).eval(file)
def test_integer_not_eq(schema, file, not_eq):
assert InclusiveMetricsEvaluator(schema, not_eq).eval(file)
def test_not_eq_rewritten(schema, file, not_eq_rewrite):
assert InclusiveMetricsEvaluator(schema, Expressions.not_(not_eq_rewrite)).eval(file)
def test_case_insensitive_int_not_eq_rewritten(schema, file, not_eq_uc):
assert InclusiveMetricsEvaluator(schema, Expressions.not_(not_eq_uc),
case_sensitive=False).eval(file)
def test_case_sensitive_int_not_eq_rewritten(schema, file, not_eq_uc):
with raises(ValidationException):
assert InclusiveMetricsEvaluator(schema, Expressions.not_(not_eq_uc),
case_sensitive=True).eval(file)
| 50.480916
| 115
| 0.72418
|
91665f41c154bc1000aae6cd406068ca46b3849b
| 1,788
|
py
|
Python
|
ginga/mockw/ImageViewCanvasMock.py
|
kyraikeda/ginga
|
e0ce979de4a87e12ba7a90eec0517a0be05d14bc
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76
|
2015-01-05T14:46:14.000Z
|
2022-03-23T04:10:54.000Z
|
ginga/mockw/ImageViewCanvasMock.py
|
kyraikeda/ginga
|
e0ce979de4a87e12ba7a90eec0517a0be05d14bc
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 858
|
2015-01-17T01:55:12.000Z
|
2022-03-08T20:20:31.000Z
|
ginga/mockw/ImageViewCanvasMock.py
|
kyraikeda/ginga
|
e0ce979de4a87e12ba7a90eec0517a0be05d14bc
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 60
|
2015-01-14T21:59:07.000Z
|
2022-02-13T03:38:49.000Z
|
#
# ImageViewCanvasMock.py -- A Ginga image widget with canvas drawing in mock
# widget set
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.mockw import ImageViewMock
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
class ImageViewCanvasError(ImageViewMock.ImageViewMockError):
pass
class ImageViewCanvas(ImageViewMock.ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, settings=None,
rgbmap=None, bindmap=None, bindings=None):
ImageViewMock.ImageViewZoom.__init__(self, logger=logger,
settings=settings,
rgbmap=rgbmap,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self)
# we are both a viewer and a canvas
self.set_canvas(self, private_canvas=self)
# METHODS THAT WERE IN IPG
def add_canvas(self, tag=None):
# add a canvas to the view
DrawingCanvas = self.getDrawClass('drawingcanvas')
canvas = DrawingCanvas()
# enable drawing on the canvas
canvas.enable_draw(True)
canvas.ui_set_active(True)
canvas.set_surface(self)
# add the canvas to the view.
self.add(canvas, tag=tag)
return canvas
def show(self):
from IPython.display import Image
return Image(data=bytes(self.get_rgb_image_as_bytes(format='png')),
format='png', embed=True)
#END
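# Minimal usage sketch for ImageViewCanvas (the helper name is illustrative and
# unused elsewhere). It assumes ginga's mock backend builds a viewer from just a
# logger, as the other backends do; show() additionally needs IPython, so it is
# left commented out here.
def _image_view_canvas_sketch():
    import logging
    logger = logging.getLogger('ImageViewCanvasMock.sketch')
    viewer = ImageViewCanvas(logger=logger)
    canvas = viewer.add_canvas(tag='drawing')   # DrawingCanvas, drawing enabled
    # inside an IPython/Jupyter session the rendered RGB image can be shown:
    # return viewer.show()
    return viewer, canvas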
| 33.735849
| 76
| 0.606823
|
62198d6ddfc87ec6cd6e287961ec1d735b3976c6
| 10,157
|
py
|
Python
|
Extras/SMI_LSL/DataStreaming.py
|
HIIT/PeyeDF
|
55702482415a4a74820e1eb10536787a238a1054
|
[
"MIT"
] | 3
|
2019-10-01T08:30:04.000Z
|
2021-12-08T18:18:01.000Z
|
Extras/SMI_LSL/DataStreaming.py
|
HIIT/PeyeDF
|
55702482415a4a74820e1eb10536787a238a1054
|
[
"MIT"
] | null | null | null |
Extras/SMI_LSL/DataStreaming.py
|
HIIT/PeyeDF
|
55702482415a4a74820e1eb10536787a238a1054
|
[
"MIT"
] | 1
|
2021-12-06T11:41:40.000Z
|
2021-12-06T11:41:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
#
# (c) Copyright 1997-2013, SensoMotoric Instruments GmbH, Alto University
#
# Permission is hereby granted, free of charge, to any person or
# organization obtaining a copy of the software and accompanying
# documentation covered by this license (the "Software") to use,
# reproduce, display, distribute, execute, and transmit the Software,
# and to prepare derivative works of the Software, and to permit
# third-parties to whom the Software is furnished to do so, all subject
# to the following:
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the following
# disclaimer, must be included in all copies of the Software, in whole
# or in part, and all derivative works of the Software, unless such
# copies or derivative works are solely in the form of
# machine-executable object code generated by a source language
# processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
# NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
# DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER
# LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# -----------------------------------------------------------------------
# REQUIRES PYTHON 2
# This script fetches data from the same machine in which iViewX is running.
# All packets received from iViewX are passed on to Lab Streaming Layer.
from iViewXAPI import * #iViewX library
from iViewXAPIReturnCodes import *
import time
import pylsl as lsl
def marcoTime():
return int(round(time.time() * 1000) - 1446909066675)
# ---------------------------------------------
# ---- connect to iViewX
# ---------------------------------------------
res = iViewXAPI.iV_Connect(c_char_p('127.0.0.1'), c_int(4444), c_char_p('127.0.0.1'), c_int(5555))
if res != 1:
HandleError(res)
exit(0)
res = iViewXAPI.iV_SetLogger(c_int(1), c_char_p("iViewXSDK_Python_lsl.txt"))
res = iViewXAPI.iV_GetSystemInfo(byref(systemData))
print "iV_GetSystemInfo: " + str(res)
samplingRate = round(systemData.samplerate)
print "Samplerate: " + str(samplingRate)
print "iViewX Version: " + str(systemData.iV_MajorVersion) + "." + str(systemData.iV_MinorVersion) + "." + str(systemData.iV_Buildnumber)
print "iViewX API Version: " + str(systemData.API_MajorVersion) + "." + str(systemData.API_MinorVersion) + "." + str(systemData.API_Buildnumber)
# ---------------------------------------------
# ---- constants / support
# ---------------------------------------------
# left eye mapped to -1, right to 1, unknown to 0
eyeDict = {'l': -1, 'L': -1, 'LEFT': -1, 'left': -1, 'Left': -1, 'r': 1, 'R': 1, 'RIGHT': 1, 'right': 1, 'Right': 1}
k_EyeUnknown = 0 # number of eye when unknown
# -- lsl constants --
k_nchans_raw = 13 # raw stream channels
k_nchans_event = 7 # event stream channels
k_chunkSize = 32 # size of chunks (using example given by lsl)
k_maxBuff = 30 # maximum buffer size in seconds
# ---------------------------------------------
# ---- lab streaming layer
# ---------------------------------------------
rawStream_info = lsl.StreamInfo('SMI_Raw', 'Gaze', k_nchans_raw, samplingRate, 'float32', 'smiraw500xa15')
eventStream_info = lsl.StreamInfo('SMI_Event', 'Event', k_nchans_event, samplingRate, 'float32', 'smievent500ds15')
# append meta-data
rawStream_info.desc().append_child_value("manufacturer", "SMI")
eventStream_info.desc().append_child_value("manufacturer", "SMI")
rawStream_info.desc().append_child_value("model", "RED")
eventStream_info.desc().append_child_value("model", "RED")
rawStream_info.desc().append_child_value("api", "iViewPythonLSL")
eventStream_info.desc().append_child_value("api", "iViewPythonLSL")
# -- RAW (GAZE) CHANNELS --
rawChannels = rawStream_info.desc().append_child("channels")
# Make sure order matches order in midas' node
for c in ["timestamp"]:
rawChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "microseconds")\
.append_child_value("type", "Gaze")
for c in ["leftGazeX", "leftGazeY"]:
rawChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "pixels")\
.append_child_value("type", "Gaze")
for c in ["leftDiam", "leftEyePositionX", "leftEyePositionY", "leftEyePositionZ", "rightGazeX", "rightGazeY", "rightDiam", "rightEyePositionX", "rightEyePositionY", "rightEyePositionZ"]:
rawChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "millimetres")\
.append_child_value("type", "Gaze")
# -- EVENT CHANNELS --
eventChannels = eventStream_info.desc().append_child("channels")
# Make sure order matches order in midas' node
for c in ["eye"]:
eventChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "index")\
.append_child_value("type", "Event")
for c in ["startTime", "endTime", "duration"]:
eventChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "microseconds")\
.append_child_value("type", "Event")
for c in ["positionX", "positionY"]:
eventChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "pixels")\
.append_child_value("type", "Event")
for c in ["marcotime"]:
eventChannels.append_child("channel")\
.append_child_value("label", c)\
.append_child_value("unit", "milliseconds")\
.append_child_value("type", "Event")
# ---------------------------------------------
# ---- lsl outlets
# ---------------------------------------------
rawOutlet = lsl.StreamOutlet(rawStream_info, k_chunkSize, k_maxBuff)
eventOutlet = lsl.StreamOutlet(eventStream_info, k_chunkSize, k_maxBuff)
# ---------------------------------------------
# ---- configure and start calibration
# ---------------------------------------------
minAccuracy = 1.0
accLX = 1000
accLY = 1000
accRX = 1000
accRY = 1000
inkey = "x"
while (accLX > minAccuracy or accLY > minAccuracy or accRX > minAccuracy or accRY > minAccuracy) and not 's' in inkey:
displayDevice = 1
if 'm' in inkey:
autoControl = 0
else:
autoControl = 1
calibrationData = CCalibration(9, 1, displayDevice, 0, autoControl, 250, 220, 2, 20, b"")
res = iViewXAPI.iV_SetupCalibration(byref(calibrationData))
print "iV_SetupCalibration " + str(res)
res = iViewXAPI.iV_Calibrate()
print "iV_Calibrate " + str(res)
res = iViewXAPI.iV_Validate()
print "iV_Validate " + str(res)
res = iViewXAPI.iV_GetAccuracy(byref(accuracyData), 0)
print "iV_GetAccuracy " + str(res)
print "deviationXLeft " + str(accuracyData.deviationLX) + " deviationYLeft " + str(accuracyData.deviationLY)
print "deviationXRight " + str(accuracyData.deviationRX) + " deviationYRight " + str(accuracyData.deviationRY)
accLX = accuracyData.deviationLX
accLY = accuracyData.deviationLY
accRX = accuracyData.deviationRX
accRY = accuracyData.deviationRY
if accLX > minAccuracy or accLY > minAccuracy or accRX > minAccuracy or accRY > minAccuracy:
print("One or more accuracies were above " + str(minAccuracy))
inkey = raw_input("Just press enter to repeat auto calibration, 'm' (+ Enter) to repeat calibration under manual control or 's' (+ Enter) to skip further calibration >")
# ---------------------------------------------
# ---- define the callback functions. Also see the enum and string arrays in PeyeConstants for input/output formats.
# ---------------------------------------------
def SampleCallback(sample):
data = [None] * k_nchans_raw
data[0] = sample.timestamp
data[1] = sample.leftEye.gazeX
data[2] = sample.leftEye.gazeY
data[3] = sample.leftEye.diam
data[4] = sample.leftEye.eyePositionX
data[5] = sample.leftEye.eyePositionY
data[6] = sample.leftEye.eyePositionZ
data[7] = sample.rightEye.gazeX
data[8] = sample.rightEye.gazeY
data[9] = sample.rightEye.diam
data[10] = sample.rightEye.eyePositionX
data[11] = sample.rightEye.eyePositionY
data[12] = sample.rightEye.eyePositionZ
rawOutlet.push_sample(data)
return 0
def EventCallback(event):
data = [None] * k_nchans_event
data[0] = eyeDict[event.eye]
data[1] = event.startTime
data[2] = event.endTime
data[3] = event.duration
data[4] = event.positionX
data[5] = event.positionY
data[6] = marcoTime()
eventOutlet.push_sample(data)
return 0
CMPFUNC = WINFUNCTYPE(c_int, CSample)
smp_func = CMPFUNC(SampleCallback)
sampleCB = False
CMPFUNC = WINFUNCTYPE(c_int, CEvent)
event_func = CMPFUNC(EventCallback)
eventCB = False
# ---------------------------------------------
# ---- start DataStreaming, loops until q is entered
# ---------------------------------------------
res = iViewXAPI.iV_SetSampleCallback(smp_func)
sampleCB = True
res = iViewXAPI.iV_SetEventCallback(event_func)
eventCB = True
command = ''
while not command == 'q':
print('')
print('STREAMING STARTED')
print('')
command = raw_input('q+enter to stop streaming eye data. ')
print('Terminating... ')
sampleCB = False
eventCB = False
# ---------------------------------------------
# ---- stop recording and disconnect from iViewX
# ---------------------------------------------
res = iViewXAPI.iV_Disconnect()
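# ---------------------------------------------
# ---- consumer-side sketch (illustrative only)
# ---------------------------------------------
# The helper below is not called by this script; it sketches how the SMI_Raw
# outlet declared above can be read back from another process with the
# standard pylsl inlet API (resolve_stream / StreamInlet / pull_sample).
def _read_back_sketch():
    streams = lsl.resolve_stream('name', 'SMI_Raw')
    inlet = lsl.StreamInlet(streams[0])
    sample, timestamp = inlet.pull_sample()
    # sample[0] is the iViewX timestamp; sample[1:3] are the left-eye gaze in pixels
    return sample, timestamp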
| 38.184211
| 187
| 0.624594
|
1469618a58df76cc811c3995a87fe79c3eabbbd2
| 57,138
|
py
|
Python
|
test/functional/test_framework/mininode.py
|
dartdart26/paicoin
|
5c233cf46dfd73fec8ce4934822bd687ec1c2ee4
|
[
"MIT"
] | 1
|
2018-08-06T14:38:53.000Z
|
2018-08-06T14:38:53.000Z
|
test/functional/test_framework/mininode.py
|
git-nkliyc/paicoin
|
f621533b53cab0e3387b22dd0d6be860f077c270
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/mininode.py
|
git-nkliyc/paicoin
|
f621533b53cab0e3387b22dd0d6be860f077c270
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""PAIcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a paicoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
paicoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
BIP0031_VERSION = 60000
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 PAI in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
# NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
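# Illustrative pattern only (the helper below is never called): test-side code
# that inspects state shared with the network thread should hold mininode_lock,
# just as NodeConn does when delivering or buffering messages.
def _shared_state_sketch():
    with mininode_lock:
        return len(mininode_socket_map)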
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
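# Illustrative aside (the helper below is not used by the framework): the
# variable-length encodings implemented by ser_compact_size/deser_compact_size,
# checked as a round trip.
def _compact_size_sketch():
    assert ser_compact_size(252) == b"\xfc"                       # single byte
    assert ser_compact_size(253) == b"\xfd\xfd\x00"               # 0xfd + uint16
    assert ser_compact_size(0x10000) == b"\xfe\x00\x00\x01\x00"   # 0xfe + uint32
    for value in (252, 253, 0x10000, 2**32):
        assert deser_compact_size(BytesIO(ser_compact_size(value))) == value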
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
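# Small round-trip sketch for the two helpers above (the helper name is
# illustrative and unused by the framework): build a transaction, hex-encode it
# with ToHex, and recover an equal copy with FromHex.
def _hex_roundtrip_sketch():
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0, 0xffffffff), b"\x51", 0xffffffff))
    tx.vout.append(CTxOut(1 * COIN, b"\x51"))
    tx_copy = FromHex(CTransaction(), ToHex(tx))
    assert ToHex(tx_copy) == ToHex(tx)
    return tx_copy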
# Objects that map to paicoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in paicoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
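# Illustrative sketch (the helper below is never called): assemble a
# one-transaction block under a permissive regtest-style target (nBits of
# 0x207fffff is an assumption, not taken from this file), grind the nonce with
# solve(), and check the result with is_valid().
def _solve_block_sketch():
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), b"\x51", 0xffffffff))
    coinbase.vout.append(CTxOut(50 * COIN, b"\x51"))
    block = CBlock()
    block.nBits = 0x207fffff
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    assert block.is_valid()
    return block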
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
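# --- Illustrative sketch (added, not part of the original file) --------------
# The comment above describes the getheaders payload.  Assuming the
# CBlockLocator defined earlier in this file exposes a `vHave` list of known
# block hashes (as in the upstream Bitcoin test framework), a request for
# headers after a known tip could be assembled like this; `tip_hash` is a
# hypothetical uint256 value supplied by the caller.
def _example_build_getheaders(tip_hash):
    m = msg_getheaders()
    m.locator.vHave = [tip_hash]   # best-known block hashes, newest first
    m.hashstop = 0                 # 0 = "send as many headers as possible"
    return m.serialize()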
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in paicoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a paicoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
return True
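# --- Illustrative sketch (added, not part of the original file) --------------
# The NodeConnCB docstring above says test cases subclass it and override the
# on_* handlers.  A minimal subclass that just records every header
# announcement might look like this; the class and attribute names below are
# hypothetical, not taken from any real test.
class _ExampleHeaderRecorder(NodeConnCB):
    def __init__(self):
        super(_ExampleHeaderRecorder, self).__init__()
        self.seen_headers = []
    def on_headers(self, conn, message):
        # message is a msg_headers instance; remember the announced headers
        self.seen_headers.extend(message.headers)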
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to PAIcoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
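    # Wire framing note (added comment): send_message above frames every payload as
    # <4-byte network magic><12-byte zero-padded command><4-byte little-endian length>
    # <4-byte double-SHA256 checksum, only for protocol versions >= 209><payload>,
    # i.e. the standard 24-byte p2p message header followed by the body; got_data
    # below parses exactly the same layout on the receive side.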
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
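# --- Illustrative sketch (added, not part of the original file) --------------
# Typical wiring, as suggested by the NodeConn constructor signature above:
# create a callback object, open a NodeConn to a node's p2p port, then start a
# NetworkThread to drive the asyncore loop.  `p2p_port` and `rpc_connection`
# are hypothetical stand-ins for values a test framework would supply.
def _example_connect(p2p_port, rpc_connection):
    handler = NodeConnCB()
    conn = NodeConn('127.0.0.1', p2p_port, rpc_connection, handler, net="regtest")
    handler.add_connection(conn)
    NetworkThread().start()
    handler.wait_for_verack()
    return handler, conn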
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
avg_line_length: 31.002713 | max_line_length: 262 | alphanum_fraction: 0.597711

hexsha: 8e3d28f91cef4f9fba9780228bf98342447cac02 | size: 20100 | ext: py | lang: Python
max_stars_repo: path=venv/lib/python3.6/site-packages/twilio/rest/api/v2010/account/conference/recording.py | name=fernandoleira/stocktext | head_hexsha=f755f83ffdaee3b179e21de955854354aced9134 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues_repo: path=venv/lib/python3.6/site-packages/twilio/rest/api/v2010/account/conference/recording.py | name=fernandoleira/stocktext | head_hexsha=f755f83ffdaee3b179e21de955854354aced9134 | licenses=["MIT"] | count=11 | event_min=2019-12-26T17:21:03.000Z | event_max=2022-03-21T22:17:07.000Z
max_forks_repo: path=venv/lib/python3.6/site-packages/twilio/rest/api/v2010/account/conference/recording.py | name=fernandoleira/stocktext | head_hexsha=f755f83ffdaee3b179e21de955854354aced9134 | licenses=["MIT"] | count=null | event_min=null | event_max=null
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class RecordingList(ListResource):
""" """
def __init__(self, version, account_sid, conference_sid):
"""
Initialize the RecordingList
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource
:param conference_sid: The Conference SID that identifies the conference associated with the recording
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingList
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingList
"""
super(RecordingList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'conference_sid': conference_sid, }
self._uri = '/Accounts/{account_sid}/Conferences/{conference_sid}/Recordings.json'.format(**self._solution)
def stream(self, date_created_before=values.unset, date_created=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Streams RecordingInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param date date_created_before: The `YYYY-MM-DD` value of the resources to read
:param date date_created: The `YYYY-MM-DD` value of the resources to read
:param date date_created_after: The `YYYY-MM-DD` value of the resources to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.conference.recording.RecordingInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
date_created_before=date_created_before,
date_created=date_created,
date_created_after=date_created_after,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, date_created_before=values.unset, date_created=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Lists RecordingInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param date date_created_before: The `YYYY-MM-DD` value of the resources to read
:param date date_created: The `YYYY-MM-DD` value of the resources to read
:param date date_created_after: The `YYYY-MM-DD` value of the resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.conference.recording.RecordingInstance]
"""
return list(self.stream(
date_created_before=date_created_before,
date_created=date_created,
date_created_after=date_created_after,
limit=limit,
page_size=page_size,
))
def page(self, date_created_before=values.unset, date_created=values.unset,
date_created_after=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of RecordingInstance records from the API.
Request is executed immediately
:param date date_created_before: The `YYYY-MM-DD` value of the resources to read
:param date date_created: The `YYYY-MM-DD` value of the resources to read
:param date date_created_after: The `YYYY-MM-DD` value of the resources to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingPage
"""
params = values.of({
'DateCreated<': serialize.iso8601_date(date_created_before),
'DateCreated': serialize.iso8601_date(date_created),
'DateCreated>': serialize.iso8601_date(date_created_after),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return RecordingPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of RecordingInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return RecordingPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a RecordingContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingContext
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingContext
"""
return RecordingContext(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a RecordingContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingContext
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingContext
"""
return RecordingContext(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.RecordingList>'
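# --- Illustrative sketch (added, not part of the generated file) -------------
# RecordingList.list()/stream() above page through a conference's recordings
# lazily or eagerly.  Through the top-level twilio client this is normally
# reached roughly as below; the Client import, credentials and SID values are
# assumptions, not something defined in this module.
def _example_list_conference_recordings(account_sid, auth_token, conference_sid):
    from twilio.rest import Client
    client = Client(account_sid, auth_token)
    recordings = client.conferences(conference_sid).recordings.list(limit=20)
    return [recording.sid for recording in recordings]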
class RecordingPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the RecordingPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The SID of the Account that created the resource
:param conference_sid: The Conference SID that identifies the conference associated with the recording
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingPage
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingPage
"""
super(RecordingPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of RecordingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
"""
return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.RecordingPage>'
class RecordingContext(InstanceContext):
""" """
def __init__(self, version, account_sid, conference_sid, sid):
"""
Initialize the RecordingContext
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource to fetch
:param conference_sid: Fetch by unique Conference SID for the recording
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingContext
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingContext
"""
super(RecordingContext, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'conference_sid': conference_sid, 'sid': sid, }
self._uri = '/Accounts/{account_sid}/Conferences/{conference_sid}/Recordings/{sid}.json'.format(**self._solution)
def update(self, status, pause_behavior=values.unset):
"""
Update the RecordingInstance
:param RecordingInstance.Status status: The new status of the recording
:param unicode pause_behavior: Whether to record during a pause
:returns: Updated RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
"""
data = values.of({'Status': status, 'PauseBehavior': pause_behavior, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
sid=self._solution['sid'],
)
def fetch(self):
"""
Fetch a RecordingInstance
:returns: Fetched RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the RecordingInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.RecordingContext {}>'.format(context)
class RecordingInstance(InstanceResource):
""" """
class Status(object):
IN_PROGRESS = "in-progress"
PAUSED = "paused"
STOPPED = "stopped"
PROCESSING = "processing"
COMPLETED = "completed"
ABSENT = "absent"
class Source(object):
DIALVERB = "DialVerb"
CONFERENCE = "Conference"
OUTBOUNDAPI = "OutboundAPI"
TRUNKING = "Trunking"
RECORDVERB = "RecordVerb"
STARTCALLRECORDINGAPI = "StartCallRecordingAPI"
STARTCONFERENCERECORDINGAPI = "StartConferenceRecordingAPI"
def __init__(self, version, payload, account_sid, conference_sid, sid=None):
"""
Initialize the RecordingInstance
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
"""
super(RecordingInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'call_sid': payload['call_sid'],
'conference_sid': payload['conference_sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'start_time': deserialize.rfc2822_datetime(payload['start_time']),
'duration': payload['duration'],
'sid': payload['sid'],
'price': deserialize.decimal(payload['price']),
'price_unit': payload['price_unit'],
'status': payload['status'],
'channels': deserialize.integer(payload['channels']),
'source': payload['source'],
'error_code': deserialize.integer(payload['error_code']),
'encryption_details': payload['encryption_details'],
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'conference_sid': conference_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: RecordingContext for this RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingContext
"""
if self._context is None:
self._context = RecordingContext(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the recording
:rtype: unicode
"""
return self._properties['api_version']
@property
def call_sid(self):
"""
:returns: The SID of the Call the resource is associated with
:rtype: unicode
"""
return self._properties['call_sid']
@property
def conference_sid(self):
"""
:returns: The Conference SID that identifies the conference associated with the recording
:rtype: unicode
"""
return self._properties['conference_sid']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def start_time(self):
"""
:returns: The start time of the recording, given in RFC 2822 format
:rtype: datetime
"""
return self._properties['start_time']
@property
def duration(self):
"""
:returns: The length of the recording in seconds
:rtype: unicode
"""
return self._properties['duration']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def price(self):
"""
:returns: The one-time cost of creating the recording.
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency used in the price property.
:rtype: unicode
"""
return self._properties['price_unit']
@property
def status(self):
"""
:returns: The status of the recording
:rtype: RecordingInstance.Status
"""
return self._properties['status']
@property
def channels(self):
"""
:returns: The number of channels in the final recording file as an integer
:rtype: unicode
"""
return self._properties['channels']
@property
def source(self):
"""
:returns: How the recording was created
:rtype: RecordingInstance.Source
"""
return self._properties['source']
@property
def error_code(self):
"""
:returns: More information about why the recording is missing, if status is `absent`.
:rtype: unicode
"""
return self._properties['error_code']
@property
def encryption_details(self):
"""
:returns: How to decrypt the recording.
:rtype: dict
"""
return self._properties['encryption_details']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
def update(self, status, pause_behavior=values.unset):
"""
Update the RecordingInstance
:param RecordingInstance.Status status: The new status of the recording
:param unicode pause_behavior: Whether to record during a pause
:returns: Updated RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
"""
return self._proxy.update(status, pause_behavior=pause_behavior, )
def fetch(self):
"""
Fetch a RecordingInstance
:returns: Fetched RecordingInstance
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the RecordingInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.RecordingInstance {}>'.format(context)
avg_line_length: 34.775087 | max_line_length: 121 | alphanum_fraction: 0.626567

hexsha: 1ea7c34d9f4f3de0c3fc774af86916d218a6d39c | size: 8308 | ext: py | lang: Python
max_stars_repo: path=sdks/python/apache_beam/io/filesystems_test.py | name=nancyxu123/beam | head_hexsha=cd966ec35b6e534f11f1053f0846ae4e2dd25956 | licenses=["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | count=1 | event_min=2022-02-25T21:58:19.000Z | event_max=2022-02-25T21:58:19.000Z
max_issues_repo: path=sdks/python/apache_beam/io/filesystems_test.py | name=nancyxu123/beam | head_hexsha=cd966ec35b6e534f11f1053f0846ae4e2dd25956 | licenses=["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | count=1 | event_min=2022-01-06T16:01:15.000Z | event_max=2022-01-06T16:01:15.000Z
max_forks_repo: path=sdks/python/apache_beam/io/filesystems_test.py | name=nancyxu123/beam | head_hexsha=cd966ec35b6e534f11f1053f0846ae4e2dd25956 | licenses=["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | count=17 | event_min=2021-12-15T19:31:54.000Z | event_max=2022-01-31T18:54:23.000Z
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
# pytype: skip-file
import filecmp
import logging
import os
import shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator), ) + paths)
return _join
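# Illustrative sketch (added, not part of the original test file):
# _gen_fake_join('/') returns a join that strips a trailing separator from the
# first piece, so ('a/', 'b', 'c') maps to 'a/b/c'.
def _example_fake_join():
  join = _gen_fake_join('/')
  assert join('a/', 'b', 'c') == 'a/b/c'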
class FileSystemsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_get_scheme(self):
self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
self.assertIsNone(FileSystems.get_scheme('c:\\abc\\cdf'))
self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')
def test_get_filesystem(self):
self.assertTrue(
isinstance(
FileSystems.get_filesystem('/tmp'),
localfilesystem.LocalFileSystem))
self.assertTrue(
isinstance(
FileSystems.get_filesystem('c:\\abc\\def'),
localfilesystem.LocalFileSystem))
with self.assertRaises(ValueError):
FileSystems.get_filesystem('error://abc/def')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_join(self, *unused_mocks):
# Test joining of Unix paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/path', 'to', 'file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/path', 'to/file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/', 'tmp/path', 'to/file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/', 'path', 'to/file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_join(self, *unused_mocks):
# Test joining of Windows paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
self.assertEqual(
r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', 'to', 'file'))
self.assertEqual(
r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', r'to\file'))
self.assertEqual(
r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))
def test_mkdirs(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
self.assertTrue(os.path.isdir(path))
def test_mkdirs_failed(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
# Check IOError if existing directory is created
with self.assertRaises(IOError):
FileSystems.mkdirs(path)
with self.assertRaises(IOError):
FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))
def test_match_file(self):
path = os.path.join(self.tmpdir, 'f1')
open(path, 'a').close()
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path])
def test_match_file_empty(self):
path = os.path.join(self.tmpdir, 'f2') # Does not exist
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_exception(self):
# Match files with None so that it throws an exception
with self.assertRaisesRegex(BeamIOError,
r'^Unable to get the Filesystem') as error:
FileSystems.match([None])
self.assertEqual(list(error.exception.exception_details), [None])
def test_match_directory_with_files(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
open(path1, 'a').close()
open(path2, 'a').close()
# Match both the files in the directory
path = os.path.join(self.tmpdir, '*')
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertCountEqual(files, [path1, path2])
def test_match_directory(self):
result = FileSystems.match([self.tmpdir])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.tmpdir])
def test_copy(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path1], [path2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_copy_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegex(BeamIOError,
r'^Copy operation failed') as error:
FileSystems.copy([path1], [path2])
self.assertEqual(
list(error.exception.exception_details.keys()), [(path1, path2)])
def test_copy_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
FileSystems.mkdirs(path_t2)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path_t1], [path_t2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_rename(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path1], [path2])
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_rename_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegex(BeamIOError,
r'^Rename operation failed') as error:
FileSystems.rename([path1], [path2])
self.assertEqual(
list(error.exception.exception_details.keys()), [(path1, path2)])
def test_rename_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path_t1], [path_t2])
self.assertTrue(FileSystems.exists(path_t2))
self.assertFalse(FileSystems.exists(path_t1))
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_exists(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
self.assertFalse(FileSystems.exists(path2))
def test_delete(self):
path1 = os.path.join(self.tmpdir, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
FileSystems.delete([path1])
self.assertFalse(FileSystems.exists(path1))
def test_delete_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
with self.assertRaisesRegex(BeamIOError,
r'^Delete operation failed') as error:
FileSystems.delete([path1])
self.assertEqual(list(error.exception.exception_details.keys()), [path1])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
avg_line_length: 33.635628 | max_line_length: 79 | alphanum_fraction: 0.675855

hexsha: 9881a15c04ec9d9703bbfc3f6b8dd9a7d9f62f0c | size: 11708 | ext: py | lang: Python
max_stars_repo: path=apiclient/model.py | name=mdornseif/google-api-python-client | head_hexsha=562ec98ea410793fe983f3ba72155fdacb80b4f6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_issues_repo: path=apiclient/model.py | name=mdornseif/google-api-python-client | head_hexsha=562ec98ea410793fe983f3ba72155fdacb80b4f6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks_repo: path=apiclient/model.py | name=mdornseif/google-api-python-client | head_hexsha=562ec98ea410793fe983f3ba72155fdacb80b4f6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import logging
import urllib
from apiclient import __version__
from errors import HttpError
from oauth2client.anyjson import simplejson
dump_request_response = False
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class BaseModel(Model):
"""Base model class.
Subclasses should provide implementations for the "serialize" and
"deserialize" methods, as well as values for the following class attributes.
Attributes:
accept: The value to use for the HTTP Accept header.
content_type: The value to use for the HTTP Content-type header.
no_content_response: The value to return when deserializing a 204 "No
Content" response.
alt_param: The value to supply as the "alt" query parameter for requests.
"""
accept = None
content_type = None
no_content_response = None
alt_param = None
def _log_request(self, headers, path_params, query, body):
"""Logs debugging information about the request if requested."""
if dump_request_response:
logging.info('--request-start--')
logging.info('-headers-start-')
for h, v in headers.iteritems():
logging.info('%s: %s', h, v)
logging.info('-headers-end-')
logging.info('-path-parameters-start-')
for h, v in path_params.iteritems():
logging.info('%s: %s', h, v)
logging.info('-path-parameters-end-')
logging.info('body: %s', body)
logging.info('query: %s', query)
logging.info('--request-end--')
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers['accept'] = self.accept
headers['accept-encoding'] = 'gzip, deflate'
if 'user-agent' in headers:
headers['user-agent'] += ' '
else:
headers['user-agent'] = ''
headers['user-agent'] += 'google-api-python-client/%s (gzip)' % __version__
if body_value is not None:
headers['content-type'] = self.content_type
body_value = self.serialize(body_value)
self._log_request(headers, path_params, query, body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
if self.alt_param is not None:
params.update({'alt': self.alt_param})
astuples = []
for key, value in params.iteritems():
if type(value) == type([]):
for x in value:
x = x.encode('utf-8')
astuples.append((key, x))
else:
if getattr(value, 'encode', False) and callable(value.encode):
value = value.encode('utf-8')
astuples.append((key, value))
return '?' + urllib.urlencode(astuples)
def _log_response(self, resp, content):
"""Logs debugging information about the response if requested."""
if dump_request_response:
logging.info('--response-start--')
for h, v in resp.iteritems():
logging.info('%s: %s', h, v)
if content:
logging.info(content)
logging.info('--response-end--')
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
content = content.decode('utf-8')
self._log_response(resp, content)
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return self.no_content_response
return self.deserialize(content)
else:
logging.debug('Content from bad request was: %s' % content)
raise HttpError(resp, content)
def serialize(self, body_value):
"""Perform the actual Python object serialization.
Args:
body_value: object, the request body as a Python object.
Returns:
string, the body in serialized form.
"""
_abstract()
def deserialize(self, content):
"""Perform the actual deserialization from response string to Python
object.
Args:
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
"""
_abstract()
class JsonModel(BaseModel):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
accept = 'application/json'
content_type = 'application/json'
alt_param = 'json'
def __init__(self, data_wrapper=False):
"""Construct a JsonModel.
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def serialize(self, body_value):
if (isinstance(body_value, dict) and 'data' not in body_value and
self._data_wrapper):
body_value = {'data': body_value}
return simplejson.dumps(body_value)
def deserialize(self, content):
body = simplejson.loads(content)
if self._data_wrapper and isinstance(body, dict) and 'data' in body:
body = body['data']
return body
@property
def no_content_response(self):
return {}
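# Illustrative sketch (added, not part of the original module): with
# data_wrapper=True the body is wrapped in {'data': ...} on serialize and
# unwrapped again on deserialize, as the two methods above show.
def _example_json_model_roundtrip():
  model = JsonModel(data_wrapper=True)
  wire = model.serialize({'title': 'hello'})   # -> '{"data": {"title": "hello"}}'
  return model.deserialize(wire)               # -> {'title': 'hello'}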
class RawModel(JsonModel):
"""Model class for requests that don't return JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = '*/*'
content_type = 'application/json'
alt_param = None
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ''
class MediaModel(JsonModel):
"""Model class for requests that return Media.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = '*/*'
content_type = 'application/json'
alt_param = 'media'
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ''
class ProtocolBufferModel(BaseModel):
"""Model class for protocol buffers.
Serializes and de-serializes the binary protocol buffer sent in the HTTP
request and response bodies.
"""
accept = 'application/x-protobuf'
content_type = 'application/x-protobuf'
alt_param = 'proto'
def __init__(self, protocol_buffer):
"""Constructs a ProtocolBufferModel.
    The serialized protocol buffer returned in an HTTP response will be
de-serialized using the given protocol buffer class.
Args:
protocol_buffer: The protocol buffer class used to de-serialize a
response from the API.
"""
self._protocol_buffer = protocol_buffer
def serialize(self, body_value):
return body_value.SerializeToString()
def deserialize(self, content):
return self._protocol_buffer.FromString(content)
@property
def no_content_response(self):
return self._protocol_buffer()
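# Illustrative sketch (added, not part of the original module): the model is
# constructed with a generated protocol buffer class and simply delegates to
# its SerializeToString/FromString methods; `MyMessage` below is a hypothetical
# generated class, not something defined here.
def _example_protobuf_model(MyMessage, message):
  model = ProtocolBufferModel(MyMessage)
  wire = model.serialize(message)      # message.SerializeToString()
  return model.deserialize(wire)       # MyMessage.FromString(wire)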
def makepatch(original, modified):
"""Create a patch object.
Some methods support PATCH, an efficient way to send updates to a resource.
This method allows the easy construction of patch bodies by looking at the
differences between a resource before and after it was modified.
Args:
original: object, the original deserialized resource
modified: object, the modified deserialized resource
Returns:
An object that contains only the changes from original to modified, in a
form suitable to pass to a PATCH method.
Example usage:
item = service.activities().get(postid=postid, userid=userid).execute()
original = copy.deepcopy(item)
item['object']['content'] = 'This is updated.'
service.activities.patch(postid=postid, userid=userid,
body=makepatch(original, item)).execute()
"""
patch = {}
for key, original_value in original.iteritems():
modified_value = modified.get(key, None)
if modified_value is None:
# Use None to signal that the element is deleted
patch[key] = None
elif original_value != modified_value:
if type(original_value) == type({}):
# Recursively descend objects
patch[key] = makepatch(original_value, modified_value)
else:
# In the case of simple types or arrays we just replace
patch[key] = modified_value
else:
# Don't add anything to patch if there's no change
pass
for key in modified:
if key not in original:
patch[key] = modified[key]
return patch
avg_line_length: 30.489583 | max_line_length: 79 | alphanum_fraction: 0.691835

hexsha: 4426554eaef7c5a7c16bba34a4b551d9b05e7d08 | size: 1377 | ext: py | lang: Python
max_stars_repo: path=aiida/backends/tests/orm/test_entities.py | name=borellim/aiida_core | head_hexsha=eebef392c81e8b130834a92e1d7abf5e2e30b3ce | licenses=["BSD-2-Clause"] | count=1 | event_min=2019-03-15T10:37:53.000Z | event_max=2019-03-15T10:37:53.000Z
max_issues_repo: path=aiida/backends/tests/orm/test_entities.py | name=odarbelaeze/aiida_core | head_hexsha=934b4ccdc73a993f2a6656caf516500470e3da08 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
max_forks_repo: path=aiida/backends/tests/orm/test_entities.py | name=odarbelaeze/aiida_core | head_hexsha=934b4ccdc73a993f2a6656caf516500470e3da08 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test for general backend entities"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.backends.testbase import AiidaTestCase
from aiida import orm
class TestBackendEntitiesAndCollections(AiidaTestCase):
"""Test backend entities and their collections"""
def test_collections_cache(self):
"""Make sure that we're not recreating collections each time .objects is called"""
# Check directly
user_collection = orm.User.objects
self.assertIs(user_collection, orm.User.objects)
# Now check passing an explicit backend
backend = user_collection.backend
self.assertIs(user_collection, user_collection(backend))
avg_line_length: 44.419355 | max_line_length: 90 | alphanum_fraction: 0.578794

hexsha: 25635d42c141a29dcd741bb6e45f94955c0e9255 | size: 16944 | ext: py | lang: Python
max_stars_repo: path=src/sage/combinat/sf/homogeneous.py | name=yzpopulation/sage | head_hexsha=d2dc2f80b5a8e039701e292653e25366e3e5ec1e | licenses=["BSL-1.0"] | count=null | event_min=null | event_max=null
max_issues_repo: path=src/sage/combinat/sf/homogeneous.py | name=yzpopulation/sage | head_hexsha=d2dc2f80b5a8e039701e292653e25366e3e5ec1e | licenses=["BSL-1.0"] | count=null | event_min=null | event_max=null
max_forks_repo: path=src/sage/combinat/sf/homogeneous.py | name=yzpopulation/sage | head_hexsha=d2dc2f80b5a8e039701e292653e25366e3e5ec1e | licenses=["BSL-1.0"] | count=null | event_min=null | event_max=null
r"""
Homogeneous symmetric functions
By this we mean the basis formed of the complete homogeneous
symmetric functions `h_\lambda`, not an arbitrary graded basis.
"""
#*****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>
# 2012 Mike Zabrocki <mike.zabrocki@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
####################################
# #
# Homogeneous Symmetric Functions #
# #
####################################
from . import multiplicative, classical
from sage.combinat.partition import Partition
from sage.rings.all import infinity
from sage.misc.all import prod
from sage.arith.all import factorial, binomial
class SymmetricFunctionAlgebra_homogeneous(multiplicative.SymmetricFunctionAlgebra_multiplicative):
def __init__(self, Sym):
"""
A class of methods specific to the homogeneous basis of
symmetric functions.
INPUT:
- ``self`` -- a homogeneous basis of symmetric functions
- ``Sym`` -- an instance of the ring of symmetric functions
TESTS::
            sage: h = SymmetricFunctions(QQ).h()
sage: h == loads(dumps(h))
True
sage: TestSuite(h).run(skip=['_test_associativity', '_test_distributivity', '_test_prod'])
sage: TestSuite(h).run(elements = [h[1,1]+h[2], h[1]+2*h[1,1]])
"""
classical.SymmetricFunctionAlgebra_classical.__init__(self, Sym, "homogeneous", 'h')
def _dual_basis_default(self):
r"""
Return the dual basis to ``self``.
INPUT:
- ``self`` -- a homogeneous basis of symmetric functions
- ``scalar`` -- optional input which specifies a function ``zee``
on partitions. The function ``zee`` determines the scalar
product on the power sum basis with normalization
`\langle p_\mu, p_\mu \rangle = \mathrm{zee}(mu)`.
(default: uses standard ``zee`` function)
- ``scalar_name`` -- specifies the name of the scalar function
(optional)
- ``prefix`` -- optional input, specifies the prefix to be
used to display the basis.
OUTPUT:
The dual basis of the homogeneous basis with respect to the
standard scalar product (the monomial basis). If a function
``zee`` is specified, the dual basis is with respect to the
modified scalar product.
EXAMPLES::
sage: m = SymmetricFunctions(QQ).m()
sage: h = SymmetricFunctions(QQ).h()
sage: h.dual_basis() == m
True
sage: zee = lambda x : 2
sage: hh = h.dual_basis(zee); hh
Dual basis to Symmetric Functions over Rational Field in the homogeneous basis
sage: hh[2,1].scalar(h[2,1])
1
sage: hh[2,2].scalar(h[2,2])
4
TESTS::
sage: h._dual_basis_default() is h.dual_basis()
True
"""
return self.realization_of().m()
def coproduct_on_generators(self, i):
r"""
Return the coproduct on `h_i`.
INPUT:
- ``self`` -- a homogeneous basis of symmetric functions
- ``i`` -- a nonnegative integer
OUTPUT:
- the sum `\sum_{r=0}^i h_r \otimes h_{i-r}`
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: h = Sym.homogeneous()
sage: h.coproduct_on_generators(2)
h[] # h[2] + h[1] # h[1] + h[2] # h[]
sage: h.coproduct_on_generators(0)
h[] # h[]
"""
def P(i):
return Partition([i]) if i else Partition([])
T = self.tensor_square()
return T.sum_of_monomials( (P(j), P(i-j)) for j in range(i+1) )
class Element(classical.SymmetricFunctionAlgebra_classical.Element):
def omega(self):
r"""
Return the image of ``self`` under the omega automorphism.
The *omega automorphism* is defined to be the unique algebra
endomorphism `\omega` of the ring of symmetric functions that
satisfies `\omega(e_k) = h_k` for all positive integers `k`
(where `e_k` stands for the `k`-th elementary symmetric
function, and `h_k` stands for the `k`-th complete homogeneous
symmetric function). It furthermore is a Hopf algebra
endomorphism and an involution, and it is also known as the
*omega involution*. It sends the power-sum symmetric function
`p_k` to `(-1)^{k-1} p_k` for every positive integer `k`.
The images of some bases under the omega automorphism are given by
.. MATH::
\omega(e_{\lambda}) = h_{\lambda}, \qquad
\omega(h_{\lambda}) = e_{\lambda}, \qquad
\omega(p_{\lambda}) = (-1)^{|\lambda| - \ell(\lambda)}
p_{\lambda}, \qquad
\omega(s_{\lambda}) = s_{\lambda^{\prime}},
where `\lambda` is any partition, where `\ell(\lambda)` denotes
the length (:meth:`~sage.combinat.partition.Partition.length`)
of the partition `\lambda`, where `\lambda^{\prime}` denotes the
conjugate partition
(:meth:`~sage.combinat.partition.Partition.conjugate`) of
`\lambda`, and where the usual notations for bases are used
(`e` = elementary, `h` = complete homogeneous, `p` = powersum,
`s` = Schur).
:meth:`omega_involution()` is a synonym for the :meth:`omega()`
method.
OUTPUT:
- the image of ``self`` under the omega automorphism
EXAMPLES::
sage: h = SymmetricFunctions(QQ).h()
sage: a = h([2,1]); a
h[2, 1]
sage: a.omega()
h[1, 1, 1] - h[2, 1]
sage: e = SymmetricFunctions(QQ).e()
sage: e(h([2,1]).omega())
e[2, 1]
"""
e = self.parent().realization_of().e()
return self.parent()(e._from_element(self))
omega_involution = omega
def expand(self, n, alphabet='x'):
"""
Expand the symmetric function ``self`` as a symmetric polynomial
in ``n`` variables.
INPUT:
- ``n`` -- a nonnegative integer
- ``alphabet`` -- (default: ``'x'``) a variable for the expansion
OUTPUT:
A monomial expansion of ``self`` in the `n` variables
labelled by ``alphabet``.
EXAMPLES::
sage: h = SymmetricFunctions(QQ).h()
sage: h([3]).expand(2)
x0^3 + x0^2*x1 + x0*x1^2 + x1^3
sage: h([1,1,1]).expand(2)
x0^3 + 3*x0^2*x1 + 3*x0*x1^2 + x1^3
sage: h([2,1]).expand(3)
x0^3 + 2*x0^2*x1 + 2*x0*x1^2 + x1^3 + 2*x0^2*x2 + 3*x0*x1*x2 + 2*x1^2*x2 + 2*x0*x2^2 + 2*x1*x2^2 + x2^3
sage: h([3]).expand(2,alphabet='y')
y0^3 + y0^2*y1 + y0*y1^2 + y1^3
sage: h([3]).expand(2,alphabet='x,y')
x^3 + x^2*y + x*y^2 + y^3
sage: h([3]).expand(3,alphabet='x,y,z')
x^3 + x^2*y + x*y^2 + y^3 + x^2*z + x*y*z + y^2*z + x*z^2 + y*z^2 + z^3
sage: (h([]) + 2*h([1])).expand(3)
2*x0 + 2*x1 + 2*x2 + 1
sage: h([1]).expand(0)
0
sage: (3*h([])).expand(0)
3
"""
if n == 0: # Symmetrica crashes otherwise...
return self.counit()
condition = lambda part: False
return self._expand(condition, n, alphabet)
def principal_specialization(self, n=infinity, q=None):
r"""
Return the principal specialization of a symmetric function.
The *principal specialization* of order `n` at `q`
is the ring homomorphism `ps_{n,q}` from the ring of
symmetric functions to another commutative ring `R`
given by `x_i \mapsto q^{i-1}` for `i \in \{1,\dots,n\}`
and `x_i \mapsto 0` for `i > n`.
Here, `q` is a given element of `R`, and we assume that
the variables of our symmetric functions are
`x_1, x_2, x_3, \ldots`.
(To be more precise, `ps_{n,q}` is a `K`-algebra
homomorphism, where `K` is the base ring.)
See Section 7.8 of [EnumComb2]_.
The *stable principal specialization* at `q` is the ring
homomorphism `ps_q` from the ring of symmetric functions
to another commutative ring `R` given by
`x_i \mapsto q^{i-1}` for all `i`.
This is well-defined only if the resulting infinite sums
converge; thus, in particular, setting `q = 1` in the
stable principal specialization is an invalid operation.
INPUT:
- ``n`` (default: ``infinity``) -- a nonnegative integer or
``infinity``, specifying whether to compute the principal
specialization of order ``n`` or the stable principal
specialization.
- ``q`` (default: ``None``) -- the value to use for `q`; the
default is to create a ring of polynomials in ``q``
(or a field of rational functions in ``q``) over the
given coefficient ring.
We use the formulas from Proposition 7.8.3 of [EnumComb2]_
(using Gaussian binomial coefficients `\binom{u}{v}_q`):
.. MATH::
ps_{n,q}(h_\lambda) = \prod_i \binom{n+\lambda_i-1}{\lambda_i}_q,
ps_{n,1}(h_\lambda) = \prod_i \binom{n+\lambda_i-1}{\lambda_i},
ps_q(h_\lambda) = 1 / \prod_i \prod_{j=1}^{\lambda_i} (1-q^j).
EXAMPLES::
sage: h = SymmetricFunctions(QQ).h()
sage: x = h[2,1]
sage: x.principal_specialization(3)
q^6 + 2*q^5 + 4*q^4 + 4*q^3 + 4*q^2 + 2*q + 1
sage: x = 3*h[2] + 2*h[1] + 1
sage: x.principal_specialization(3, q=var("q"))
2*(q^3 - 1)/(q - 1) + 3*(q^4 - 1)*(q^3 - 1)/((q^2 - 1)*(q - 1)) + 1
TESTS::
sage: x = h.zero()
sage: s = x.principal_specialization(3); s
0
"""
from sage.combinat.q_analogues import q_binomial
def get_variable(ring, name):
try:
ring(name)
except TypeError:
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return PolynomialRing(ring, name).gen()
else:
raise ValueError("the variable %s is in the base ring, pass it explicitly" % name)
if q is None:
q = get_variable(self.base_ring(), 'q')
if q == 1:
if n == infinity:
raise ValueError("the stable principal specialization at q=1 is not defined")
f = lambda partition: prod(binomial(n+part-1, part) for part in partition)
elif n == infinity:
f = lambda partition: prod(1/prod((1-q**i) for i in range(1, part+1)) for part in partition)
else:
f = lambda partition: prod(q_binomial(n+part-1, part, q=q) for part in partition)
return self.parent()._apply_module_morphism(self, f, q.parent())
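# A quick sanity sketch (assuming a standard Sage session; not part of the original tests):
# ps_{n,q}(h_1) is the Gaussian binomial binomial(n, 1)_q = [n]_q, so, for instance,
# h[1].principal_specialization(3) should return q^2 + q + 1.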
def exponential_specialization(self, t=None, q=1):
r"""
Return the exponential specialization of a
symmetric function (when `q = 1`), or the
`q`-exponential specialization (when `q \neq 1`).
The *exponential specialization* `ex` at `t` is a
`K`-algebra homomorphism from the `K`-algebra of
symmetric functions to another `K`-algebra `R`.
It is defined whenever the base ring `K` is a
`\QQ`-algebra and `t` is an element of `R`.
The easiest way to define it is by specifying its
values on the powersum symmetric functions to be
`p_1 = t` and `p_n = 0` for `n > 1`.
Equivalently, on the homogeneous functions it is
given by `ex(h_n) = t^n / n!`; see Proposition 7.8.4 of
[EnumComb2]_.
By analogy, the `q`-exponential specialization is a
`K`-algebra homomorphism from the `K`-algebra of
symmetric functions to another `K`-algebra `R` that
depends on two elements `t` and `q` of `R` for which
the elements `1 - q^i` for all positive integers `i`
are invertible.
It can be defined by specifying its values on the
complete homogeneous symmetric functions to be
.. MATH::
ex_q(h_n) = t^n / [n]_q!,
where `[n]_q!` is the `q`-factorial. Equivalently, for
`q \neq 1` and a homogeneous symmetric function `f` of
degree `n`, we have
.. MATH::
ex_q(f) = (1-q)^n t^n ps_q(f),
where `ps_q(f)` is the stable principal specialization of `f`
(see :meth:`principal_specialization`).
(See (7.29) in [EnumComb2]_.)
The limit of `ex_q` as `q \to 1` is `ex`.
INPUT:
- ``t`` (default: ``None``) -- the value to use for `t`;
the default is to create a ring of polynomials in ``t``.
- ``q`` (default: `1`) -- the value to use for `q`. If
``q`` is ``None``, then a ring (or fraction field) of
polynomials in ``q`` is created.
EXAMPLES::
sage: h = SymmetricFunctions(QQ).h()
sage: x = h[5,3]
sage: x.exponential_specialization()
1/720*t^8
sage: factorial(5)*factorial(3)
720
sage: x = 5*h[1,1,1] + 3*h[2,1] + 1
sage: x.exponential_specialization()
13/2*t^3 + 1
We also support the `q`-exponential specialization::
sage: factor(h[3].exponential_specialization(q=var("q"), t=var("t")))
t^3/((q^2 + q + 1)*(q + 1))
TESTS::
sage: x = h.zero()
sage: s = x.exponential_specialization(); s
0
"""
from sage.combinat.q_analogues import q_factorial
def get_variable(ring, name):
try:
ring(name)
except TypeError:
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return PolynomialRing(ring, name).gen()
else:
raise ValueError("the variable %s is in the base ring, pass it explicitly" % name)
if q == 1:
if t is None:
t = get_variable(self.base_ring(), 't')
def f(partition):
n = 0
m = 1
for part in partition:
n += part
m *= factorial(part)
return t**n/m
return self.parent()._apply_module_morphism(self, f, t.parent())
if q is None and t is None:
q = get_variable(self.base_ring(), 'q')
t = get_variable(q.parent(), 't')
elif q is None:
q = get_variable(t.parent(), 'q')
elif t is None:
t = get_variable(q.parent(), 't')
def f(partition):
n = 0
m = 1
for part in partition:
n += part
m *= q_factorial(part, q=q)
return t**n/m
return self.parent()._apply_module_morphism(self, f, t.parent())
# Backward compatibility for unpickling
from sage.misc.persist import register_unpickle_override
register_unpickle_override('sage.combinat.sf.homogeneous', 'SymmetricFunctionAlgebraElement_homogeneous', SymmetricFunctionAlgebra_homogeneous.Element)
| 38.076404
| 152
| 0.517469
|
b151f11e641a963f227e99d999eec86b1cb2268c
| 4,016
|
py
|
Python
|
dataprep/eda/report.py
|
kla55/dataprep
|
75542cda64570f1fe3c4ac61e670cc0ce2febaa3
|
[
"MIT"
] | null | null | null |
dataprep/eda/report.py
|
kla55/dataprep
|
75542cda64570f1fe3c4ac61e670cc0ce2febaa3
|
[
"MIT"
] | null | null | null |
dataprep/eda/report.py
|
kla55/dataprep
|
75542cda64570f1fe3c4ac61e670cc0ce2febaa3
|
[
"MIT"
] | null | null | null |
"""
This module implements the Report class.
"""
import sys
import webbrowser
from pathlib import Path
from tempfile import NamedTemporaryFile
from bokeh.io import save
from bokeh.io.notebook import load_notebook
from bokeh.embed.notebook import notebook_content
from bokeh.models import LayoutDOM
from bokeh.resources import CDN
from jinja2 import Template
from .utils import is_notebook
INLINE_TEMPLATE = Template(
"""
{% from macros import embed %}
{% block inner_body %}
{% block contents %}
{% for doc in docs %}
{{ embed(doc) if doc.elementid }}
{% for root in doc.roots %}
{% block root scoped %}
{{ embed(root) | indent(10) }}
{% endblock %}
{% endfor %}
{% endfor %}
{% endblock %}
{{ plot_script | indent(8) }}
{% endblock %}
"""
)
class Report:
"""
This class creates a customized Report object for the plot* functions and create_report function
"""
to_render: LayoutDOM
def __init__(self, to_render: LayoutDOM) -> None:
self.to_render = to_render
def save(self, filename: str) -> None:
"""
save function
"""
save(
self.to_render,
filename=filename,
resources=CDN,
title="DataPrep.EDA Report",
)
def _repr_html_(self) -> str:
"""
Display itself inside a notebook
"""
# Special case inside Google Colab
if "google.colab" in sys.modules:
load_notebook(hide_banner=True)
script, div, _ = notebook_content(self.to_render)
return f"{div}<script>{script}</script>"
# Windows forbids us from opening the file twice; as a result bokeh cannot
# write to the already-opened temporary file.
with NamedTemporaryFile(suffix=".html", delete=False) as tmpf:
pass
save(
self.to_render,
filename=tmpf.name,
resources=CDN,
template=INLINE_TEMPLATE,
title="DataPrep.EDA Report",
)
with open(tmpf.name, "r") as f:
output_html = f.read()
# Delete the temporary file
Path(tmpf.name).unlink()
# Fix for bokeh: bokeh wrongly calls the "waiting for bokeh to load" function
# inside "Bokeh.safely", which causes a "Bokeh not found" error because
# Bokeh has not even been loaded yet!
patched_html = output_html.replace(
"Bokeh.safely",
"var __dataprep_bokeh_fix = (f) => document.Bokeh === undefined ? setTimeout(f, 1000) : f(); __dataprep_bokeh_fix", # pylint: disable=line-too-long
)
# embed into report template created by us here
return patched_html
def show(self) -> None:
"""
Render the report. This is useful when calling plot in a for loop.
"""
# If not called from a notebook environment, refer to the show_browser function.
if not is_notebook():
print(
"The report will not show in a notebook environment, "
"please try 'show_browser' if you want to open it in browser",
file=sys.stderr,
)
try:
from IPython.display import ( # pylint: disable=import-outside-toplevel
HTML,
display,
)
display(HTML(self._repr_html_()))
except ImportError:
pass
def show_browser(self) -> None:
"""
Open the report in the browser. This is useful when plotting
from the terminal or when the figure is very large in a notebook.
"""
# set delete=False to avoid early deletion when the user opens multiple plots.
with NamedTemporaryFile(suffix=".html", delete=False) as tmpf:
save(
self.to_render,
filename=tmpf.name,
resources=CDN,
title="DataPrep.EDA Report",
)
webbrowser.open_new_tab(f"file://{tmpf.name}")
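# A rough usage sketch (hypothetical figure object; the Report class normally wraps
# layouts produced by the plot* functions):
#
#   from bokeh.plotting import figure
#   fig = figure()
#   fig.line([1, 2, 3], [4, 6, 5])
#   report = Report(fig)
#   report.save("report.html")    # write a standalone HTML file
#   report.show_browser()         # or open it in the default browser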
| 29.748148
| 160
| 0.580428
|
507db557c66fa71194a9097947163d9aca1b10c2
| 256
|
py
|
Python
|
exe020.py
|
evertondutra/Curso_em-_Video_Python
|
c44b0ca79d3a5f3d0db0f9a8b32a1ea210d07bba
|
[
"MIT"
] | null | null | null |
exe020.py
|
evertondutra/Curso_em-_Video_Python
|
c44b0ca79d3a5f3d0db0f9a8b32a1ea210d07bba
|
[
"MIT"
] | null | null | null |
exe020.py
|
evertondutra/Curso_em-_Video_Python
|
c44b0ca79d3a5f3d0db0f9a8b32a1ea210d07bba
|
[
"MIT"
] | null | null | null |
import random
n1 = input('Enter the first student: ')
n2 = input('Enter the second student: ')
n3 = input('Enter the third student: ')
n4 = input('Enter the fourth student: ')
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print(f'The drawn order is\n {lista}')
| 28.444444
| 39
| 0.679688
|
270119555534cf1232b5587bbaf0870c550e606d
| 4,678
|
py
|
Python
|
onlinecourse/models.py
|
twicechild/ibm-final-cloud-app-with-database
|
235237fa0873351377a78055ee7276f0af2f9ff8
|
[
"Apache-2.0"
] | 1
|
2022-02-15T11:25:32.000Z
|
2022-02-15T11:25:32.000Z
|
onlinecourse/models.py
|
twicechild/ibm-final-cloud-app-with-database
|
235237fa0873351377a78055ee7276f0af2f9ff8
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/models.py
|
twicechild/ibm-final-cloud-app-with-database
|
235237fa0873351377a78055ee7276f0af2f9ff8
|
[
"Apache-2.0"
] | null | null | null |
import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
# Instructor model
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
# Learner model
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
# Course model
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# Lesson model
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
# Enrollment model
# <HINT> Once a user enrolls in a course, an enrollment entry should be created between the user and the course
# And we could use the enrollment to track information such as exam submissions
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
# <HINT> Create a Question Model with:
# Used to persist question content for a course
# Has a One-To-Many (or Many-To-Many if you want to reuse questions) relationship with course
# Has a grade point for each question
# Has question content
# Other fields and methods you would like to design
class Question(models.Model):
course_id = models.ForeignKey(Course, on_delete=models.CASCADE)
question_text = models.CharField(max_length=1000)
question_grade = models.IntegerField()
# <HINT> A sample model method to calculate if the learner gets the score for the question
def is_get_score(self, selected_ids):
all_answers = self.choice_set.filter(is_correct=True).count()
selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
if all_answers == selected_correct:
return True
else:
return False
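# Illustrative grading sketch (hypothetical ids, assuming choices 1 and 2 are the only
# correct ones for this question):
#   question.is_get_score([1, 2])    -> True   (every correct choice was selected)
#   question.is_get_score([1])       -> False  (a correct choice is missing)
#   question.is_get_score([1, 2, 3]) -> True   (extra wrong choices are not penalized here)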
# <HINT> Create a Choice Model with:
# Used to persist choice content for a question
# One-To-Many (or Many-To-Many if you want to reuse choices) relationship with Question
# Choice content
# Indicate if this choice of the question is a correct one or not
# Other fields and methods you would like to design
class Choice(models.Model):
question_id = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.TextField()
is_correct = models.BooleanField()
# <HINT> The submission model
# One enrollment could have multiple submission
# One submission could have multiple choices
# One choice could belong to multiple submissions
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
#Other fields and methods you would like to design
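# Rough usage sketch (hypothetical objects, assuming the models above are migrated):
#   enrollment = Enrollment.objects.create(user=some_user, course=some_course, mode=Enrollment.HONOR)
#   submission = Submission.objects.create(enrollment=enrollment)
#   submission.choices.add(choice_a, choice_b)   # record the learner's selected answers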
| 34.145985
| 104
| 0.703292
|
ddfd7f23264a201a727793c7063f8cb1a4a6d175
| 287
|
py
|
Python
|
ch4/outer.py
|
chunhua2017/pythonprogrammingdemo
|
64e4ac2b33c54cde4671291a6203e94cd96de4ba
|
[
"MIT"
] | 4
|
2020-05-18T05:25:44.000Z
|
2021-07-30T01:02:39.000Z
|
ch4/outer.py
|
chunhua2017/pythonprogrammingdemo
|
64e4ac2b33c54cde4671291a6203e94cd96de4ba
|
[
"MIT"
] | null | null | null |
ch4/outer.py
|
chunhua2017/pythonprogrammingdemo
|
64e4ac2b33c54cde4671291a6203e94cd96de4ba
|
[
"MIT"
] | 2
|
2021-09-15T05:41:05.000Z
|
2022-01-25T05:44:43.000Z
|
x = 10 # global variable
def outer():
x = 20 # local variable from outer()'s point of view
# nonlocal variable from inner()'s point of view
def inner():
x = 30 # local variable from inner()'s point of view
print("From inner():x={0:d}".format(x))
inner()
print("From outer():x={0:d}".format(x))
outer()
print("From module:x={0:d}".format(x))
| 20.5
| 47
| 0.529617
|
720f4750294440fc808123c80162d2bafde03333
| 1,538
|
py
|
Python
|
example/config.py
|
slippers/Flask-Prose
|
809f563d16f1e3e6b5cab5bee84033934122aa6a
|
[
"MIT"
] | null | null | null |
example/config.py
|
slippers/Flask-Prose
|
809f563d16f1e3e6b5cab5bee84033934122aa6a
|
[
"MIT"
] | null | null | null |
example/config.py
|
slippers/Flask-Prose
|
809f563d16f1e3e6b5cab5bee84033934122aa6a
|
[
"MIT"
] | null | null | null |
import os
import binascii
from tempfile import gettempdir
def basedir():
return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
class BaseConfig(object):
SITE_NAME = 'Default'
DEBUG = False
TESTING = False
SECRET_KEY = binascii.hexlify(os.urandom(24))
# flask-sqlalchemy
DATABASE_URI = 'sqlite://' # in memory
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(BaseConfig):
DEBUG = True
TESTING = False
DATABASE_URI = 'sqlite:///{0}/myapp.db'.format(gettempdir())
SQLALCHEMY_ECHO = True
prose = 'sqlite:///' + os.path.join(os.getcwd(), 'prose.db')
security = 'sqlite:///' + os.path.join(os.getcwd(), 'security.db')
SQLALCHEMY_BINDS = {
'prose': prose,
'security': security,
}
print(SQLALCHEMY_BINDS)
# celery settings
BROKER_TRANSPORT = 'redis'
BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0'
#config flask-security
WTF_CSRF_ENABLED = False
class ProductionConfig(BaseConfig):
pass
class TestingConfig(BaseConfig):
DEBUG = False
TESTING = True
config = {
'dev': 'config.DevelopmentConfig',
'default': 'config.DevelopmentConfig',
}
def configure_app(app):
# set MYAPP_CONFIG in the apache httpd.conf file using SetEnv
selected_config = os.getenv('MYAPP_CONFIG', 'default')
app.config.from_object(config[selected_config])
app.config.from_pyfile('settings.cfg', silent=False)
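# A minimal usage sketch (assuming a Flask app and a settings.cfg file next to it;
# everything except configure_app and the config classes above is hypothetical):
#   import os
#   from flask import Flask
#   os.environ['MYAPP_CONFIG'] = 'dev'   # or set via SetEnv in the apache httpd.conf
#   app = Flask(__name__)
#   configure_app(app)                   # loads DevelopmentConfig, then settings.cfg on top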
| 22.617647
| 70
| 0.674902
|
e5643f526246e6dc61d0ac17c4138f7244be9626
| 3,994
|
py
|
Python
|
model-optimizer/mo/front/mxnet/loader_test.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/front/mxnet/loader_test.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 19
|
2021-03-26T08:11:00.000Z
|
2022-02-21T13:06:26.000Z
|
model-optimizer/mo/front/mxnet/loader_test.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 1
|
2021-07-28T17:30:46.000Z
|
2021-07-28T17:30:46.000Z
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
from mo.front.mxnet.loader import load_symbol_nodes, parse_input_model
class MockSymbolLoadObj():
def tojson(self):
pass
class TestLoader(unittest.TestCase):
@patch('json.load')
@patch('json.loads')
@patch('os.path.isfile')
@patch('mxnet.symbol.load')
def test_load_symbol_nodes(self, mock_symbol_load, mock_isfile, mock_json_loads, mock_json_load):
mock_isfile.return_value = True
mock_json_load.return_value = {'nodes': ''}
mock_json_loads.return_value = {'nodes': {'node1': 1}}
mock_symbol_load_obj = MockSymbolLoadObj()
mock_symbol_load.return_value = mock_symbol_load_obj
with patch('mo.front.mxnet.loader.open') as mock_open:
self.assertEqual({'node1': 1}, load_symbol_nodes("model_name", legacy_mxnet_model=True))
@patch('json.load')
@patch('json.loads')
@patch('os.path.isfile')
@patch('mxnet.symbol.load')
def test_load_symbol_with_custom_nodes(self, mock_symbol_load, mock_isfile, mock_json_loads, mock_json_load):
mock_isfile.return_value = True
mock_json_load.return_value = {'nodes': [{'op': 'custom_op'}, {'op': 'custom_op'}]}
mock_json_loads.return_value = {'nodes': {'node1': 1}}
mock_symbol_load_obj = MockSymbolLoadObj()
mock_symbol_load.return_value = mock_symbol_load_obj
with patch('mo.front.mxnet.loader.open') as mock_open:
list_nodes = load_symbol_nodes("model_name", legacy_mxnet_model=False)
self.assertEqual(2, len(list_nodes))
for node in list_nodes:
self.assertEqual({'op': 'custom_op'}, node)
def test_parse_input_model(self):
input_model = '/model-optimizer-mxnet/data/nd/vgg19-0015.params'
model_name, iteration_number = parse_input_model(input_model)
self.assertEqual(model_name, '/model-optimizer-mxnet/data/nd/vgg19')
self.assertEqual(iteration_number, 15)
@patch('json.load')
@patch('json.loads')
@patch('os.path.isfile')
@patch('mxnet.symbol.load')
def test_load_symbol_nodes_with_json_and_legacy_mode(self, mock_symbol_load, mock_isfile, mock_json_loads, mock_json_load):
mock_isfile.return_value = True
mock_json_load.return_value = {'nodes': ''}
mock_json_loads.return_value = {'nodes': {'node1': 1}}
mock_symbol_load_obj = MockSymbolLoadObj()
mock_symbol_load.return_value = mock_symbol_load_obj
with patch('mo.front.mxnet.loader.open') as mock_open:
self.assertEqual({'node1': 1}, load_symbol_nodes("model_name", input_symbol="some-symbol.json", legacy_mxnet_model=True))
@patch('json.load')
@patch('json.loads')
@patch('os.path.isfile')
@patch('mxnet.symbol.load')
def test_load_symbol_nodes_with_json(self, mock_symbol_load, mock_isfile, mock_json_loads, mock_json_load):
mock_isfile.return_value = True
#json.load
mock_json_load.return_value = {'nodes': {'node1': 1}}
mock_json_loads.return_value = {'nodes': ''}
mock_symbol_load_obj = MockSymbolLoadObj()
mock_symbol_load.return_value = mock_symbol_load_obj
with patch('mo.front.mxnet.loader.open') as mock_open:
self.assertEqual({'node1': 1}, load_symbol_nodes("model_name", input_symbol="some-symbol.json", legacy_mxnet_model=False))
| 43.413043
| 134
| 0.704056
|
fdea68eb9085eed64528ee2b94b401d74d1530a8
| 5,762
|
py
|
Python
|
python/foglamp/services/core/api/configuration.py
|
praveen-garg/FogLAMP
|
abdec78b59c562513478d64402cdfc9681dc9dc0
|
[
"Apache-2.0"
] | 1
|
2017-08-16T22:34:27.000Z
|
2017-08-16T22:34:27.000Z
|
python/foglamp/services/core/api/configuration.py
|
praveen-garg/FogLAMP
|
abdec78b59c562513478d64402cdfc9681dc9dc0
|
[
"Apache-2.0"
] | 35
|
2017-07-05T06:26:35.000Z
|
2017-12-28T12:27:00.000Z
|
python/foglamp/services/core/api/configuration.py
|
praveen-garg/FogLAMP
|
abdec78b59c562513478d64402cdfc9681dc9dc0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
from aiohttp import web
from foglamp.services.core import connect
from foglamp.common.configuration_manager import ConfigurationManager
__author__ = "Amarendra K. Sinha, Ashish Jabble"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/categories |
| GET | /foglamp/category/{category_name} |
| GET PUT | /foglamp/category/{category_name}/{config_item} |
| DELETE | /foglamp/category/{category_name}/{config_item}/value |
-------------------------------------------------------------------------------
"""
#################################
# Configuration Manager
#################################
async def get_categories(request):
"""
Args:
request:
Returns:
the list of known categories in the configuration database
:Example:
curl -X GET http://localhost:8081/foglamp/categories
"""
# TODO: make it optimized and elegant
cf_mgr = ConfigurationManager(connect.get_storage())
categories = await cf_mgr.get_all_category_names()
categories_json = [{"key": c[0], "description": c[1]} for c in categories]
return web.json_response({'categories': categories_json})
async def get_category(request):
"""
Args:
request: category_name is required
Returns:
the configuration items in the given category.
:Example:
curl -X GET http://localhost:8081/foglamp/category/PURGE_READ
"""
category_name = request.match_info.get('category_name', None)
if not category_name:
raise web.HTTPBadRequest(reason="Category Name is required")
# TODO: make it optimized and elegant
cf_mgr = ConfigurationManager(connect.get_storage())
category = await cf_mgr.get_category_all_items(category_name)
if category is None:
raise web.HTTPNotFound(reason="No such Category Found for {}".format(category_name))
return web.json_response(category)
async def get_category_item(request):
"""
Args:
request: category_name & config_item are required
Returns:
the configuration item in the given category.
:Example:
curl -X GET http://localhost:8081/foglamp/category/PURGE_READ/age
"""
category_name = request.match_info.get('category_name', None)
config_item = request.match_info.get('config_item', None)
if not category_name or not config_item:
raise web.HTTPBadRequest(reason="Both Category Name and Config items are required")
# TODO: make it optimized and elegant
cf_mgr = ConfigurationManager(connect.get_storage())
category_item = await cf_mgr.get_category_item(category_name, config_item)
if category_item is None:
raise web.HTTPNotFound(reason="No Category Item Found")
return web.json_response(category_item)
async def set_configuration_item(request):
"""
Args:
request: category_name, config_item, {"value" : <some value>} are required
Returns:
set the configuration item value in the given category.
:Example:
curl -X PUT -H "Content-Type: application/json" -d '{"value": <some value> }' http://localhost:8081/foglamp/category/{category_name}/{config_item}
For {category_name}=>PURGE update value for {config_item}=>age
curl -X PUT -H "Content-Type: application/json" -d '{"value": 24}' http://localhost:8081/foglamp/category/PURGE/age
"""
category_name = request.match_info.get('category_name', None)
config_item = request.match_info.get('config_item', None)
data = await request.json()
# TODO: make it optimized and elegant
cf_mgr = ConfigurationManager(connect.get_storage())
try:
value = data['value']
await cf_mgr.set_category_item_value_entry(category_name, config_item, value)
result = await cf_mgr.get_category_item(category_name, config_item)
if result is None:
raise web.HTTPNotFound(reason="No detail found for the category_name: {} and config_item: {}".format(category_name, config_item))
except KeyError:
raise web.HTTPBadRequest(reason='Missing required value for {}'.format(config_item))
return web.json_response(result)
async def delete_configuration_item_value(request):
"""
Args:
request: category_name, config_item are required
Returns:
set the configuration item value to empty string in the given category
:Example:
curl -X DELETE http://localhost:8081/foglamp/category/{category_name}/{config_item}/value
For {category_name}=>PURGE delete value for {config_item}=>age
curl -X DELETE http://localhost:8081/foglamp/category/PURGE/age/value
"""
category_name = request.match_info.get('category_name', None)
config_item = request.match_info.get('config_item', None)
if not category_name or not config_item:
raise web.HTTPBadRequest(reason="Both Category Name and Config items are required")
# TODO: make it optimized and elegant
cf_mgr = ConfigurationManager(connect.get_storage())
await cf_mgr.set_category_item_value_entry(category_name, config_item, '')
result = await cf_mgr.get_category_item(category_name, config_item)
if result is None:
raise web.HTTPNotFound(reason="No detail found for the category_name: {} and config_item: {}".format(category_name, config_item))
return web.json_response(result)
| 34.297619
| 154
| 0.657931
|
55b273c22418a96a41e25ffafac455847b208900
| 24,899
|
py
|
Python
|
vaspy/atomco.py
|
MrLonelyZC88/VASPy
|
9edd6cb02a08653a0de5ad501496827cd5a19147
|
[
"MIT"
] | 1
|
2018-11-29T13:04:36.000Z
|
2018-11-29T13:04:36.000Z
|
vaspy/atomco.py
|
MrLonelyZC88/VASPy
|
9edd6cb02a08653a0de5ad501496827cd5a19147
|
[
"MIT"
] | null | null | null |
vaspy/atomco.py
|
MrLonelyZC88/VASPy
|
9edd6cb02a08653a0de5ad501496827cd5a19147
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
===================================================================
Provide coordinate file classes which do operations on these files.
===================================================================
Written by PytLab <shaozhengjiang@gmail.com>, November 2014
Updated by PytLab <shaozhengjiang@gmail.com>, May 2017
==============================================================
"""
import logging
import re
from collections import namedtuple
from math import acos, degrees
from itertools import combinations
import numpy as np
from vaspy import VasPy
from vaspy.errors import CarfileValueError
from vaspy.functions import *
class AtomCo(VasPy):
"Base class to be inherited by atomco classes."
def __init__(self, filename):
VasPy.__init__(self, filename)
def verify(self):
if len(self.data) != self.natom:
raise CarfileValueError('Atom numbers mismatch!')
@property
def atomco_dict(self):
"""
Return the current atom type and coordinates mapping,
make sure the data in dict can be updated in time.
"""
return self.get_atomco_dict(self.data)
@property
def tf_dict(self):
"""
Return the current atom type and T/F mapping, make sure the data
can be updated in time when returned.
"""
return self.get_tf_dict(self.tf)
def get_atomco_dict(self, data):
"""
Build atomco_dict from the loaded data, atom_types and atom_numbers.
"""
# [1, 1, 1, 16] -> [0, 1, 2, 3, 19]
idx_list = [sum(self.atom_numbers[:i])
for i in range(1, len(self.atom_types)+1)]
idx_list = [0] + idx_list
data_list = data.tolist()
atomco_dict = {}
for atom_type, idx, next_idx in zip(self.atom_types,
idx_list[:-1],
idx_list[1:]):
atomco_dict.setdefault(atom_type, data_list[idx: next_idx])
return atomco_dict
def get_tf_dict(self, tf):
"""
Build tf_dict from the loaded tf, atom_types and atom_numbers.
"""
# [1, 1, 1, 16] -> [0, 1, 2, 3, 19]
idx_list = [sum(self.atom_numbers[:i])
for i in range(1, len(self.atom_types)+1)]
idx_list = [0] + idx_list
tf_list = tf.tolist()
tf_dict = {}
for atom_type, idx, next_idx in zip(self.atom_types,
idx_list[:-1],
idx_list[1:]):
tf_dict.setdefault(atom_type, tf_list[idx: next_idx])
return tf_dict
def get_xyz_content(self, step=None, bases=None):
"""
Get xyz file content.
Get the up-to-date .xyz file content string.
Parameters:
-----------
step: The step number, int, optional, 1 by default.
bases: If the bases is provided, default data is regarded as direct
coordinates and would be converted to Cartesian coordinates using
bases.
"""
natom = "{:12d}\n".format(self.natom)
try:
step = self.step if step is None else step
except AttributeError:
step = 1
step = "STEP ={:9d}\n".format(step)
data = ''
for atom in self.atom_types:
if bases is not None:
coords = self.dir2cart(bases, np.array(self.atomco_dict[atom]))
coords = coords.tolist()
else:
coords = self.atomco_dict[atom]
template = '{:<3s}{:>16.7f}{:>16.7f}{:>16.7f}\n'
for i in range(len(coords)):
data += template.format(atom, *coords[i])
content = natom + step + data
return content
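# The returned string follows the plain .xyz layout, roughly (hypothetical two-atom system;
# column widths are approximate):
#
#              2
#   STEP =        1
#   C     0.0000000   0.0000000   0.0000000
#   O     0.0000000   0.0000000   1.2000000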
def get_poscar_content(self, **kwargs):
"""
Get POSCAR content.
Build the POSCAR file content string from the object data.
Parameters:
-----------
bases_const: The constant for basis vectors, optional, 1.0 by default.
bases: The basis vectors for the lattice, option, 3x3 np.array.
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] by default.
tf: The T/F info for all atoms. Nx3 np.array, n is the length of 0th axis of data.
"""
content = 'Created by VASPy\n'
# bases constant.
try:
bases_const = self.bases_const
except AttributeError:
bases_const = kwargs.get("bases_const", 1.0)
bases_const = " {:.9f}\n".format(bases_const)
# bases
try:
bases = self.bases
except AttributeError:
bases = kwargs.get("bases", np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]))
bases_list = bases.tolist()
bases = ''
for basis in bases_list:
bases += "{:14.8f}{:14.8f}{:14.8f}\n".format(*basis)
# atom info
types, numbers = self.atom_types, self.atom_numbers
atom_types = ("{:>5s}"*len(types) + "\n").format(*types)
atom_numbers = ("{:>5d}"*len(numbers) + "\n").format(*numbers)
# string
info = "Selective Dynamics\nDirect\n"
# data and tf
try:
tf = self.tf
except AttributeError:
# Initialize tf with 'T's.
default_tf = np.full(self.data.shape, 'T', dtype=np.str)
tf = kwargs.get("tf", default_tf)
data_tf = ''
for data, tf in zip(self.data.tolist(), tf.tolist()):
data_tf += ("{:18.12f}"*3 + "{:>5s}"*3 + "\n").format(*(data+tf))
# merge all strings
content += (bases_const + bases + atom_types + atom_numbers +
info + data_tf)
return content
def get_cif_content(self):
"""
Get the cif file content.
"""
content = 'data_VESTA_phase_1\n\n'
# Phase name
phase_name = ('xyz {}'*len(self.atom_types)).format(*self.atom_types)
content += "{:<40s}'{}'\n".format('_pd_phase_name', phase_name)
# Basis vectors lengths.
length_a, length_b, length_c = [np.linalg.norm(basis) for basis in self.bases]
content += '{:<40s}{:<.5f}\n'.format('_cell_length_a', length_a)
content += '{:<40s}{:<.5f}\n'.format('_cell_length_b', length_b)
content += '{:<40s}{:<.5f}\n'.format('_cell_length_c', length_c)
# Angles between basis vectors.
angle = lambda X, Y: degrees(acos(np.dot(X, Y)/(np.linalg.norm(X)*np.linalg.norm(Y))))
alpha, beta, gamma = [angle(X, Y) for X, Y in combinations(self.bases, 2)]
content += '{:<40s}{:<.2f}\n'.format('_cell_angle_alpha', alpha)
content += '{:<40s}{:<.2f}\n'.format('_cell_angle_beta', beta)
content += '{:<40s}{:<.2f}\n'.format('_cell_angle_gamma', gamma)
# Other info.
content += "{:<40s}'P 1'\n".format('_symmetry_space_group_name_H-M')
content += '{:<40s}1\n\n'.format('_symmetry_Int_Tables_number')
content += "loop_\n_symmetry_equiv_pos_as_xyz\n 'x, y, z'\n\n"
# Atom info.
content += ('loop_\n' +
' _atom_site_label\n' +
' _atom_site_occupancy\n' +
' _atom_site_fract_x\n' +
' _atom_site_fract_y\n' +
' _atom_site_fract_z\n' +
' _atom_site_adp_type\n' +
' _atom_site_B_iso_or_equiv\n' +
' _atom_site_type_symbol\n')
# Atom coordinates.
line_template = ' {:<9s}{:<7.1}{:<13.5f}{:<13.5f}{:<13.5f}{:<6s}{:<7.3f}{:s}\n'
atom_count = 0
for atom_type, coordinates in self.atomco_dict.items():
for x, y, z in coordinates:
atom_count += 1
name = '{}{}'.format(atom_type, atom_count)
content += line_template.format(name, 1.0, x, y, z, 'Biso', 1.0, atom_type)
return content
def get_volume(self):
"""
Get volume of slab(Angstrom^3)
Get the lattice volume.
"""
if hasattr(self, 'bases_const') and hasattr(self, 'bases'):
bases = self.bases_const*self.bases
volume = np.linalg.det(bases)
self.volume = volume
else:
raise AttributeError("Object has no bases and bases_const")
return volume
@staticmethod
def dir2cart(bases, data):
"""
Static method to convert direct coordinates to Cartesian coordinates.
Parameters:
-----------
bases: The 3x3 array for basis vectors, 3x3 numpy.array.
data: The direct coordinate data, Nx3 numpy.array.
"""
A = np.matrix(bases).T
x = np.matrix(data).T
b = A*x
b = np.array(b.T)
if b.shape[0] == 1:
b = b.reshape(3, )
return b
@staticmethod
def cart2dir(bases, data):
"""
Static method to convert Cartesian coordinates to direct coordinates.
Parameters:
-----------
bases: The 3x3 array for basis vectors, 3x3 numpy.array.
data: The Cartesian coordinate data, Nx3 numpy.array or a single 3D vector.
"""
b = np.matrix(data).T
A = np.matrix(bases).T
x = A.I*b
x = np.array(x.T)
if x.shape[0] == 1:
x = x.reshape(3, )
return x
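# A small worked sketch (hypothetical orthorhombic cell, not from the original module):
#   bases = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 10.0]])
#   AtomCo.dir2cart(bases, np.array([0.5, 0.5, 0.1]))   # -> array([1., 1., 1.])
#   AtomCo.cart2dir(bases, np.array([1.0, 1.0, 1.0]))   # -> array([0.5, 0.5, 0.1])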
class XyzFile(AtomCo):
"""
Create a .xyz file class.
Example:
>>> a = XyzFile(filename='ts.xyz')
Class attributes descriptions
=======================================================================
Attribute Description
============ =======================================================
filename string, name of the file the direct coordinate data
stored in
natom int, the number of total atom number
step int, STEP number in OUT.ANI file
atom_types list of string, atom types
atom_numbers list of int, atom number of atoms
atomco_dict dict, {atom name: coordinates}
data np.array, coordinates of atoms, dtype=float64
============ =======================================================
"""
def __init__(self, **kwargs):
filename = kwargs.pop("filename", None)
content = kwargs.pop("content", None)
content_list = kwargs.pop("content_list", None)
if content_list is not None:
content_list = content_list
elif filename is not None:
super(self.__class__, self).__init__(filename)
with open(self.filename, 'r') as f:
content_list = f.readlines()
elif content is not None:
content = content.strip()
content_list = content.split("\n")
self.load(content_list)
self.verify()
def load(self, content_list):
""" Load all data in xyz file.
"""
# Total number of all atoms.
natom = int(content_list[0].strip())
# The iteration step for this xyz file.
step = int(str2list(content_list[1])[-1])
# Get atom coordinate and number info
data_list = [str2list(line) for line in content_list[2:]]
data_array = np.array(data_list) # dtype=np.string
atoms_list = list(data_array[:, 0]) # 1st column
data = np.float64(data_array[:, 1:]) # rest columns
# Atom number for each atom
atom_types = []
for atom in atoms_list:
if atom not in atom_types:
atom_types.append(atom)
atom_numbers = [atoms_list.count(atom) for atom in atom_types]
# Set attributes.
self.natom = natom
self.step = step
self.atom_types = atom_types
self.atom_numbers = atom_numbers
self.data = data
def coordinate_transform(self, bases=None):
"Use Ax=b to do coordinate transform cartesian to direct"
if bases is None:
bases = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
return self.cart2dir(bases, self.data)
def get_content(self):
"获取最新文件内容字符串"
content = self.get_xyz_content()
return content
def tofile(self, filename='atomco.xyz'):
"XyzFile object to .xyz file."
content = self.get_content()
with open(filename, 'w') as f:
f.write(content)
return
class PosCar(AtomCo):
def __init__(self, filename='POSCAR'):
"""
Class to generate POSCAR or CONTCAR-like objects.
Example:
>>> a = PosCar(filename='POSCAR')
Class attributes descriptions
=======================================================================
Attribute Description
============ =======================================================
filename string, name of the file the direct coordinate data
stored in
bases_const float, lattice bases constant
bases np.array, bases of POSCAR
natom int, the number of total atom number
atom_types list of strings, atom types
atom_numbers list of int, same shape with atoms
atom number of atoms in atoms
tf list of list, T&F info of atoms
data np.array, coordinates of atoms, dtype=float64
============ =======================================================
"""
AtomCo.__init__(self, filename)
# Load all data in file
self.load()
self.verify()
def load(self):
""" Load all information in POSCAR.
"""
with open(self.filename, 'r') as f:
content_list = f.readlines()
# get scale factor
bases_const = float(content_list[1])
# bases
bases = [str2list(basis) for basis in content_list[2:5]]
# Atom info
atom_types = str2list(content_list[5])
# Atom number (str).
atom_numbers = str2list(content_list[6])
if content_list[7][0] in 'Ss':
data_begin = 9
else:
data_begin = 8
# get total number before load data
atom_numbers = [int(i) for i in atom_numbers]
natom = sum(atom_numbers)
# data
data, tf = [], [] # data and T or F info
tf_dict = {} # {tf: atom number}
for line_str in content_list[data_begin: data_begin+natom]:
line_list = str2list(line_str)
data.append(line_list[:3])
if len(line_list) > 3:
tf_list = line_list[3:]
tf.append(tf_list)
# gather tf info to tf_dict
tf_str = ','.join(tf_list)
if tf_str not in tf_dict:
tf_dict[tf_str] = 1
else:
tf_dict[tf_str] += 1
else:
tf.append(['T', 'T', 'T'])
# gather tf info to tf_dict
if 'T,T,T' not in tf_dict:
tf_dict['T,T,T'] = 1
else:
tf_dict['T,T,T'] += 1
# Data type convertion
bases = np.float64(np.array(bases)) # to float
data = np.float64(np.array(data))
tf = np.array(tf)
# set class attrs
self.bases_const = bases_const
self.bases = bases
self.atom_types = atom_types
self.atom_numbers = atom_numbers
self.natom = natom
self.data = data
self.tf = tf
self.totline = data_begin + natom # total number of line
def constrain_atom(self, atom, to='F', axis='all'):
"修改某一类型原子的FT信息"
# [1, 1, 1, 16] -> [0, 1, 2, 3, 19]
idx_list = [sum(self.atom_numbers[:i])
for i in range(1, len(self.atom_types)+1)]
idx_list = [0] + idx_list
if to not in ['T', 'F']:
raise CarfileValueError('Variable to must be T or F.')
for atom_type, idx, next_idx in zip(self.atom_types,
idx_list[:-1],
idx_list[1:]):
if atom_type == atom:
if axis in ['x', 'X']:
self.tf[idx:next_idx, 0] = to
elif axis in ['y', 'Y']:
self.tf[idx:next_idx, 1] = to
elif axis in ['z', 'Z']:
self.tf[idx:next_idx, 2] = to
else:
self.tf[idx:next_idx, :] = to
break
return self.tf
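# e.g. (hypothetical call) poscar.constrain_atom('O', to='F', axis='z') freezes all O atoms
# along the z direction only, while constrain_atom('O', to='F') freezes them in all directions.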
def get_content(self):
"根据对象数据获取文件内容字符串"
content = self.get_poscar_content()
return content
def add_atom(self, atom_type, coordinate, fix=['T', 'T', 'T']):
"""
Add a new atom to coordinate file.
Parameters:
-----------
atom_type: element type of the atom, str.
coordinate: position of the added atom, list of float.
fix: flags for fixed atom in three directions, list of str.
Example:
--------
>>> poscar.add_atom('C', [0.5, 0.5, 0.3])
"""
atomco_dict = self.atomco_dict
tf_dict = self.tf_dict
self.natom += 1
self.totline += 1
if atom_type in self.atom_types:
atomco_dict[atom_type].append(coordinate)
tf_dict[atom_type].append(fix)
idx = self.atom_types.index(atom_type)
self.atom_numbers[idx] += 1
else:
self.atom_types.append(atom_type)
atomco_dict[atom_type] = [coordinate]
tf_dict[atom_type] = [fix]
self.atom_numbers.append(1)
# New data and fix info.
data, tf = [], []
for atom_type in self.atom_types:
data += atomco_dict[atom_type]
tf += tf_dict[atom_type]
self.data = np.float64(np.array(data))
self.tf = np.array(tf)
def tofile(self, filename='POSCAR_c'):
"生成文件"
"PosCar object to POSCAR or CONTCAR."
content = self.get_content()
with open(filename, 'w') as f:
f.write(content)
return
class ContCar(PosCar):
def __init__(self, filename='CONTCAR'):
'''
Class to generate POSCAR or CONTCAR-like objects.
Totally same as PosCar class.
Example:
>>> a = ContCar(filename='POSCAR')
'''
PosCar.__init__(self, filename=filename)
def tofile(self, filename='CONTCAR_c'):
PosCar.tofile(self, filename=filename)
class XdatCar(AtomCo):
def __init__(self, filename='XDATCAR'):
"""
Class to generate XDATCAR objects.
Example:
>>> a = XdatCar()
Class attributes descriptions
=======================================================================
Attribute Description
============ =======================================================
filename string, name of the file the direct coordinate data
stored in
bases_const float, lattice bases constant
bases np.array, bases of POSCAR
natom int, the number of total atom number
atom_types list of strings, atom types
tf list of list, T&F info of atoms
info_nline int, line numbers of lattice info
============ =======================================================
"""
AtomCo.__init__(self, filename)
self.info_nline = 7 # line numbers of lattice info
self.load()
def load(self):
with open(self.filename, 'r') as f:
# read lattice info
self.system = f.readline().strip()
self.bases_const = float(f.readline().strip())
# lattice basis
self.bases = []
for i in range(3):
basis = line2list(f.readline())
self.bases.append(basis)
# atom info
self.atom_types = str2list(f.readline())
atoms_num = str2list(f.readline())
self.atom_numbers = [int(i) for i in atoms_num]
self.natom = sum(self.atom_numbers)
def __iter__(self):
""" Make the XdatCar object iterable.
"""
# Define namedtuple for the item in iteration.
XdatCarItem = namedtuple("XdatCarItem", ["step", "coordinates"])
with open(self.filename, 'r') as f:
# pass info lines
for i in range(self.info_nline):
f.readline()
prompt = f.readline().strip()
while '=' in prompt:
step = int(prompt.split('=')[-1])
data = []
for i in range(self.natom):
data_line = f.readline()
data.append(line2list(data_line))
prompt = f.readline().strip()
yield XdatCarItem._make([step, np.array(data)])
class CifFile(AtomCo):
def __init__(self, filename):
"""
Class for *.cif files.
Example:
>>> a = CifFile(filename='ts.cif')
Class attributes descriptions
=======================================================================
Attribute Description
=============== ====================================================
filename string, name of the file the direct coordinate data
stored in
natom int, the number of total atom number
atom_types list of strings, atom types
atom_numbers list of int, atom number of atoms in atoms
atom_names list of string,
Value of attribute 'Name' in Atom3d tag.
data np.array, coordinates of atoms, dtype=float64
cell_length_a float, length of cell vector a
cell_length_b float, length of cell vector b
cell_length_c float, length of cell vector c
cell_angle_alpha float, angle of cell alpha
cell_angle_beta float, angle of cell beta
cell_angle_gamma float, angle of cell gamma
=============== ====================================================
"""
super(CifFile, self).__init__(filename)
self.__logger = logging.getLogger("vaspy.CifCar")
self.load()
def load(self):
"""
Load data and attributes from *.cif file.
"""
# Regular expression for attributes matching.
regex = re.compile(r'^_(\w+)(?:\s+)(.+)$')
with open(self.filename, 'r') as f:
lines = f.readlines()
# Split lines by 'loop_' indices.
loop_indices = [i for i, line in enumerate(lines)
if line.startswith('loop_')]
# [19, 23] -> [(0, 19), (20, 23), (24, line_length)]
start_indices = [0] + [i + 1 for i in loop_indices]
end_indices = loop_indices + [len(lines)]
lines_groups = [lines[start: end] for start, end in
zip(start_indices, end_indices)]
# Get attributes.
float_candidates = ['cell_length_a', 'cell_length_b', 'cell_length_c',
'cell_angle_alpha', 'cell_angle_beta', 'cell_angle_gamma']
for line in lines_groups[0]:
line = line.strip()
if line.startswith('_'):
m = regex.match(line)
if m:
attr, value = m.groups()
if attr in float_candidates:
value = float(value)
setattr(self, attr, value)
self.__logger.debug("{} = {}".format(attr, value))
# Get coordinates data.
titles = []
data = []
atom_names = []
atom_types = []
for line in lines_groups[-1]:
line = line.strip()
if line.startswith('_'):
titles.append(line[1:])
elif line:
atom_name, _, x, y, z, _, _, atom_type = line2list(line, dtype=str)
atom_names.append(atom_name)
atom_types.append(atom_type)
data.append([float(i) for i in (x, y, z)])
# Set attributes.
self.data = np.array(data)
self.atom_names = atom_names
self.atom_types = list(set(atom_types))
self.atom_numbers = [atom_types.count(atom) for atom in self.atom_types]
self.titles = titles
self.natom = len(atom_names)
| 34.06156
| 94
| 0.506205
|
2420d0f54f7c2111520b8c2f8471162e1fb1a3df
| 23,079
|
py
|
Python
|
services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | null | null | null |
services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | 17
|
2020-10-15T16:06:05.000Z
|
2022-03-21T18:48:21.000Z
|
services/web/server/src/simcore_service_webserver/resource_manager/garbage_collector.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
from contextlib import suppress
from itertools import chain
from typing import Dict, List, Tuple
from aiohttp import web
from aiopg.sa.result import RowProxy
from aioredlock import Aioredlock
from servicelib.observer import emit
from servicelib.utils import logged_gather
from simcore_service_webserver import users_exceptions
from simcore_service_webserver.db_models import GroupType
from simcore_service_webserver.director.director_api import (
get_running_interactive_services,
stop_service,
)
from simcore_service_webserver.director.director_exceptions import (
DirectorException,
ServiceNotFoundError,
)
from simcore_service_webserver.groups_api import get_group_from_gid
from simcore_service_webserver.projects.projects_api import (
delete_project_from_db,
get_project_for_user,
get_workbench_node_ids_from_project_uuid,
is_node_id_present_in_any_project_workbench,
)
from simcore_service_webserver.projects.projects_db import (
APP_PROJECT_DBAPI,
ProjectAccessRights,
)
from simcore_service_webserver.projects.projects_exceptions import ProjectNotFoundError
from simcore_service_webserver.users_api import (
delete_user,
get_guest_user_ids_and_names,
get_user,
get_user_id_from_gid,
is_user_guest,
)
from simcore_service_webserver.users_to_groups_api import get_users_for_gid
from .config import (
APP_CLIENT_REDIS_LOCK_KEY,
APP_GARBAGE_COLLECTOR_KEY,
GUEST_USER_RC_LOCK_FORMAT,
get_garbage_collector_interval,
)
from .registry import RedisResourceRegistry, get_registry
logger = logging.getLogger(__name__)
async def setup_garbage_collector_task(app: web.Application):
loop = asyncio.get_event_loop()
app[APP_GARBAGE_COLLECTOR_KEY] = loop.create_task(garbage_collector_task(app))
yield
task = app[APP_GARBAGE_COLLECTOR_KEY]
task.cancel()
await task
def setup_garbage_collector(app: web.Application):
app.cleanup_ctx.append(setup_garbage_collector_task)
async def garbage_collector_task(app: web.Application):
keep_alive = True
while keep_alive:
logger.info("Starting garbage collector...")
try:
interval = get_garbage_collector_interval(app)
while True:
await collect_garbage(app)
await asyncio.sleep(interval)
except asyncio.CancelledError:
keep_alive = False
logger.info("Garbage collection task was cancelled, it will not restart!")
except Exception: # pylint: disable=broad-except
logger.warning(
"There was an error during garbage collection, restarting...",
exc_info=True,
)
# will wait 5 seconds to recover before restarting to avoid restart loops
# - it might be that db/redis is down, etc
#
await asyncio.sleep(5)
async def collect_garbage(app: web.Application):
"""
Garbage collection has the task of removing trash (i.e. unused resources) from the system. The trash
can be divided in:
- Websockets & Redis (used to keep track of current active connections)
- GUEST users (used for temporary access to the system which are created on the fly)
- Deletion of users. If a user needs to be deleted it can be set as GUEST in the database
The resources are Redis entries where all information regarding all the
websocket identifiers for all opened tabs across all browsers for each user
are stored.
The alive/dead keys are normal Redis keys. To each key an ALIVE key is associated,
which has an assigned TTL (Time To Live). The browser will call the `client_heartbeat` websocket
endpoint to refresh the TTL, thus declaring that the user (websocket connection) is
still active. The `resource_deletion_timeout_seconds` is the TTL of the key.
The field `garbage_collection_interval_seconds` defines the interval at which this
function will be called.
"""
logger.info("Collecting garbage...")
registry: RedisResourceRegistry = get_registry(app)
# Removes disconnected user resources
# Triggers signal to close possible pending opened projects
# Removes disconnected GUEST users after they finished their sessions
await remove_disconnected_user_resources(registry, app)
# Users manually marked for removal:
# if a user was manually marked as GUEST it needs to be
# removed together with all the associated projects
await remove_users_manually_marked_as_guests(registry, app)
# For various reasons, some services remain pending after
# the projects are closed or the user was disconnected.
# This will close and remove all these services from
# the cluster, thus freeing important resources.
# Temporarily disabling GC until the dynamic service
# safe function is invoked by the GC. This will avoid
# data loss for current users.
await remove_orphaned_services(registry, app)
async def remove_disconnected_user_resources(
registry: RedisResourceRegistry, app: web.Application
) -> None:
lock_manager: Aioredlock = app[APP_CLIENT_REDIS_LOCK_KEY]
#
# In redis jargon, every entry is denoted as "key"
# - A key can contain one or more fields: name-value pairs
# - A key can have a limited livespan by setting the Time-to-live (TTL) which
# is automatically decreasing
#
# - Every user can open multiple sessions (e.g. in different tabs and/or browser) and
# each session is hierarchically represented in the redis registry with two keys:
# - "alive" that keeps a TLL
# - "resources" to keep a list of resources
# - A resource is defined as something that can be acquired/released and sometimes
# also shared. For instance, websocket_id and project_id are resource ids. The first is established
# between the web-client and the backend.
#
# - If all sessions of a GUEST user close (i.e. "alive" key expires)
#
#
# alive_keys = currently "active" users
# dead_keys = users considered as "inactive" (i.e. resource has expired since TTL reached 0!)
# these keys hold references to more than one websocket connection ids
# the websocket ids are referred to as resources (but NOT the only resource)
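# Shape sketch of a registry entry as described above (field names other than user_id and
# project_id are hypothetical illustrations):
#   key         : {"user_id": "42", "client_session_id": "a1b2c3"}
#   "alive"     : TTL-bound marker, refreshed by the client heartbeat
#   "resources" : {"socket_id": "xyz789", "project_id": "1234-abcd"}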
alive_keys, dead_keys = await registry.get_all_resource_keys()
logger.debug("potential dead keys: %s", dead_keys)
# clean up all resources of expired keys
for dead_key in dead_keys:
# Skip locked keys for the moment
user_id = int(dead_key["user_id"])
if await lock_manager.is_locked(
GUEST_USER_RC_LOCK_FORMAT.format(user_id=user_id)
):
logger.debug(
"Skipping garbage-collecting user '%d' since it is still locked",
user_id,
)
continue
# (0) If key has no resources => remove from registry and continue
dead_key_resources = await registry.get_resources(dead_key)
if not dead_key_resources:
await registry.remove_key(dead_key)
continue
# (1,2) CAREFULLY releasing every resource acquired by the expired key
logger.debug(
"Key '%s' expired. Cleaning the following resources: '%s'",
dead_key,
dead_key_resources,
)
for resource_name, resource_value in dead_key_resources.items():
# Releasing a resource consists of two steps
# - (1) release actual resource (e.g. stop service, close project, deallocate memory, etc)
# - (2) remove resource field entry in expired key registry after (1) is completed.
# collects a list of keys for (2)
keys_to_update = [
dead_key,
]
# Every resource might be shared with other keys.
# In that case, the resource is released by THE LAST DYING KEY
# (we could call this the "last-standing-man" pattern! :-) )
#
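# For example (keys A and B are hypothetical): if the expired key A and another
# key B both hold the same "project_id" resource and B is still alive, step (1)
# is skipped and only A's resource field is removed in step (2); if B is dead as
# well, the project is closed in (1) and the field is removed from both in (2).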
other_keys_with_this_resource = [
k
for k in await registry.find_keys((resource_name, resource_value))
if k != dead_key
]
is_resource_still_in_use: bool = any(
k in alive_keys for k in other_keys_with_this_resource
)
if not is_resource_still_in_use:
# adds the remaining resource entries for (2)
keys_to_update.extend(other_keys_with_this_resource)
# (1) releasing acquired resources
logger.info(
"(1) Releasing resource %s:%s acquired by expired key %s",
resource_name,
resource_value,
dead_key,
)
if resource_name == "project_id":
# inform that the project can be closed on the backend side
await emit(
event="SIGNAL_PROJECT_CLOSE",
user_id=None,
project_uuid=resource_value,
app=app,
)
# if this user was a GUEST also remove it from the database
# with the only associated project owned
await remove_guest_user_with_all_its_resources(
app=app,
user_id=int(dead_key["user_id"]),
)
# (2) remove resource field in collected keys since (1) is completed
logger.info(
"(2) Removing resource %s field entry from registry keys: %s",
resource_name,
keys_to_update,
)
with suppress(asyncio.CancelledError):
on_released_tasks = [
registry.remove_resource(key, resource_name)
for key in keys_to_update
]
await logged_gather(*on_released_tasks, reraise=False)
# NOTE:
# - if releasing a resource (1) fails, annotations in registry allows GC to try in next round
# - if any task in (2) fails, GC will clean them up in next round as well
# - if all resource fields are removed from a key, next GC iteration will remove the key (see (0))
async def remove_users_manually_marked_as_guests(
registry: RedisResourceRegistry, app: web.Application
) -> None:
"""
Removes all the projects associated with GUEST users in the system.
If the user defined a TEMPLATE, this one also gets removed.
"""
lock_manager: Aioredlock = app[APP_CLIENT_REDIS_LOCK_KEY]
# collects all users with registered sessions
alive_keys, dead_keys = await registry.get_all_resource_keys()
user_ids_to_ignore = set()
for entry in chain(alive_keys, dead_keys):
user_ids_to_ignore.add(int(entry["user_id"]))
# fetch all GUEST user candidates; those that still have alive or dead resource keys are skipped below
guest_users: List[Tuple[int, str]] = await get_guest_user_ids_and_names(app)
logger.info("GUEST user candidates to clean %s", guest_users)
for guest_user_id, guest_user_name in guest_users:
if guest_user_id in user_ids_to_ignore:
logger.info(
"Ignoring user '%s' as it previously had alive or dead resource keys ",
guest_user_id,
)
continue
lock_during_construction: bool = await lock_manager.is_locked(
GUEST_USER_RC_LOCK_FORMAT.format(user_id=guest_user_name)
)
lock_during_initialization: bool = await lock_manager.is_locked(
GUEST_USER_RC_LOCK_FORMAT.format(user_id=guest_user_id)
)
if lock_during_construction or lock_during_initialization:
logger.debug(
"Skipping garbage-collecting user '%s','%s' since it is still locked",
guest_user_id,
guest_user_name,
)
continue
await remove_guest_user_with_all_its_resources(
app=app,
user_id=guest_user_id,
)
async def remove_orphaned_services(
registry: RedisResourceRegistry, app: web.Application
) -> None:
"""Removes services which are no longer tracked in the database
Multiple deployments can be active at the same time on the same cluster.
This will also check the current SWARM_STACK_NAME label of the service which
must be matching its own. The director service spawns dynamic services
which have this new label and it also filters by this label.
If the service is a dynamic service
"""
logger.info("Starting orphaned services removal...")
currently_opened_projects_node_ids = set()
alive_keys, _ = await registry.get_all_resource_keys()
for alive_key in alive_keys:
resources = await registry.get_resources(alive_key)
if "project_id" not in resources:
continue
project_uuid = resources["project_id"]
node_ids = await get_workbench_node_ids_from_project_uuid(app, project_uuid)
currently_opened_projects_node_ids.update(node_ids)
running_interactive_services = await get_running_interactive_services(app)
logger.info(
"Will collect the following: %s",
[x["service_host"] for x in running_interactive_services],
)
for interactive_service in running_interactive_services:
# if not present in DB or not part of currently opened projects, can be removed
node_id = interactive_service["service_uuid"]
if (
not await is_node_id_present_in_any_project_workbench(app, node_id)
or node_id not in currently_opened_projects_node_ids
):
logger.info("Will remove service %s", interactive_service["service_host"])
try:
await stop_service(app, node_id)
except (ServiceNotFoundError, DirectorException) as e:
logger.warning("Error while stopping service: %s", e)
logger.info("Finished orphaned services removal")
async def remove_guest_user_with_all_its_resources(
app: web.Application, user_id: int
) -> None:
"""Removes a GUEST user with all its associated projects and S3/MinIO files"""
logger.debug("Will try to remove resources for user '%s' if GUEST", user_id)
if not await is_user_guest(app, user_id):
logger.debug("User is not GUEST, skipping cleanup")
return
await remove_all_projects_for_user(app=app, user_id=user_id)
await remove_user(app=app, user_id=user_id)
async def remove_all_projects_for_user(app: web.Application, user_id: int) -> None:
"""
Goes through all the projects and will try to remove them but first it will check if
the project is shared with others.
Based on the given access rights it will determine the action to take:
- if other users have read access & execute access it will get deleted
- if other users have write access the project's owner will be changed to a new owner:
- if the project is directly shared with one or more users, one of these
will be picked as the new owner
- if the project is not shared with any user but with groups of users, one
of the users inside the group (which currently exists) will be picked as
the new owner
"""
# recover user's primary_gid
try:
project_owner: Dict = await get_user(app=app, user_id=user_id)
except users_exceptions.UserNotFoundError:
logger.warning(
"Could not recover user data for user '%s', stopping removal of projects!",
user_id,
)
return
user_primary_gid: str = str(project_owner["primary_gid"])
# fetch all projects for the user
user_project_uuids = await app[
APP_PROJECT_DBAPI
].list_all_projects_by_uuid_for_user(user_id=user_id)
logger.info(
"Project uuids, to clean, for user '%s': '%s'",
user_id,
user_project_uuids,
)
for project_uuid in user_project_uuids:
logger.debug(
"Removing or transfering project '%s'",
project_uuid,
)
try:
project: Dict = await get_project_for_user(
app=app,
project_uuid=project_uuid,
user_id=user_id,
include_templates=True,
)
except web.HTTPNotFound:
logger.warning(
"Could not recover project data for project_uuid '%s', skipping...",
project_uuid,
)
continue
new_project_owner_gid = await get_new_project_owner_gid(
app=app,
project_uuid=project_uuid,
user_id=user_id,
user_primary_gid=user_primary_gid,
project=project,
)
if new_project_owner_gid is None:
# when no new owner is found just remove the project
logger.info(
"The project can be removed as is not shared with write access with other users"
)
try:
await delete_project_from_db(app, project_uuid, user_id)
except ProjectNotFoundError:
logger.warning(
"Project '%s' not found, skipping removal", project_uuid
)
continue
# Try to change the project owner and remove access rights from the current owner
await replace_current_owner(
app=app,
project_uuid=project_uuid,
user_primary_gid=user_primary_gid,
new_project_owner_gid=new_project_owner_gid,
project=project,
)
async def get_new_project_owner_gid(
app: web.Application,
project_uuid: str,
user_id: int,
user_primary_gid: int,
project: RowProxy,
) -> str:
"""Goes through the access rights and tries to find a new suitable owner.
The first viable user is selected as a new owner.
In order to become a new owner the user must have write access right.
"""
access_rights = project["accessRights"]
other_users_access_rights = set(access_rights.keys()) - {user_primary_gid}
logger.debug(
"Processing other user and groups access rights '%s'",
other_users_access_rights,
)
# Selecting a new project owner
# divide permissions between types of groups
standard_groups = {} # groups of users, multiple users can be part of this
primary_groups = {} # each individual user has a unique primary group
for other_gid in other_users_access_rights:
group = await get_group_from_gid(app=app, gid=int(other_gid))
# only process for users and groups with write access right
if group is None:
continue
if access_rights[other_gid]["write"] is not True:
continue
if group.type == GroupType.STANDARD:
standard_groups[other_gid] = access_rights[other_gid]
elif group.type == GroupType.PRIMARY:
primary_groups[other_gid] = access_rights[other_gid]
logger.debug(
"Possible new owner groups: standard='%s', primary='%s'",
standard_groups,
primary_groups,
)
new_project_owner_gid = None
# the primary groups contain the users with which the project was directly shared
if len(primary_groups) > 0:
# fetch directly from the direct users with which the project is shared with
new_project_owner_gid = list(primary_groups.keys())[0]
# fall back to searching the standard groups if no direct user was found
if len(standard_groups) > 0 and new_project_owner_gid is None:
new_project_owner_gid = await fetch_new_project_owner_from_groups(
app=app,
standard_groups=standard_groups,
user_id=user_id,
)
logger.info(
"Will move project '%s' to user with gid '%s', if user exists",
project_uuid,
new_project_owner_gid,
)
return new_project_owner_gid
async def fetch_new_project_owner_from_groups(
app: web.Application, standard_groups: Dict, user_id: int
) -> int:
"""Iterate over all the users in a group and if the users exists in the db
return its gid"""
# fetch all users in the group and then get their gid to put in here
# go through user_to_groups table and fetch all uid for matching gid
for group_gid in standard_groups.keys():
# remove the current owner from the bunch
target_group_users = await get_users_for_gid(app=app, gid=group_gid) - {user_id}
logger.error("Found group users '%s'", target_group_users)
for possible_user_id in target_group_users:
# check if the possible_user is still present in the db
try:
possible_user = await get_user(app=app, user_id=possible_user_id)
return possible_user["primary_gid"]
except users_exceptions.UserNotFoundError:
logger.warning(
"Could not find new owner '%s' will try a new one",
possible_user_id,
)
return None
async def replace_current_owner(
app: web.Application,
project_uuid: str,
user_primary_gid: int,
new_project_owner_gid: str,
project: RowProxy,
) -> None:
try:
new_project_owner_id = await get_user_id_from_gid(
app=app, primary_gid=int(new_project_owner_gid)
)
except Exception: # pylint: disable=broad-except
logger.exception(
"Could not recover new user id from gid %s", new_project_owner_gid
)
return
# the result might be None
if new_project_owner_id is None:
logger.warning(
"Could not recover a new user id from gid %s", new_project_owner_gid
)
return
# setting the new project owner and saving the project back
project["prj_owner"] = int(new_project_owner_id)
# removing access rights entry
del project["accessRights"][str(user_primary_gid)]
project["accessRights"][
str(new_project_owner_gid)
] = ProjectAccessRights.OWNER.value
logger.error("Syncing back project %s", project)
# syncing back project data
try:
await app[APP_PROJECT_DBAPI].update_project_without_enforcing_checks(
project_data=project,
project_uuid=project_uuid,
)
except Exception: # pylint: disable=broad-except
logger.exception(
"Could not remove old owner and replaced it with user %s",
new_project_owner_id,
)
async def remove_user(app: web.Application, user_id: int) -> None:
"""Tries to remove a user, if the users still exists a warning message will be displayed"""
try:
await delete_user(app, user_id)
except Exception: # pylint: disable=broad-except
logger.warning(
"User '%s' still has some projects, could not be deleted", user_id
)
| 38.147107
| 112
| 0.661554
|
e8af60906df92e7c9a00c23b9577061c95b081da
| 1,112
|
py
|
Python
|
python_scripts/logging/main.py
|
NostraDavid/GODS
|
101777d50d3a9b0a22865cca7ccb4a09f1bc65c5
|
[
"MIT"
] | 1
|
2022-02-05T06:36:19.000Z
|
2022-02-05T06:36:19.000Z
|
python_scripts/logging/main.py
|
NostraDavid/GODS
|
101777d50d3a9b0a22865cca7ccb4a09f1bc65c5
|
[
"MIT"
] | null | null | null |
python_scripts/logging/main.py
|
NostraDavid/GODS
|
101777d50d3a9b0a22865cca7ccb4a09f1bc65c5
|
[
"MIT"
] | null | null | null |
# pip install structlog colorama
from structlog import get_logger
log = get_logger()
log.info("key_value_logging", out_of_the_box=True, effort=0)
from structlog.processors import format_exc_info
try:
raise ValueError
except ValueError:
format_exc_info(None, None, {"exc_info": True})
import structlog
log = structlog.get_logger()
log.msg("greeted", whom="world", more_than_a_string=[1, 2, 3])
# Using the defaults, as above, is equivalent to:
import logging
import structlog
structlog.configure(
processors=[
structlog.processors.add_log_level,
structlog.processors.StackInfoRenderer(),
structlog.dev.set_exc_info,
structlog.processors.format_exc_info,
structlog.processors.TimeStamper(),
structlog.dev.ConsoleRenderer()
],
wrapper_class=structlog.make_filtering_bound_logger(logging.NOTSET),
context_class=dict,
logger_factory=structlog.PrintLoggerFactory(),
cache_logger_on_first_use=False
)
log = structlog.get_logger()
structlog.configure(processors=[structlog.processors.JSONRenderer()])
structlog.get_logger().msg("hi")
| 28.512821
| 72
| 0.757194
|
267891bc657dffd35f68951ca94c9d961818efdb
| 391
|
py
|
Python
|
TuShare/wsgi.py
|
lwh2015/TuShare
|
f244e05e5cf208e18e6237d3b81f71f0d3c1394a
|
[
"MIT"
] | 1
|
2018-09-26T08:34:02.000Z
|
2018-09-26T08:34:02.000Z
|
TuShare/wsgi.py
|
lwh2015/TuShare
|
f244e05e5cf208e18e6237d3b81f71f0d3c1394a
|
[
"MIT"
] | null | null | null |
TuShare/wsgi.py
|
lwh2015/TuShare
|
f244e05e5cf208e18e6237d3b81f71f0d3c1394a
|
[
"MIT"
] | null | null | null |
"""
WSGI config for TuShare project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TuShare.settings")
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
a43bef9ef6becd116f92f625425181c851908247
| 295
|
py
|
Python
|
yahoo_auc_crawler/pipelines.py
|
lamphanqg/yahoo_auction_crawler
|
2800527dfb24f2283cb8fe7e7ec6faa577b4acb2
|
[
"MIT"
] | 1
|
2021-11-15T01:33:13.000Z
|
2021-11-15T01:33:13.000Z
|
yahoo_auc_crawler/pipelines.py
|
lamphanqg/yahoo_auction_crawler
|
2800527dfb24f2283cb8fe7e7ec6faa577b4acb2
|
[
"MIT"
] | null | null | null |
yahoo_auc_crawler/pipelines.py
|
lamphanqg/yahoo_auction_crawler
|
2800527dfb24f2283cb8fe7e7ec6faa577b4acb2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class YahooAucCrawlerPipeline(object):
def process_item(self, item, spider):
return item
| 24.583333
| 65
| 0.718644
|
25b7e5060f27278bc0ae9ff827fb12480fc04c88
| 890
|
py
|
Python
|
thefuck/rules/git_branch_exists.py
|
pybenchmark/thefuck
|
993a661c6048063e84645015cc832602b6ec32df
|
[
"MIT"
] | 1
|
2020-12-23T15:56:56.000Z
|
2020-12-23T15:56:56.000Z
|
thefuck/rules/git_branch_exists.py
|
pybenchmark/thefuck
|
993a661c6048063e84645015cc832602b6ec32df
|
[
"MIT"
] | 4
|
2020-12-23T15:44:08.000Z
|
2020-12-23T16:48:59.000Z
|
thefuck/rules/git_branch_exists.py
|
pybenchmark/thefuck
|
993a661c6048063e84645015cc832602b6ec32df
|
[
"MIT"
] | 1
|
2020-12-23T14:46:54.000Z
|
2020-12-23T14:46:54.000Z
|
import re
from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import eager
@git_support
def match(command):
return ("fatal: A branch named '" in command.stderr
and " already exists." in command.stderr)
@git_support
@eager
def get_new_command(command):
branch_name = re.findall(
r"fatal: A branch named '([^']*)' already exists.", command.stderr)[0]
new_command_templates = [['git branch -d {0}', 'git branch {0}'],
['git branch -d {0}', 'git checkout -b {0}'],
['git branch -D {0}', 'git branch {0}'],
['git branch -D {0}', 'git checkout -b {0}'],
['git checkout {0}']]
for new_command_template in new_command_templates:
yield shell.and_(*new_command_template).format(branch_name)
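# Illustrative result (hedged: the exact joining is shell-specific; with a
# bash-like shell, `shell.and_` typically joins with " && "): for stderr
# containing "fatal: A branch named 'feature' already exists.", the first
# suggestion yielded is "git branch -d feature && git branch feature".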
| 35.6
| 78
| 0.58764
|
07aa7b41a6420a3433a66b65e5defd216b520197
| 2,087
|
py
|
Python
|
src/mnist_reader.py
|
arjun-krishna/Multi-Layered-Perceptron
|
48afae44ae6ea118af941e2dc3b33e5313f14282
|
[
"MIT"
] | 4
|
2017-11-18T17:30:41.000Z
|
2022-01-12T21:39:04.000Z
|
src/mnist_reader.py
|
arjun-krishna/Multi-Layered-Perceptron
|
48afae44ae6ea118af941e2dc3b33e5313f14282
|
[
"MIT"
] | null | null | null |
src/mnist_reader.py
|
arjun-krishna/Multi-Layered-Perceptron
|
48afae44ae6ea118af941e2dc3b33e5313f14282
|
[
"MIT"
] | null | null | null |
"""
@author : arjun-krishna
@desc : Read the byte encoded MNIST data in Lecun's page
"""
from __future__ import print_function
import struct
import numpy as np
from PIL import Image
"""
display a flattened image with (r, c) dimensions
"""
def display_img(img, r, c, file=None) :
img = img.reshape(r,c)
disp = Image.fromarray(img)
if file :
disp.save(file)
else :
disp.show()
"""
output : List of flattened images
"""
def extract_data(filename) :
print ('Extracting data from', filename.split('/')[-1])
print ('-------------------------------------------------')
data = []
with open(filename, 'rb') as bytestream :
MAGIC_NUM = struct.unpack('>i', bytestream.read(4))[0]
NUM_IMAGES = struct.unpack('>i', bytestream.read(4))[0]
NUM_ROW = struct.unpack('>i', bytestream.read(4))[0]
NUM_COL = struct.unpack('>i', bytestream.read(4))[0]
print ('Number of Images : ', NUM_IMAGES)
for i in range(NUM_IMAGES) :
mssg = "Loading [{0:0.2f}%]".format(float((i+1)*100)/NUM_IMAGES)
clear = "\b"*(len(mssg))
print(mssg, end="")
buf = bytestream.read(NUM_ROW*NUM_COL)
img = np.frombuffer(buf, dtype=np.uint8)
# img = img.reshape(NUM_ROW, NUM_COL)
data.append(img)
print(clear, end="")
print ('\nExtraction Completed!')
print ('-------------------------------------------------')
return data
"""
output : List of labels
"""
def extract_labels(filename) :
print ('Extracting Labels from', filename.split('/')[-1])
print ('-------------------------------------------------')
data = []
with open(filename, 'rb') as bytestream :
MAGIC_NUM = struct.unpack('>i', bytestream.read(4))[0]
NUM_ITEMS = struct.unpack('>i', bytestream.read(4))[0]
print ('Number of Items : ', NUM_ITEMS)
for i in range(NUM_ITEMS) :
mssg = "Loading [{0:0.2f}%]".format(float((i+1)*100)/NUM_ITEMS)
clear = "\b"*(len(mssg))
print(mssg, end="")
label = struct.unpack('>B', bytestream.read(1))[0]
data.append(label)
print(clear, end="")
print ('\nExtraction Completed!')
print ('-------------------------------------------------')
return data
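# Minimal usage sketch (the paths below are hypothetical; they must point to the
# raw, uncompressed MNIST files from Lecun's page):
#   images = extract_data('data/train-images-idx3-ubyte')
#   labels = extract_labels('data/train-labels-idx1-ubyte')
#   display_img(images[0], 28, 28)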
| 27.826667
| 67
| 0.587925
|
4aeb5cb919a70c0ac2be053ebf69b329fe3c2ae2
| 109
|
py
|
Python
|
tests/test_advanced.py
|
dhaitz/python-package-template
|
b4c636e48ae192e5efe30fe71af37be6f8273d29
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_advanced.py
|
dhaitz/python-package-template
|
b4c636e48ae192e5efe30fe71af37be6f8273d29
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_advanced.py
|
dhaitz/python-package-template
|
b4c636e48ae192e5efe30fe71af37be6f8273d29
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from .context import sample
def test_thoughts():
assert(sample.hmm() is None)
| 13.625
| 32
| 0.642202
|
7cbfd1a6b67b408811681c1aac18d72fe14509b4
| 13,817
|
py
|
Python
|
virtualenv/lib/python3.6/site-packages/sphinx/apidoc.py
|
coderunn3r/HadoopSpike
|
3e57219d0489fae1d755bc4bd97eaf22f1898464
|
[
"MIT"
] | null | null | null |
virtualenv/lib/python3.6/site-packages/sphinx/apidoc.py
|
coderunn3r/HadoopSpike
|
3e57219d0489fae1d755bc4bd97eaf22f1898464
|
[
"MIT"
] | null | null | null |
virtualenv/lib/python3.6/site-packages/sphinx/apidoc.py
|
coderunn3r/HadoopSpike
|
3e57219d0489fae1d755bc4bd97eaf22f1898464
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.apidoc
~~~~~~~~~~~~~
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx. It also
creates a modules index (named modules.<suffix>).
This is derived from the "sphinx-autopackage" script, which is:
Copyright 2008 Société des arts technologiques (SAT),
http://www.sat.qc.ca/
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import optparse
from os import path
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
else:
OPTIONS = [
'members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(name, text, opts):
"""Write the output file for module/package <name>."""
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
print('Would create file %s.' % fname)
return
if not opts.force and path.isfile(fname):
print('File %s already exists, skipping.' % fname)
else:
print('Creating file %s.' % fname)
f = open(fname, 'w')
try:
f.write(text)
finally:
f.close()
def format_heading(level, text):
"""Create a heading of <level> [1, 2 or 3 supported]."""
underlining = ['=', '-', '~', ][level-1] * len(text)
return '%s\n%s\n\n' % (text, underlining)
def format_directive(module, package=None):
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
directive += ' :%s:\n' % option
return directive
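# For example, format_directive('bar', 'foo') returns (with the default OPTIONS;
# the option lines are indented per the format string above):
#   .. automodule:: foo.bar
#      :members:
#      :undoc-members:
#      :show-inheritance: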
def create_module_file(package, module, opts):
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
else:
text = ''
#text += format_heading(2, ':mod:`%s` Module' % module)
text += format_directive(module, package)
write_file(makename(package, module), text, opts)
def create_package_file(root, master_package, subroot, py_files, opts, subs):
"""Build the text of the file and write the file."""
text = format_heading(1, '%s package' % makename(master_package, subroot))
# build a list of directories that are subpackages (contain an INITPY file)
subs = [sub for sub in subs if path.isfile(path.join(root, sub, INITPY))]
# if there are some package directories, add a TOC for these subpackages
if subs:
text += format_heading(2, 'Subpackages')
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
submods = [path.splitext(sub)[0] for sub in py_files
if not shall_skip(path.join(root, sub), opts)
and sub != INITPY]
if submods:
text += format_heading(2, 'Submodules')
if opts.separatemodules:
text += '.. toctree::\n\n'
for submod in submods:
modfile = makename(master_package, makename(subroot, submod))
text += ' %s\n' % modfile
# generate separate file for this module
if not opts.noheadings:
filetext = format_heading(1, '%s module' % modfile)
else:
filetext = ''
filetext += format_directive(makename(subroot, submod),
master_package)
write_file(modfile, filetext, opts)
else:
for submod in submods:
modfile = makename(master_package, makename(subroot, submod))
if not opts.noheadings:
text += format_heading(2, '%s module' % modfile)
text += format_directive(makename(subroot, submod),
master_package)
text += '\n'
text += '\n'
text += format_heading(2, 'Module contents')
text += format_directive(subroot, master_package)
write_file(makename(master_package, subroot), text, opts)
def create_modules_toc_file(modules, opts, name='modules'):
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts)
def shall_skip(module, opts):
"""Check if we want to skip this module."""
# skip it if there is nothing (or just \n or \r\n) in the file
if path.getsize(module) <= 2:
return True
# skip if it has a "private" name and this is selected
filename = path.basename(module)
if filename != '__init__.py' and filename.startswith('_') and \
not opts.includeprivate:
return True
return False
def recurse_tree(rootpath, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
rootpath = path.normpath(path.abspath(rootpath))
# check if the base directory is a package and get its name
if INITPY in os.listdir(rootpath):
root_package = rootpath.split(path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
followlinks = getattr(opts, 'followlinks', False)
for root, subs, files in os.walk(rootpath, followlinks=followlinks):
if is_excluded(root, excludes):
del subs[:]
continue
# document only Python module files
py_files = sorted(f for f in files
if path.splitext(f)[1] in PY_SUFFIXES)
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != rootpath:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories
subs[:] = sorted(sub for sub in subs if sub[0] not in ['.', '_'])
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(path.join(root, INITPY), opts):
subpackage = root[len(rootpath):].lstrip(path.sep).\
replace(path.sep, '.')
create_package_file(root, root_package, subpackage,
py_files, opts, subs)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == rootpath and root_package is None
for py_file in py_files:
if not shall_skip(path.join(rootpath, py_file), opts):
module = path.splitext(py_file)[0]
create_module_file(root_package, module, opts)
toplevels.append(module)
return toplevels
def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
f_excludes = []
for exclude in excludes:
if not path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = path.join(rootpath, exclude)
f_excludes.append(path.normpath(exclude) + path.sep)
return f_excludes
def is_excluded(root, excludes):
"""
Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
# e.g. an exclude "foo" also accidentally excluding "foobar".
"""
sep = path.sep
if not root.endswith(sep):
root += sep
for exclude in excludes:
if root.startswith(exclude):
return True
return False
def main(argv=sys.argv):
"""
Parse and check the command line arguments.
"""
parser = optparse.OptionParser(
usage="""\
usage: %prog [options] -o <output_path> <module_path> [exclude_paths, ...]
Look recursively in <module_path> for Python modules and packages and create
one reST file with automodule directives per package in the <output_path>.
Note: By default this script will not overwrite already created files.""")
parser.add_option('-o', '--output-dir', action='store', dest='destdir',
help='Directory to place all output', default='')
parser.add_option('-d', '--maxdepth', action='store', dest='maxdepth',
help='Maximum depth of submodules to show in the TOC '
'(default: 4)', type='int', default=4)
parser.add_option('-f', '--force', action='store_true', dest='force',
help='Overwrite existing files')
parser.add_option('-l', '--follow-links', action='store_true',
dest='followlinks', default=False,
help='Follow symbolic links. Powerful when combined '
'with collective.recipe.omelette.')
parser.add_option('-n', '--dry-run', action='store_true', dest='dryrun',
help='Run the script without creating files')
parser.add_option('-e', '--separate', action='store_true',
dest='separatemodules',
help='Put documentation for each module on its own page')
parser.add_option('-P', '--private', action='store_true',
dest='includeprivate',
help='Include "_private" modules')
parser.add_option('-T', '--no-toc', action='store_true', dest='notoc',
help='Don\'t create a table of contents file')
parser.add_option('-E', '--no-headings', action='store_true',
dest='noheadings',
help='Don\'t create headings for the module/package '
'packages (e.g. when the docstrings already contain '
'them)')
parser.add_option('-s', '--suffix', action='store', dest='suffix',
help='file suffix (default: rst)', default='rst')
parser.add_option('-F', '--full', action='store_true', dest='full',
help='Generate a full project with sphinx-quickstart')
parser.add_option('-H', '--doc-project', action='store', dest='header',
help='Project name (default: root module name)')
parser.add_option('-A', '--doc-author', action='store', dest='author',
type='str',
help='Project author(s), used when --full is given')
parser.add_option('-V', '--doc-version', action='store', dest='version',
help='Project version, used when --full is given')
parser.add_option('-R', '--doc-release', action='store', dest='release',
help='Project release, used when --full is given, '
'defaults to --doc-version')
(opts, args) = parser.parse_args(argv[1:])
if not args:
parser.error('A package path is required.')
rootpath, excludes = args[0], args[1:]
if not opts.destdir:
parser.error('An output directory is required.')
if opts.header is None:
opts.header = path.normpath(rootpath).split(path.sep)[-1]
if opts.suffix.startswith('.'):
opts.suffix = opts.suffix[1:]
if not path.isdir(rootpath):
print('%s is not a directory.' % rootpath, file=sys.stderr)
sys.exit(1)
if not path.isdir(opts.destdir):
if not opts.dryrun:
os.makedirs(opts.destdir)
excludes = normalize_excludes(rootpath, excludes)
modules = recurse_tree(rootpath, excludes, opts)
if opts.full:
from sphinx import quickstart as qs
modules.sort()
prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
d = dict(
path = opts.destdir,
sep = False,
dot = '_',
project = opts.header,
author = opts.author or 'Author',
version = opts.version or '',
release = opts.release or opts.version or '',
suffix = '.' + opts.suffix,
master = 'index',
epub = True,
ext_autodoc = True,
ext_viewcode = True,
makefile = True,
batchfile = True,
mastertocmaxdepth = opts.maxdepth,
mastertoctree = text,
)
if not opts.dryrun:
qs.generate(d, silent=True, overwrite=opts.force)
elif not opts.notoc:
create_modules_toc_file(modules, opts)
| 37.958791
| 80
| 0.581602
|
5aedcd9ea35efc8e64aacddccafcceccd0463f47
| 8,403
|
py
|
Python
|
doc/source/conf.py
|
liguowang/epage
|
2ce60ddbcd23f06dc4d635681e8e52b66ba519f9
|
[
"MIT"
] | null | null | null |
doc/source/conf.py
|
liguowang/epage
|
2ce60ddbcd23f06dc4d635681e8e52b66ba519f9
|
[
"MIT"
] | null | null | null |
doc/source/conf.py
|
liguowang/epage
|
2ce60ddbcd23f06dc4d635681e8e52b66ba519f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# CpGtools documentation build configuration file, created by
# sphinx-quickstart on Thu Jue 25 12:09:37 2019.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'epage'
copyright = u'2020, Liguo Wang et al'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#XXXXXXXXXXXXXXXXXXXXX
#html_theme = 'nature'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#XXXXXXXXXXXXXXXXXXXXX
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = ["_themes", ]
#XXXXXXXXXXXXXXXXXXX
html_theme_options = {
'canonical_url': '',
'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'style_nav_header_background': 'white',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False,
'sidebarwidth':150
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "epage documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = 'July 9, 2020'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'epagedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'epage.tex', u'epage Documentation',
u'Liguo Wang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'epage', u'epage Documentation',
[u'Liguo Wang'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'epage', u'epage Documentation',
u'Liguo Wang', 'epage', 'Evaluate Protein Activity with Gene Expression.',
'Bioinformatics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#XXXXXXXXXXXXXXXXXXX
def setup(app):
app.add_stylesheet('custom.css')
| 30.78022
| 80
| 0.712841
|
5616d6dd6b5e5bd1e3e8dfef90a2c1d76566de03
| 2,959
|
py
|
Python
|
abstract-codegen/src/atmfjstc/lib/abstract_codegen/ast/text.py
|
goc9000/python-library
|
0a4a09278df6e84061baedda8997071e2201103f
|
[
"MIT"
] | null | null | null |
abstract-codegen/src/atmfjstc/lib/abstract_codegen/ast/text.py
|
goc9000/python-library
|
0a4a09278df6e84061baedda8997071e2201103f
|
[
"MIT"
] | null | null | null |
abstract-codegen/src/atmfjstc/lib/abstract_codegen/ast/text.py
|
goc9000/python-library
|
0a4a09278df6e84061baedda8997071e2201103f
|
[
"MIT"
] | null | null | null |
from textwrap import dedent, wrap
from typing import Iterable
from atmfjstc.lib.text_utils import split_paragraphs
from atmfjstc.lib.abstract_codegen.CodegenContext import CodegenContext
from atmfjstc.lib.abstract_codegen.ast.base import AbstractCodegenASTNode
class ReflowableText(AbstractCodegenASTNode):
"""
A node that represents a block of text that can be reflowed so as to take up all the available width.
Notes:
- The text will automatically be dedent-ed (thus you can use triple-quote strings to specify it)
- Lines of text separated by a single newline will be merged into a single reflowable paragraph. Paragraphs are
separated by more than one newline.
- This is particularly useful for the text inside comment blocks
- Leading and trailing blank lines will be automatically removed
- Do not put bulleted lists, etc. or other formatting inside the text, as they will not respond to the reflow
correctly. Instead, represent the text using a `Sequence` of `ReflowableText` paragraphs and other nodes to
represent the non-text content.
"""
AST_NODE_CONFIG = (
('PARAM', 'text', dict(type=str)),
)
def render(self, context: CodegenContext) -> Iterable[str]:
parts = split_paragraphs(dedent(self.text).strip("\n"), keep_separators=True)
for i in range(0, len(parts), 2):
if i > 0:
for _ in range(parts[i - 1].count('\n') - 1):
yield ''
for line in wrap(parts[i].rstrip(), width=context.width):
yield line
class WrapText(AbstractCodegenASTNode):
"""
Adds decorations around text content (usually for turning it into a comment block).
How it works:
- The caller will specify a 'head', 'indent' and/or 'tail' (by default all are empty)
- If the content is non-empty, the rendering will look like::
<head>
<indent>content line 1
<indent>content line 2
...
<tail>
- If the content is empty, nothing will be generated (even if head and tail are non-empty)
Notes:
- This is particularly useful for comment blocks (e.g. for a classic doc comment, try head='/**', indent=' * ',
tail=' */')
"""
AST_NODE_CONFIG = (
('CHILD', 'content', dict(type=AbstractCodegenASTNode)),
('PARAM', 'indent', dict(type=str, default='')),
('PARAM', 'head', dict(type=str, default='')),
('PARAM', 'tail', dict(type=str, default='')),
)
def render(self, context: CodegenContext) -> Iterable[str]:
subcontext = context.derive(sub_width=len(self.indent))
first = True
for line in self.content.render(subcontext):
if first and (self.head != ''):
yield self.head
yield (self.indent + line).rstrip()
first = False
if not first and (self.tail != ''):
yield self.tail
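# Minimal usage sketch (hypothetical: it assumes these nodes accept keyword
# arguments matching AST_NODE_CONFIG and that CodegenContext(width=...) is a
# valid constructor call):
#   block = WrapText(
#       content=ReflowableText(text="Some documentation that will be reflowed."),
#       head="/**", indent=" * ", tail=" */",
#   )
#   print("\n".join(block.render(CodegenContext(width=40))))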
| 34.811765
| 115
| 0.640419
|
3acbc7bf73e1abc5f694c29f63268f2fc79aad7c
| 3,486
|
py
|
Python
|
chat_backend/settings.py
|
CTXO/Chat-App
|
98c25905c0f897826e786b18d2d512b063bda957
|
[
"MIT"
] | 2
|
2021-10-03T01:02:49.000Z
|
2021-11-20T13:46:39.000Z
|
chat_backend/settings.py
|
CTXO/Chat-App
|
98c25905c0f897826e786b18d2d512b063bda957
|
[
"MIT"
] | null | null | null |
chat_backend/settings.py
|
CTXO/Chat-App
|
98c25905c0f897826e786b18d2d512b063bda957
|
[
"MIT"
] | null | null | null |
"""
Django settings for chat_backend project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-eeu3t^g4_wvwala8&g5s=-p_el(wde4aoybc6ot@bzunxf-87d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'rest_framework',
'corsheaders'
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'chat_backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chat_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'chatDB',
'USER': 'romero',
'PASSWORD': 'mypassword',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'main.ChatUser'
| 24.723404
| 83
| 0.699369
|
06d11dba522d4b4aaf61dfbaad74271ee37a0f7c
| 1,145
|
py
|
Python
|
vis/grad_modifiers.py
|
chrisPiemonte/keras-vis
|
b1e44e3b480bec0c51b38f85bdc4fbaffc954a31
|
[
"MIT"
] | null | null | null |
vis/grad_modifiers.py
|
chrisPiemonte/keras-vis
|
b1e44e3b480bec0c51b38f85bdc4fbaffc954a31
|
[
"MIT"
] | null | null | null |
vis/grad_modifiers.py
|
chrisPiemonte/keras-vis
|
b1e44e3b480bec0c51b38f85bdc4fbaffc954a31
|
[
"MIT"
] | 1
|
2019-08-07T12:39:21.000Z
|
2019-08-07T12:39:21.000Z
|
from __future__ import absolute_import
import numpy as np
from tensorflow.keras import backend as K
def negate(grads):
"""Negates the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The negated gradients.
"""
return -grads
def absolute(grads):
"""Computes absolute gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The absolute gradients.
"""
return np.abs(grads)
def invert(grads):
"""Inverts the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The inverted gradients.
"""
return 1. / (grads + K.epsilon())
def relu(grads):
"""Clips negative gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The rectified gradients.
"""
grads[grads < 0.] = 0.
return grads
def small_values(grads):
"""Can be used to highlight small gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The modified gradients that highlight small values.
"""
return absolute(invert(grads))
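# Quick illustration (values are arbitrary):
#   grads = np.array([2.0, -0.001, 0.5])
#   negate(grads)        # -> [-2.0, 0.001, -0.5]
#   small_values(grads)  # -> abs(1 / (grads + epsilon)), roughly [0.5, 1000.0, 2.0]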
| 17.348485
| 59
| 0.614847
|
291ffabb09df503c0497fd82cc9202748ceb8a98
| 1,601
|
py
|
Python
|
src/baboon_tracking/stages/motion_detector/quantize_history_frames.py
|
philipper905/baboon-tracking
|
d68735a7889a09516610f77969550aec74b769f2
|
[
"MIT"
] | null | null | null |
src/baboon_tracking/stages/motion_detector/quantize_history_frames.py
|
philipper905/baboon-tracking
|
d68735a7889a09516610f77969550aec74b769f2
|
[
"MIT"
] | null | null | null |
src/baboon_tracking/stages/motion_detector/quantize_history_frames.py
|
philipper905/baboon-tracking
|
d68735a7889a09516610f77969550aec74b769f2
|
[
"MIT"
] | null | null | null |
"""Quantizes the shifted history frame."""
import numpy as np
from baboon_tracking.mixins.shifted_history_frames_mixin import (
ShiftedHistoryFramesMixin,
)
from baboon_tracking.models.frame import Frame
from baboon_tracking.mixins.quantized_frames_mixin import QuantizedFramesMixin
from pipeline.decorators import config, stage
from pipeline.stage import Stage
from pipeline.stage_result import StageResult
@config(parameter_name="scale_factor", key="quantize_frames/scale_factor")
@stage("shifted_history_frames")
class QuantizeHistoryFrames(Stage, QuantizedFramesMixin):
"""Quantizes the shifted history frame."""
def __init__(
self, scale_factor: float, shifted_history_frames: ShiftedHistoryFramesMixin
):
QuantizedFramesMixin.__init__(self)
Stage.__init__(self)
self._scale_factor = scale_factor
self._shifted_history_frames = shifted_history_frames
def _quantize_frame(self, frame: Frame):
"""
Normalize pixel values from 0-255 to values from 0-self._scale_factor
Returns quantized frame
"""
return (
np.floor(frame.get_frame().astype(np.float32) * self._scale_factor / 255.0)
.astype(np.uint8)
.astype(np.int32)
)
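# Worked example (scale_factor comes from quantize_frames/scale_factor; the
# value 8 below is hypothetical): a pixel value of 200 maps to
# floor(200 * 8 / 255) = 6, and 255 maps to 8.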
def execute(self) -> StageResult:
"""Quantizes the shifted history frame."""
self.quantized_frames = [
self._quantize_frame(f)
for f in self._shifted_history_frames.shifted_history_frames
]
return StageResult(True, True)
| 34.06383
| 88
| 0.690194
|
273e420cf8e81a491a1bc5b137a7eca834f277ec
| 3,759
|
py
|
Python
|
worlds/admin.py
|
veselosky/storyworlds
|
7ad6f09787d0a4566abd2b29472e4faf84f8ea34
|
[
"Apache-2.0"
] | null | null | null |
worlds/admin.py
|
veselosky/storyworlds
|
7ad6f09787d0a4566abd2b29472e4faf84f8ea34
|
[
"Apache-2.0"
] | 3
|
2020-02-12T00:30:03.000Z
|
2021-03-18T22:20:33.000Z
|
worlds/admin.py
|
veselosky/storyworlds
|
7ad6f09787d0a4566abd2b29472e4faf84f8ea34
|
[
"Apache-2.0"
] | null | null | null |
from adminsortable2.admin import SortableInlineAdminMixin
from django.contrib import admin
from django.contrib.gis import admin as geoadmin
from .models import (
Character,
Event,
EventParticipation,
FamilyTie,
Honor,
Organization,
Place,
Reference,
Setting,
Title,
World,
)
# ======================================================================
# Inlines used in entity admins
# ======================================================================
class EventParticipationInline(admin.TabularInline):
model = Event.participants.through
# For the inline, just show and allow the event association. To edit timespans
# or other properties, go to the Event Participation admin.
fields = ("character", "role")
class ChildrenInline(SortableInlineAdminMixin, admin.TabularInline):
model = Character.children.through
fk_name = "parent" # Relations where the current character is parent
extra = 1
class ParentsInline(SortableInlineAdminMixin, admin.TabularInline):
model = Character.parents.through
fk_name = "child" # Relations where the current character is child
extra = 1
class HonorsInline(admin.TabularInline):
model = Honor
extra = 1
fields = ("org", "start_year", "start_month", "start_day", "end_year", "end_month", "end_day")
class CharacterTitlesInline(admin.TabularInline):
model = Title
extra = 1
class ReferencesInline(admin.TabularInline):
model = Reference
extra = 1
# ======================================================================
# Admins for primary entities
# ======================================================================
@admin.register(World)
class WorldAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
@admin.register(Place)
class PlaceAdmin(geoadmin.OSMGeoAdmin):
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name",)
@admin.register(Setting)
class SettingAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name",)
@admin.register(Organization)
class OrgAdmin(geoadmin.OSMGeoAdmin):
fields = (
"world",
("name", "slug"),
"time_type",
("start_year", "start_month", "start_day", "start_time"),
("end_year", "end_month", "end_day", "end_time"),
"tags",
"notes",
)
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name",)
@admin.register(Character)
class CharacterAdmin(admin.ModelAdmin):
fields = (
"world",
("name", "slug"),
("start_year", "start_month", "start_day", "start_time"),
("end_year", "end_month", "end_day", "end_time"),
"tags",
"notes",
)
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name",)
inlines = [ParentsInline, ChildrenInline, CharacterTitlesInline, HonorsInline]
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
fields = (
"world",
("name", "slug"),
"time_type",
("start_year", "start_month", "start_day", "start_time"),
("end_year", "end_month", "end_day", "end_time"),
"place",
"tags",
"notes",
)
prepopulated_fields = {"slug": ("name",)}
search_fields = ("name",)
inlines = [EventParticipationInline]
list_display = ("start_year", "start_month", "start_day", "name")
list_display_links = ("name",)
@admin.register(EventParticipation)
class EventParticipationAdmin(admin.ModelAdmin):
empty_value_display = "unknown"
@admin.register(FamilyTie)
class FamilyTieAdmin(admin.ModelAdmin):
empty_value_display = "unknown"
@admin.register(Reference)
class ReferenceAdmin(admin.ModelAdmin):
pass
| 26.471831
| 98
| 0.611865
|
544b715e064b6b7ed42de12f7249324adf021ef4
| 1,318
|
py
|
Python
|
test/lib/state.py
|
morgante/cnrm-blueprints
|
34453c4acde2cd321f71b76b3e6c6b086bc8ada1
|
[
"Apache-2.0"
] | 9
|
2020-07-10T18:20:19.000Z
|
2021-10-08T23:58:06.000Z
|
test/lib/state.py
|
morgante/cnrm-blueprints
|
34453c4acde2cd321f71b76b3e6c6b086bc8ada1
|
[
"Apache-2.0"
] | 1
|
2021-09-27T20:38:35.000Z
|
2021-09-27T20:38:35.000Z
|
test/lib/state.py
|
isabella232/cnrm-blueprints
|
19d7c459c4f71198208282da17bcade53d28cc9c
|
[
"Apache-2.0"
] | 4
|
2020-07-10T23:22:20.000Z
|
2021-09-27T19:27:02.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import json
import e2e
def save_state(state):
p = os.path.join(e2e.artifacts_dir(), "state.json")
with open(p, "w") as f:
json.dump(state, f, sort_keys=True, indent=4, separators=(",", ": "))
def load_state():
p = os.path.join(e2e.artifacts_dir(), "state.json")
state = {}
if os.path.exists(p):
with open(p) as f:
state = json.load(f)
return state
def update_state(state, add):
m = merge_dicts(state, add)
for k, v in m.items():
state[k] = v
def merge_dicts(l, r):
m = {**l, **r}
for k, v in m.items():
if k in l and k in r:
if isinstance(r[k], dict):
m[k] = merge_dicts(l[k], r[k])
return m
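# A minimal usage sketch (hypothetical values), showing that update_state()
# merges nested dicts recursively instead of overwriting them wholesale:
#   state = {"cluster": {"name": "test", "nodes": 3}}
#   update_state(state, {"cluster": {"nodes": 5}, "done": True})
#   # state is now {"cluster": {"name": "test", "nodes": 5}, "done": True}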
| 26.36
| 77
| 0.640364
|
5e18e62000d6c7b649a59b4f642719462c1f8c59
| 48,180
|
py
|
Python
|
Python/multiscale_quad_retrieval.py
|
NeTatsu/video-diff
|
c2eb75373d20aefc82a0d8d198eddd7eb9b9675a
|
[
"BSD-3-Clause"
] | 8
|
2017-03-20T16:40:04.000Z
|
2021-12-21T11:38:33.000Z
|
Python/multiscale_quad_retrieval.py
|
NeTatsu/video-diff
|
c2eb75373d20aefc82a0d8d198eddd7eb9b9675a
|
[
"BSD-3-Clause"
] | 2
|
2019-12-19T23:22:05.000Z
|
2020-01-27T06:51:23.000Z
|
Python/multiscale_quad_retrieval.py
|
NeTatsu/video-diff
|
c2eb75373d20aefc82a0d8d198eddd7eb9b9675a
|
[
"BSD-3-Clause"
] | 4
|
2017-02-22T12:39:29.000Z
|
2019-12-18T22:33:02.000Z
|
# !!!!TODO: take out DBGPRINT - it's already replaced with common.MY_DEBUG_STDOUT (I think):
import math
import numpy as np
from numpy import linalg as npla
import cv2
import common
import config
import findquads
import spatial_consistency
import Matlab
DBGPRINT = False
#DBGPRINT = True
#FILTER = False
FILTER = True
USE_GPS_COORDINATES = False
if config.USE_MULTITHREADING == True:
import threading
from threading import Thread
import multiprocessing
class Globals:
r_quadsTree = None;
r_harlocs = None;
q_harlocs = None;
md_threshold = None;
st_threshold = None;
all_ori = None;
all_id = None;
all_max = None;
all_cen = None;
nos = None;
scale_index = None;
cropflag = None;
sequence = None;
RD_start = None;
RD_end = None;
MAXDIS = None;
MAXORI = None;
tolers = None;
g = Globals();
"""
When parallelizing multiscale_quad_retrieval(), we will obtain
slightly different results for crossref (etc) - see crossref.txt.
Note that when running in serial (without a POOL), we obtain the same result ALWAYS.
(less relevant: if we run only 1 process in pool instead of 3, we get
fewer changes in crossref.txt - NOT sure if this means something).
The reason appears to be that we are running the FLANN KD-tree implementation,
which is an approximate NN search library employing randomization
(see http://answers.opencv.org/question/32664/flannbasedmatcher-returning-different-results/).
Presumably the random number sequence, even when starting from the same seed,
evolves differently in the serial case than in parallel
threads, so the results tend to differ.
"""
def IterationStandaloneMQR(queryFrame):
r_quadsTree = g.r_quadsTree;
r_harlocs = g.r_harlocs;
q_harlocs = g.q_harlocs;
md_threshold = g.md_threshold;
st_threshold = g.st_threshold;
all_ori = g.all_ori;
all_id = g.all_id;
all_max = g.all_max;
all_cen = g.all_cen;
nos = g.nos;
scale_index = g.scale_index;
cropflag = g.cropflag;
sequence = g.sequence;
RD_start = g.RD_start;
RD_end = g.RD_end;
MAXDIS = g.MAXDIS;
MAXORI = g.MAXORI;
tolers = g.tolers;
"""
common.DebugPrint( \
"Entered IterationStandaloneMQR(): crossref=%s, captureQ=%s, "\
"captureR=%s, refined_crossref=%s, warp_p=%s, "
"x0=%s, y0=%s, start=%s, t=%d, iWhile=%d." % \
(str(crossref), str(captureQ), str(captureR), \
str(g.refined_crossref), str(g.warp_p), \
str(g.x0), str(g.y0), str(g.start), g.t, iWhile));
common.DebugPrint("IterationStandalone(): id(g)=%s" % str(id(g)));
"""
# tic
"""
str1=['load ' q_path QD(q).name]
eval(str1)
"""
"""
We make pp reference the desired multiharloc list for the query video
frame queryFrame
"""
pp = q_harlocs[queryFrame];
#pp = np.array(pp);
#common.DebugPrint("multiscale_quad_retrieval(): pp = %s" % str(pp));
"""
Alex: for the query frame queryFrame we retrieve, for scale scale_index, the
harris features in var points.
Then we build the quads from points.
Then for each quad (4 float values) we query the corresponding scale
kd-tree, and we get the indices.
Then we build the histogram and compute idf, ....!!!!
Note: scale is 1 for original frame resolution and the higher
we go we have lower image resolutions (we go higher in the
Guassian pyramid I think).
"""
#[qout,qcen,qmaxdis,qori]=findquads(pp(pp(:,3)==scale_index,1:2),md_threshold,0);
points = pp[pp[:, 2] == scale_index, 0:2];
qout, qcen, qmaxdis, qori = findquads.findquads(points, md_threshold, 0);
common.DebugPrint("multiscale_quad_retrieval(): queryFrame = %d, " \
"qout.shape = %s" % (queryFrame, str(qout.shape)));
# disp([num2str(q) ' of ' num2str(length(QD)) ' -> ' num2str(size(qout,1)) ' quads'])
#space_xy=zeros(size(qcen,1),2*length(RD))+nan;
#space_xy = np.zeros( (qcen.shape[0], 2 * len(RD)) ) + np.nan;
space_xy = np.zeros( (qcen.shape[0], 2 * len(r_harlocs)) ) + np.nan;
# votes=zeros(length(RD),1)
#votes=zeros(length(RD),length(tolers));
#votes = np.zeros( (len(RD), 1) );
votes = np.zeros( (len(r_harlocs), 1) );
#nep = np.array([]);
#m_points = np.array([]);
assert isinstance(tolers, float);
"""
We substitute queryFrameQuad - 1 with queryFrameQuad, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for queryFrameQuad in range(1, qout.shape[0] + 1):
for queryFrameQuad in range(qout.shape[0]):
"""
Matlab's polymorphism is really bugging here: although it's
normally a float, tolers is considered to be a size 1 vector...
so len(tolers) == 1
"""
#for tol_i in range(1, len(tolers) + 1):
# tol = tolers[tol_i - 1]
"""
We substitute tol_i - 1 with tol, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for tol_i in range(1, 1 + 1):
for tol_i in range(1):
tol = tolers;
#common.DebugPrint("multiscale_quad_retrieval(): qout[i - 1, :] = %s" % str(qout[i - 1, :]))
#% default for first PAMI with tol= 0.1 approximately
# NOTE: SciPy's KDTree finds a few more results, in some cases,
# than the Matlab code from Evangelidis.
#idx, di = kdtree_ball_query(tree, qout(i, :), tol)
#idx, distKD = kdtree_ball_query(tree, qout[i - 1, :], tol)
#idx, di = tree.query(x=xQuery, k=4)
#resPoints = [data[i] for i in resBallIndices]
# tol is a scalar representing the radius of the ball
if config.KDTREE_IMPLEMENTATION == 0:
idx = r_quadsTree.query_ball_point(qout[queryFrameQuad, :], tol);
elif config.KDTREE_IMPLEMENTATION == 1:
#pt = qout[queryFrameQuad - 1, :].astype(np.float32);
pt = qout[queryFrameQuad, :];
pt = np.array([[pt[0], pt[1], pt[2], pt[3]]], dtype=np.float32);
retval, idx, dists = r_quadsTree.radiusSearch( \
query=pt, \
radius=(tol**2), \
maxResults=NUM_MAX_ELEMS, \
params=search_params);
if common.MY_DEBUG_STDOUT and DBGPRINT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"retval (number NNs) = %s" % str(retval));
"""
common.DebugPrint( \
"multiscale_quad_retrieval(): radiusSearch's retval " \
"(at queryFrame=%d, queryFrameQuad=%d) is %d\n" % (queryFrame, queryFrameQuad, retval));
idx = idx[0];
dists = dists[0];
idx = idx[: retval];
dists = dists[: retval];
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): " \
"qout[queryFrameQuad, :] = %s" % str(qout[queryFrameQuad, :]));
print("multiscale_quad_retrieval(): " \
"idx = %s" % str(idx));
print("multiscale_quad_retrieval(): " \
"tol = %s" % str(tol));
if config.KDTREE_IMPLEMENTATION == 0:
print("multiscale_quad_retrieval(): " \
"r_quadsTree.data[idx] = %s" % \
str(r_quadsTree.data[idx]));
# We print the distances to the points returned in idx
a = qout[queryFrameQuad, :];
if False: #!!!! This is just for debugging purposes
for myI, index in enumerate(idx):
b = r_quadsTree.data[index];
"""
if False:
common.DebugPrint("multiscale_quad_retrieval(): distance to " \
"%d point (%s) inside ball = %.4f" % \
(myI, str(b), npla.norm(a - b)));
"""
idx = np.array(idx);
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_max.shape = %s" % str(all_max.shape));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qmaxdis.shape = %s" % str(qmaxdis.shape));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qmaxdis = %s" % str(qmaxdis));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qori.shape = %s" % str(qori.shape));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qori = %s" % str(qori));
#dis_idx=abs(qmaxdis(i)-all_max(idx))<MAXDIS;
if len(idx) == 0:
# NOT A GOOD IDEA: continue;
#idx = np.array([]);
dis_idx = np.array([]);
ori_idx = np.array([]);
else:
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"queryFrameQuad = %s" % str(queryFrameQuad));
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_max[idx] = %s" % str(all_max[idx]));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qmaxdis[queryFrameQuad] = %s" % str(qmaxdis[queryFrameQuad]));
dis_idx = np.abs(qmaxdis[queryFrameQuad] - all_max[idx]) < MAXDIS;
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx = %s" % str(idx));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"dis_idx = %s" % str(dis_idx));
#idx=idx(dis_idx)
idx = idx[dis_idx];
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx (after idx = idx[dis_idx]) = %s" % str(idx));
#ori_idx=abs(qori(i)-all_ori(idx))<MAXORI;
ori_idx = np.abs(qori[queryFrameQuad] - all_ori[idx]) < MAXORI;
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_ori = %s" % str(all_ori));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qori[queryFrameQuad] = %s" % str(qori[queryFrameQuad]));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"ori_idx = %s" % str(ori_idx));
#idx=idx(ori_idx);
idx = idx[ori_idx];
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# IMPORTANT ###################################################
#% spatio-temporal consistency
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# IMPORTANT ###################################################
#if numel(idx) > 0:
if idx.size > 0:
# Normally cropflag == 0
if cropflag == 0:
dy = qcen[queryFrameQuad, 0] - all_cen[idx, 0];
dx = qcen[queryFrameQuad, 1] - all_cen[idx, 1];
#D=dy.^2+dx.^2;
D = dy**2 + dx**2;
co_idx = D < pow(st_threshold, 2);
idx = idx[co_idx];
else:
"""
We substitute iii - 1 with iii, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for iii in range(1, len(idx) + 1):
for iii in range(len(idx)):
#space_xy(i,(all_id(idx(iii))-RD_start)*2+1:(all_id(idx(iii))-RD_start)*2+2) = all_cen(idx(iii),:)
space_xy[queryFrameQuad, \
(all_id[idx[iii]] - RD_start) * 2: (all_id[idx[iii]] - RD_start) * 2 + 2] = \
all_cen[idx[iii], :]
#hh=hist(all_id(idx),RD_start:RD_end);
# It has to be an np.array because we multiply it with a scalar
histoRange = np.array(range(RD_start, RD_end + 1));
hh = Matlab.hist(x=all_id[idx], binCenters=histoRange);
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"hh = %s" % (str(hh)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"hh.shape = %s" % (str(hh.shape)));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_id = %s" % (str(all_id)));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_id.shape = %s" % (str(all_id.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx = %s" % (str(idx)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx.shape = %s" % (str(idx.shape)));
# % nz can be computed more optimally
#nz=find(hh~=0); # nz can be computed more optimally
# np.nonzero() always returns a tuple, even if it contains 1 element since hh has only 1 dimension
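# (for a 1-D hh, np.flatnonzero(hh) would give the same indices in a single call)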
nz = np.nonzero(hh != 0)[0];
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"nz = %s" % (str(nz)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"nz.shape = %s" % (str(nz.shape)));
#if numel(nz) > 0:
if nz.size > 0:
#%%----text-retrieval-like
#votes(nz, tol_i) = votes(nz, tol_i) + log10(length(RD) / (length(nz)))^2 #Note: log10(a)^2 means (log10(a))^2 #PREVIOUSLY
#myVal = pow(math.log10(float(len(RD)) / len(nz)), 2);
myVal = pow(math.log10(float(len(r_harlocs)) / len(nz)), 2);
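# e.g. (illustrative numbers only): 1000 reference harlocs and 10 distinct frames hit gives (log10(1000 / 10))**2 = 4.0 added to each of those frames' votes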
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(RD) = %d" % len(RD));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(r_harlocs) = %d" % len(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(nz) = %d" % len(nz));
common.DebugPrint("multiscale_quad_retrieval(): " \
"myVal = %.5f" % myVal);
# PREVIOUSLY
votes[nz, tol_i] = votes[nz, tol_i] + myVal;
# votes(nz)=votes(nz)+log10(length(RD)/(length(nz)));
# votes(nz)=votes(nz)+1;
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"Votes_space.shape = %s" % (str(Votes_space.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes.shape = %s" % (str(votes.shape)));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes.shape = %s" % (str(votes.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes = %s" % (str(votes)));
return (queryFrame, np.ravel(votes));
# NOT performing these in each worker - the central dispatcher will do these
if False:
#Votes_space(:,q)=votes;
# Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
#Votes_space[:, queryFrame - 1] = votes;
Votes_space[:, queryFrame] = np.ravel(votes);
if cropflag == 0:
HH[:, queryFrame] = 1;
else:
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(RD), st_threshold, cropflag);
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(r_harlocs), st_threshold, cropflag);
"""
From http://www.mathworks.com/help/matlab/matlab_prog/symbol-reference.html:
Dot-Dot-Dot (Ellipsis) - ...
A series of three consecutive periods (...) is the line continuation operator in MATLAB.
Line Continuation
Continue any MATLAB command or expression by placing an ellipsis at the end of the line to be continued:
"""
NUM_MAX_ELEMS = 100000;
search_params = dict(checks=1000000000); # Gives fewer results than scipy's tree.query_ball_point when we have 65K features
# returns Votes_space, HH
# Alex: r_harlocs and q_harlocs are the corresponding lists of harlocs computed
"""
md_threshold = max-distance threshold used to build quads out of Harris features
st_threshold = threshold value for spatio-temporal consistency (coherence)
all_ori, all_id, all_max, all_cen = orientation, reference frame ids, max distances,
respectively centroids coordinates of each
reference quad for scale scale_index
"""
def multiscale_quad_retrieval(r_quadsTree, r_harlocs, q_harlocs, md_threshold, st_threshold, \
all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag, \
sequence):
common.DebugPrint("Entered multiscale_quad_retrieval(): " \
"md_threshold = %s, st_threshold = %s." % \
(str(md_threshold), \
str(st_threshold)));
assert len(r_harlocs) != 0;
assert len(q_harlocs) != 0;
try:
Votes_space = np.load("Votes_space%d.npz" % scale_index)['arr_0'];
HH = np.load("HH%d.npz" % scale_index)['arr_0'];
return Votes_space, HH;
except:
common.DebugPrintErrorTrace();
if common.MY_DEBUG_STDOUT and DBGPRINT:
common.DebugPrint("multiscale_quad_retrieval(): r_quadsTree = %s" % \
str(r_quadsTree));
common.DebugPrint("multiscale_quad_retrieval(): len(r_harlocs) = %d" % len(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): r_harlocs = %s" % str(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): q_harlocs = %s" % str(q_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): md_threshold = %s" % str(md_threshold));
print("multiscale_quad_retrieval(): st_threshold = %s" % str(st_threshold));
#common.DebugPrint("multiscale_quad_retrieval(): all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag = %s" % str(all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag));
common.DebugPrint("multiscale_quad_retrieval(): all_id = %s" % str(all_id));
common.DebugPrint("multiscale_quad_retrieval(): all_id.shape = %s" % (str(all_id.shape)));
#common.DebugPrint("multiscale_quad_retrieval(): all_max, all_cen, nos, scale_index, cropflag = %s" % str(all_max, all_cen, nos, scale_index, cropflag));
#common.DebugPrint("multiscale_quad_retrieval(): all_max = %s" % str(all_max));
#common.DebugPrint("multiscale_quad_retrieval(): all_cen, nos, scale_index, cropflag = %s" % str(all_cen, nos, scale_index, cropflag));
common.DebugPrint("multiscale_quad_retrieval(): sequence = %s" % str(sequence));
print("multiscale_quad_retrieval(): cropflag = %s" % str(cropflag));
t1 = float(cv2.getTickCount());
if scale_index > nos:
assert scale_index <= nos;
#error('Wrong scale index or number-of-scales');
#QD = dir([q_path "multiharlocs*.mat"])
#QD = [q_path + "multiharlocs*.mat"]
#QD = q_harlocs;
#RD = dir([r_path "multiharlocs*.mat"])
#RD = [r_path + "multiharlocs*.mat"]
#RD = r_harlocs;
#TODO: take out RD_start
#RD_start = str2num(RD(1).name(end - 9 : end - 4))
#RD_start = int(RD[0][-9 : -4])
RD_start = 0;
#RD_end = str2num(RD(end).name(end - 9 : end - 4))
#RD_end = int(RD[-1][-9 : -4])
#RD_end = len(RD) - 1;
RD_end = len(r_harlocs) - 1;
if False: # n_d not used anywhere
#n_d = hist(all_id, RD_start : RD_end)
#n_d = hist[all_id, RD_start : RD_end]
n_d = Matlab.hist(x=all_id, \
binCenters=np.array(range(RD_start, RD_end + 1)) );
#cross_indices = np.zeros( (len(QD), 2) );
cross_indices = np.zeros( (len(q_harlocs), 2) );
j = 1;
#tic
#ORI = np.array([]); # ORI NOT used anywhere
"""
Inspired from
https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
BUT doesn't help in this case:
Votes_space = np.asfortranarray(np.zeros( (len(RD), len(QD)) ));
"""
#Votes_space = np.zeros( (len(RD), len(QD)) );
Votes_space = np.zeros( (len(r_harlocs), len(q_harlocs)) );
# Make a distinct copy of HH from Votes_space...
#HH = Votes_space.copy().astype(np.int16); #Votes_space + 0;
#HH = np.zeros((len(RD), len(QD)), dtype=np.int8);
HH = np.zeros((len(r_harlocs), len(q_harlocs)), dtype=np.int8); #!!!!TODO use MAYBE even np.bool - OR take it out
#common.DebugPrint("multiscale_quad_retrieval(): Votes_space = %s,\n HH = %s" % (str(Votes_space), str(HH)))
tolers = 0.1 - float(scale_index) / 100.0; # it helps to make the threshold stricter as the scale index goes up
# tolers = 0.15 - float(scale_index) / 100.0;
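# For instance, scale_index = 1 gives tolers = 0.09, while scale_index = 5 gives tolers = 0.05.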
MAXDIS = 3 + scale_index;
MAXORI = 0.25;
"""
!!!!TODO TODO: I am using multiprocessing.Pool and return votes;
the dispatcher assembles the results,
but the results are NOT the same as in the serial case - although they
look pretty decent, they seem to be suboptimal - dp_Alex returns a
suboptimal cost path for USE_MULTITHREADING == True instead of
False.
(Note: running multiscale_quad_retrieval under the same preconditions,
I got the same results in dp_Alex().)
"""
if False: #config.USE_MULTITHREADING == True:
global g;
g.r_quadsTree = r_quadsTree;
g.r_harlocs = r_harlocs;
g.q_harlocs = q_harlocs;
g.md_threshold = md_threshold;
g.st_threshold = st_threshold;
g.all_ori = all_ori;
g.all_id = all_id;
g.all_max = all_max;
g.all_cen = all_cen;
g.nos = nos;
g.scale_index = scale_index;
g.cropflag = cropflag;
g.sequence = sequence;
g.RD_start = RD_start;
g.RD_end = RD_end;
g.MAXDIS = MAXDIS;
g.MAXORI = MAXORI;
g.tolers = tolers;
"""
Start worker processes to make use of a multi-core processor (they can run
in parallel - no GIL issue, since each worker process has its own VM)
"""
pool = multiprocessing.Pool(processes=config.numProcesses);
print("multiscale_quad_retrieval(): Spawned a pool of %d workers" % \
config.numProcesses);
listParams = range(0, len(q_harlocs)); #!!!!TODO: use counterStep, config.initFrame[indexVideo]
#res = pool.map(IterationStandaloneMQR, listParams);
# See https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool
res = pool.map(func=IterationStandaloneMQR, iterable=listParams, \
chunksize=1);
print("Pool.map returns %s" % str(res)); #x0.size + 1
"""
From https://medium.com/building-things-on-the-internet/40e9b2b36148
close the pool and wait for the work to finish
"""
pool.close();
pool.join();
# Doing the "reduce" phase after the workers have finished :)
assert len(res) == len(q_harlocs);
for queryFrame, resE in enumerate(res):
resEIndex = resE[0];
resE = resE[1];
assert resEIndex == queryFrame;
# Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
#Votes_space[:, queryFrame - 1] = votes;
Votes_space[:, queryFrame] = resE;
for queryFrame in range(len(q_harlocs)):
if cropflag == 0:
HH[:, queryFrame] = 1;
else:
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(RD), st_threshold, cropflag);
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(r_harlocs), st_threshold, cropflag);
try:
np.savez_compressed("Votes_space%d" % scale_index, Votes_space);
np.savez_compressed("HH%d" % scale_index, HH);
except:
common.DebugPrintErrorTrace();
return Votes_space, HH;
"""
We substitute q - 1 with q, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for q=1:length(QD)
#for q in range(1, len(QD) + 1):
#for queryFrame in range(len(QD)):
for queryFrame in range(len(q_harlocs)):
common.DebugPrint("multiscale_quad_retrieval(): Starting iteration queryFrame = %d" % queryFrame);
# tic
"""
str1=['load ' q_path QD(q).name]
eval(str1)
"""
"""
We make pp reference the desired multiharloc list for the query video
frame queryFrame
"""
pp = q_harlocs[queryFrame];
#pp = np.array(pp);
#common.DebugPrint("multiscale_quad_retrieval(): pp = %s" % str(pp));
#[qout,qcen,qmaxdis,qori]=findquads(pp(pp(:,3)==scale_index,1:2),md_threshold,0);
points = pp[pp[:, 2] == scale_index, 0:2];
qout, qcen, qmaxdis, qori = findquads.findquads(points, md_threshold, 0);
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): queryFrame = %d, " \
"qout.shape (number of quads for query frame queryFrame) = %s" % \
(queryFrame, str(qout.shape)));
# disp([num2str(q) ' of ' num2str(length(QD)) ' -> ' num2str(size(qout,1)) ' quads'])
#space_xy=zeros(size(qcen,1),2*length(RD))+nan;
#space_xy = np.zeros( (qcen.shape[0], 2 * len(RD)) ) + np.nan;
space_xy = np.zeros( (qcen.shape[0], 2 * len(r_harlocs)) ) + np.nan;
# votes=zeros(length(RD),1)
#votes=zeros(length(RD),length(tolers));
#votes = np.zeros( (len(RD), 1) );
votes = np.zeros( (len(r_harlocs), 1) );
#nep = np.array([]);
#m_points = np.array([]);
assert isinstance(tolers, float);
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): quads of query frame %d are: " % queryFrame);
common.DebugPrint(" qout = %s" % str(qout));
"""
Alex: for each quad (4 floats) of the query frame from Harris feature of scale scale_index
Note: all_id stores the reference frame id for each quad descriptor.
"""
"""
We substitute queryFrameQuad - 1 with queryFrameQuad, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for queryFrameQuad in range(1, qout.shape[0] + 1):
for queryFrameQuad in range(qout.shape[0]):
common.DebugPrint("multiscale_quad_retrieval(): Starting iteration queryFrameQuad = %d" % queryFrameQuad);
"""
Matlab's polymorphism is really bugging here: although it's
normally a float, tolers is considered to be a size 1 vector...
so len(tolers) == 1
"""
#for tol_i in range(1, len(tolers) + 1):
# tol = tolers[tol_i - 1]
"""
We substitute tol_i - 1 with tol, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for tol_i in range(1, 1 + 1):
for tol_i in range(1):
tol = tolers;
"""
# TODO: done below - take out this dbg print
if DBGPRINT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"qout[queryFrameQuad, :] = %s" % \
str(qout[queryFrameQuad, :]))
"""
#% default for first PAMI with tol= 0.1 approximately
# NOTE: SciPy's KDTree finds a few more results, in some cases,
# than the Matlab code from Evangelidis.
#idx, di = kdtree_ball_query(tree, qout(i, :), tol)
#idx, distKD = kdtree_ball_query(tree, qout[i - 1, :], tol)
#idx, di = tree.query(x=xQuery, k=4)
#resPoints = [data[i] for i in resBallIndices]
# tol is a scalar representing the radius of the ball
if config.KDTREE_IMPLEMENTATION == 0:
idx = r_quadsTree.query_ball_point(qout[queryFrameQuad, :], tol);
elif config.KDTREE_IMPLEMENTATION == 1:
#pt = qout[queryFrameQuad - 1, :].astype(np.float32);
pt = qout[queryFrameQuad, :];
pt = np.array([[pt[0], pt[1], pt[2], pt[3]]], dtype=np.float32);
retval, idx, dists = r_quadsTree.radiusSearch( \
query=pt, \
radius=(tol**2), \
maxResults=NUM_MAX_ELEMS, \
params=search_params);
if common.MY_DEBUG_STDOUT and DBGPRINT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"retval (number NNs) = %s" % str(retval));
"""
common.DebugPrint( \
"multiscale_quad_retrieval(): radiusSearch's retval " \
"(at queryFrame=%d, queryFrameQuad=%d) is %d" % (queryFrame, queryFrameQuad, retval));
idx = idx[0];
dists = dists[0];
"""
Note: retval is the number of neighbors returned from the radiusSearch().
But the idx and the dists can have more elements than the returned retval.
"""
idx = idx[: retval];
dists = dists[: retval];
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): " \
"qout[queryFrameQuad, :] = %s" % str(qout[queryFrameQuad, :]));
print("multiscale_quad_retrieval(): " \
"idx = %s" % str(idx));
print("multiscale_quad_retrieval(): " \
"dists = %s" % str(dists));
print("multiscale_quad_retrieval(): " \
"tol = %s" % str(tol));
if config.KDTREE_IMPLEMENTATION == 0:
print("multiscale_quad_retrieval(): " \
"r_quadsTree.data[idx] = %s" % \
str(r_quadsTree.data[idx]));
# We print the distances to the points returned in idx
if common.MY_DEBUG_STDOUT and DBGPRINT: # This is just for debugging purposes
a = qout[queryFrameQuad, :];
if config.KDTREE_IMPLEMENTATION == 0:
for myI, index in enumerate(idx):
b = r_quadsTree.data[index];
"""
if False:
common.DebugPrint("multiscale_quad_retrieval(): distance to " \
"%d point (%s) inside ball = %.4f" % \
(myI, str(b), npla.norm(a - b)));
"""
else:
pass;
idx = np.array(idx);
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_max.shape = %s" % str(all_max.shape));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qmaxdis.shape = %s" % str(qmaxdis.shape));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qmaxdis = %s" % str(qmaxdis));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qori.shape = %s" % str(qori.shape));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qori = %s" % str(qori));
#dis_idx=abs(qmaxdis(i)-all_max(idx))<MAXDIS;
if len(idx) == 0:
# NOT A GOOD IDEA: continue;
#idx = np.array([]);
dis_idx = np.array([]);
ori_idx = np.array([]);
else:
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): " \
"queryFrameQuad = %s" % str(queryFrameQuad));
print("multiscale_quad_retrieval(): " \
"all_max[idx] = %s" % str(all_max[idx]));
print("multiscale_quad_retrieval(): " \
"qmaxdis[queryFrameQuad] = %s" % str(qmaxdis[queryFrameQuad]));
if USE_GPS_COORDINATES:
# We look only at a part of the reference video
"""
Since in some cases the video temporal alignment is
difficult due to similar-looking portions of the
trajectory (see the drone videos, clip 3_some_lake),
we "guide" the temporal alignment by restricting
the reference frame search space - this is useful
when we have the geolocation (GPS) coordinates for
each frame.
"""
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): " \
"all_id = %s" % str(all_id));
if True:
#assert (all_id.ndim == 2) and (all_id.shape[1] == 1);
if all_id.ndim == 2:
#!!!!TODO TODO: put this at the beginning of the function
assert all_id.shape[1] == 1;
"""
We flatten the array all_id
Note: We don't use order="F" since it's
basically 1-D array
"""
all_id = np.ravel(all_id);
#!!!!TODO: put start and end frame in config - or compute it from geolocation
sub_idx = np.logical_and( (all_id[idx] >= 2030 - 928), \
(all_id[idx] <= 2400 - 928) );
idx = idx[sub_idx];
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(): " \
"all_id = %s" % str(all_id));
print("multiscale_quad_retrieval(): " \
"sub_idx = %s" % str(sub_idx));
print("multiscale_quad_retrieval(): " \
"idx = %s" % str(idx));
if FILTER:
dis_idx = np.abs(qmaxdis[queryFrameQuad] - all_max[idx]) < MAXDIS;
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx = %s" % str(idx));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"dis_idx = %s" % str(dis_idx));
#idx=idx(dis_idx)
idx = idx[dis_idx];
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx (after idx = idx[dis_idx]) = %s" % str(idx));
if FILTER:
#ori_idx=abs(qori(i)-all_ori(idx))<MAXORI;
ori_idx = np.abs(qori[queryFrameQuad] - all_ori[idx]) < MAXORI;
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_ori = %s" % str(all_ori));
common.DebugPrint("multiscale_quad_retrieval(): " \
"qori[queryFrameQuad] = %s" % str(qori[queryFrameQuad]));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"ori_idx = %s" % str(ori_idx));
#idx=idx(ori_idx);
idx = idx[ori_idx];
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# IMPORTANT ###################################################
#% spatio-temporal consistency
# IMPORTANT ###################################################
# IMPORTANT ###################################################
# IMPORTANT ###################################################
#if numel(idx) > 0:
if idx.size > 0:
if cropflag == 0:
if FILTER:
"""
Alex: this is a simple procedure for eliminating False
Positive (FP) matches, as presented in Section 4.2 of
the TPAMI 2013 paper.
Basically it filters out quad matches whose centroids lie
more than st_threshold away from the query quad's centroid.
Note: all_cen holds the centroids of all the reference
quads.
"""
dy = qcen[queryFrameQuad, 0] - all_cen[idx, 0];
dx = qcen[queryFrameQuad, 1] - all_cen[idx, 1];
#D=dy.^2+dx.^2;
D = dy**2 + dx**2;
co_idx = D < pow(st_threshold, 2);
idx = idx[co_idx];
else:
"""
We substitute iii - 1 with iii, since we want
to number arrays from 0 (not from 1 like in Matlab).
"""
#for iii in range(1, len(idx) + 1):
for iii in range(len(idx)):
#space_xy(i,(all_id(idx(iii))-RD_start)*2+1:(all_id(idx(iii))-RD_start)*2+2) = all_cen(idx(iii),:)
space_xy[queryFrameQuad, \
(all_id[idx[iii]] - RD_start) * 2: (all_id[idx[iii]] - RD_start) * 2 + 2] = \
all_cen[idx[iii], :];
#hh=hist(all_id(idx),RD_start:RD_end);
# It has to be an np.array because we multiply it with a scalar
histoRange = np.array(range(RD_start, RD_end + 1));
hh = Matlab.hist(x=all_id[idx], binCenters=histoRange);
#if False:
#if True:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"hh = %s" % (str(hh)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"hh.shape = %s" % (str(hh.shape)));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_id = %s" % (str(all_id)));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"all_id.shape = %s" % (str(all_id.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx = %s" % (str(idx)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"idx.shape = %s" % (str(idx.shape)));
# % nz can be computed more optimally
#nz=find(hh~=0); # nz can be computed more optimally
# np.nonzero() always returns a tuple, even if it contains 1 element since hh has only 1 dimension
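# (for a 1-D hh, np.flatnonzero(hh) would give the same indices in a single call)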
nz = np.nonzero(hh != 0)[0];
#if False:
if common.MY_DEBUG_STDOUT:
common.DebugPrint("multiscale_quad_retrieval(): " \
"nz = %s" % (str(nz)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"nz.shape = %s" % (str(nz.shape)));
#if numel(nz) > 0
if nz.size > 0:
#%%----text-retrieval-like
#votes(nz, tol_i) = votes(nz, tol_i) + log10(length(RD) / (length(nz)))^2 #PREVIOUSLY
#myVal = pow(math.log10(float(len(RD)) / len(nz)), 2);
myVal = pow(math.log10(float(len(r_harlocs)) / len(nz)), 2);
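# For example (illustrative numbers only): with 1000 reference harlocs and 10 distinct
# reference frames hit, the added weight is (log10(1000 / 10))**2 = 4.0.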
"""
try:
myVal = pow(math.log10(float(len(r_harlocs)) / len(nz)), 2);
except:
print("Error: len=%d len(nz)=%d nz.size=%d" % \
(len(r_harlocs), len(nz), nz.size));
common.DebugPrintErrorTrace();
"""
#if False:
if common.MY_DEBUG_STDOUT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(RD) = %d" % len(RD));
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(r_harlocs) = %d" % len(r_harlocs));
common.DebugPrint("multiscale_quad_retrieval(): " \
"len(nz) = %d" % len(nz));
common.DebugPrint("multiscale_quad_retrieval(): " \
"myVal = %.5f" % myVal);
# PREVIOUSLY
votes[nz, tol_i] = votes[nz, tol_i] + myVal;
# votes(nz)=votes(nz)+log10(length(RD)/(length(nz)));
# votes(nz)=votes(nz)+1;
if common.MY_DEBUG_STDOUT and DBGPRINT:
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"Votes_space.shape = %s" % (str(Votes_space.shape)));
common.DebugPrint("multiscale_quad_retrieval(): " \
"votes.shape = %s" % (str(votes.shape)));
"""
print("multiscale_quad_retrieval(): " \
"votes.shape = %s" % (str(votes.shape)));
if (np.abs(votes) < 1.0e-10).all():
print( \
"multiscale_quad_retrieval(): votes = 0 (all zeros)");
else:
print("multiscale_quad_retrieval(): " \
"votes = %s" % (str(votes)));
#Votes_space(:,q)=votes;
# Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
#Votes_space[:, queryFrame - 1] = votes;
# Note: since votes is basically a 1-D vector, we don't use the Fortran order
Votes_space[:, queryFrame] = np.ravel(votes); # order="F");
if cropflag == 0:
HH[:, queryFrame] = 1;
else:
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(RD), st_threshold, cropflag);
"""
HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
qcen, len(r_harlocs), st_threshold, cropflag);
if common.MY_DEBUG_STDOUT and DBGPRINT:
print("multiscale_quad_retrieval(scale_index=%d): " \
"Votes_space =\n%s" % (scale_index, str(Votes_space)));
try:
np.savez_compressed("Votes_space%d" % scale_index, Votes_space);
np.savez_compressed("HH%d" % scale_index, HH);
except:
common.DebugPrintErrorTrace();
t2 = float(cv2.getTickCount());
myTime = (t2 - t1) / cv2.getTickFrequency();
print("multiscale_quad_retrieval() took %.6f [sec]" % myTime);
"""
common.DebugPrint("multiscale_quad_retrieval(): " \
"%d corresponding frames retrieved in %.6f secs" % \
(len(q_harlocs), myTime));
"""
return Votes_space, HH;
| 45.45283
| 195
| 0.480988
|
68b8c7b98c246d93e310c070b66e5e1f6e2939e4
| 4,118
|
py
|
Python
|
var/spack/repos/builtin/packages/trinity/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-15T23:55:48.000Z
|
2019-09-15T23:55:48.000Z
|
var/spack/repos/builtin/packages/trinity/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/trinity/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2017-01-21T17:19:32.000Z
|
2017-01-21T17:19:32.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Trinity(MakefilePackage):
"""Trinity, developed at the Broad Institute and the Hebrew University of
Jerusalem, represents a novel method for the efficient and robust de
novo reconstruction of transcriptomes from RNA-seq data. Trinity
combines three independent software modules: Inchworm, Chrysalis, and
Butterfly, applied sequentially to process large volumes of RNA-seq
reads. Trinity partitions the sequence data into many individual de
Bruijn graphs, each representing the transcriptional complexity at a
given gene or locus, and then processes each graph independently to
extract full-length splicing isoforms and to tease apart transcripts
derived from paralogous genes.
"""
homepage = "http://trinityrnaseq.github.io/"
url = "https://github.com/trinityrnaseq/trinityrnaseq/archive/Trinity-v2.6.6.tar.gz"
version('2.6.6', sha256='868dfadeefaf2d3c6150a88d5e86fbc09466d69bbf4a65f70b4f5a7485668984')
depends_on("java@8:", type=("build", "run"))
depends_on("bowtie2")
depends_on("jellyfish")
depends_on("salmon")
depends_on("perl+threads", type=("build", "run"))
depends_on("autoconf", type="build")
depends_on("automake", type="build")
depends_on("libtool", type="build")
# There is no documented list of these deps, but they're in the Dockerfile
# and we have runtime errors without them
# https://github.com/trinityrnaseq/trinityrnaseq/blob/master/Docker/Dockerfile
depends_on("blast-plus", type="run")
depends_on("bowtie", type="run")
depends_on("r", type="run")
depends_on("r-tidyverse", type="run")
depends_on("r-edger", type="run")
depends_on("r-deseq2", type="run")
depends_on("r-ape", type="run")
depends_on("r-gplots", type="run")
depends_on("r-biobase", type="run")
depends_on("r-qvalue", type="run")
depends_on("rsem", type="run")
depends_on("kallisto", type="run")
depends_on("fastqc", type="run")
depends_on("samtools", type="run")
depends_on("py-numpy", type="run")
depends_on("express", type="run")
depends_on("perl-db-file", type="run")
depends_on("perl-uri", type="run")
depends_on("r-fastcluster", type="run")
depends_on("r-ctc", type="run")
depends_on("r-goseq", type="run")
depends_on("r-glimma", type="run")
depends_on("r-rots", type="run")
depends_on("r-goplot", type="run")
depends_on("r-argparse", type="run")
depends_on("r-sm", type="run")
def build(self, spec, prefix):
make()
make("trinity_essentials")
make("plugins")
def install(self, spec, prefix):
install_tree('.', prefix.bin)
force_remove(join_path(prefix.bin, '.gitmodules'))
force_remove(join_path(prefix.bin, 'Butterfly', '.err'))
force_remove(join_path(prefix.bin, 'Butterfly', 'src', '.classpath'))
force_remove(join_path(prefix.bin, 'Butterfly', 'src', '.err'))
force_remove(join_path(prefix.bin, 'Butterfly', 'src', '.project'))
remove_linked_tree(join_path(prefix.bin, 'Butterfly', 'src',
'.settings'))
remove_linked_tree(join_path(prefix.bin, 'Inchworm', 'src', '.deps'))
remove_linked_tree(join_path(prefix.bin, 'trinity-plugins',
'ParaFly-0.1.0', 'src', '.deps'))
force_remove(join_path(prefix.bin, 'trinity-plugins',
'seqtk-trinity-0.0.2', '.gitignore'))
force_remove(join_path(prefix.bin, 'trinity-plugins', 'slclust', 'bin',
'.hidden'))
def setup_build_environment(self, env):
env.append_flags('CXXFLAGS', self.compiler.openmp_flag)
def setup_run_environment(self, env):
env.set('TRINITY_HOME', self.prefix.bin)
env.prepend_path('PATH', self.prefix.bin.util)
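# Hypothetical usage once this recipe is available in a Spack repository:
#   spack install trinity@2.6.6
#   spack load trinity   # applies the run environment above (TRINITY_HOME, PATH)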
| 43.808511
| 95
| 0.654444
|
0c7e4c094a7edb698c93923ce8ecc247c9b2b933
| 2,132
|
py
|
Python
|
rnbgrader/grids.py
|
matthew-brett/rnbgrader
|
f07494f59dd0d1cb97c094ac2ea9e9d1243f0f70
|
[
"BSD-2-Clause"
] | null | null | null |
rnbgrader/grids.py
|
matthew-brett/rnbgrader
|
f07494f59dd0d1cb97c094ac2ea9e9d1243f0f70
|
[
"BSD-2-Clause"
] | null | null | null |
rnbgrader/grids.py
|
matthew-brett/rnbgrader
|
f07494f59dd0d1cb97c094ac2ea9e9d1243f0f70
|
[
"BSD-2-Clause"
] | null | null | null |
""" Calculate answer, evaluated chunk score grids
A grid is a 2D array, with rows corresponding to answers, and columns
corresponding to evaluated chunks.
The array contains marks, where the evaluated chunk gets the corresponding
marks from the given answer.
An answer returns marks for an evaluated chunk. An evaluated chunk is the
association of (chunk, result).
"""
import numpy as np
def full_grid(answers, evaluated_chunks):
""" Calculate full grid of `answers` against `evaluated_chunks`.
A grid is a 2D array, with rows corresponding to answer, and columns
corresponding to evaluated chunks.
The array contains marks, where the evaluated chunk gets the corresponding
marks from the given answer.
Parameters
----------
answers : length N sequence of callables.
Sequence of callable objects, returning marks for a given evaluated chunk
(see below).
evaluated_chunks : length P sequence of evaluated chunks
Sequence of EvaluatedChunk instances.
Returns
-------
grid : ndarray shape (N, P)
Array where rows correspond to answers and columns to evaluated chunks.
The value at ``grid[i, j]`` is the mark for result[j] on question[i].
"""
N = len(answers)
P = len(evaluated_chunks)
grid = np.zeros((N, P))
for i, answer in enumerate(answers):
for j, ev_chunk in enumerate(evaluated_chunks):
grid[i, j] = answer(ev_chunk)
return grid
def max_multi(grid):
""" Allow any evaluated chunk as answer to any question
Treat NaN values as zero.
Parameters
----------
grid : ndarray shape (N, P)
Array where rows correspond to answers and columns to evaluated chunks.
The value at ``grid[i, j]`` is the mark for result[j] on question[i].
Returns
-------
scores : array shape(N,) of float
Scores, that are scores for the chunks giving maximum score for each
answer.
"""
# Treat NaNs as zeros. Nansum will also do this for numpy >= 1.9
grid = np.array(grid)
grid[np.isnan(grid)] = 0
return np.max(grid, axis=1)
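# A minimal, hypothetical usage sketch (the answer callables and "chunks" below are
# made up; in practice the evaluated chunks are EvaluatedChunk instances):
#   chunks = ["plot(x, y)", "summary(df)", "mean(x)"]
#   answers = [lambda c: 2.0 if "plot" in c else 0.0,
#              lambda c: 1.0 if "mean" in c else np.nan]
#   grid = full_grid(answers, chunks)   # shape (2, 3) array of marks
#   max_multi(grid)                     # -> array([2., 1.])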
| 30.898551
| 79
| 0.671201
|
fb5ee45e18e4f604a2b809cd72f3d388c34d0351
| 5,034
|
py
|
Python
|
cytominer_eval/tests/test_operations/test_grit.py
|
hillsbury/cytominer-eval
|
56bd9e545d4ce5dea8c2d3897024a4eb241d06db
|
[
"BSD-3-Clause"
] | null | null | null |
cytominer_eval/tests/test_operations/test_grit.py
|
hillsbury/cytominer-eval
|
56bd9e545d4ce5dea8c2d3897024a4eb241d06db
|
[
"BSD-3-Clause"
] | null | null | null |
cytominer_eval/tests/test_operations/test_grit.py
|
hillsbury/cytominer-eval
|
56bd9e545d4ce5dea8c2d3897024a4eb241d06db
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import random
import pytest
import pathlib
import tempfile
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from sklearn.preprocessing import StandardScaler
from cytominer_eval.operations import grit
from cytominer_eval.transform import metric_melt
from cytominer_eval.transform.util import (
assert_melt,
set_pair_ids,
set_grit_column_info,
)
from cytominer_eval.operations.util import (
assign_replicates,
get_grit_entry,
calculate_grit,
)
# Load CRISPR dataset
example_file = "SQ00014610_normalized_feature_select.csv.gz"
example_file = pathlib.Path(
"{file}/../../example_data/gene/{eg}".format(
file=os.path.dirname(__file__), eg=example_file
)
)
df = pd.read_csv(example_file)
meta_features = [
x for x in df.columns if (x.startswith("Metadata_") or x.startswith("Image_"))
]
features = df.drop(meta_features, axis="columns").columns.tolist()
similarity_melted_df = metric_melt(
df=df,
features=features,
metadata_features=meta_features,
similarity_metric="pearson",
eval_metric="grit",
)
control_perts = ["Luc-2", "LacZ-2", "LacZ-3"]
replicate_id = "Metadata_pert_name"
group_id = "Metadata_gene_name"
pair_ids = set_pair_ids()
replicate_col_name = "{x}{suf}".format(
x=replicate_id, suf=pair_ids[list(pair_ids)[0]]["suffix"]
)
column_id_info = set_grit_column_info(replicate_id=replicate_id, group_id=group_id)
def test_get_grit_entry():
with pytest.raises(AssertionError) as ae:
result = get_grit_entry(df=similarity_melted_df, col=replicate_col_name)
assert "grit is calculated for each perturbation independently" in str(ae.value)
expected_result = "EMPTY"
similarity_subset_df = similarity_melted_df.query(
"Metadata_pert_name_pair_a == @expected_result"
)
result = get_grit_entry(df=similarity_subset_df, col=replicate_col_name)
assert result == expected_result
def test_calculate_grit():
result = assign_replicates(
similarity_melted_df=similarity_melted_df,
replicate_groups=[replicate_id, group_id],
)
assert_melt(result, eval_metric="grit")
example_group = result.groupby(replicate_col_name).get_group(name=("MTOR-2"))
# Perform the calculation!
grit_result = pd.DataFrame(
calculate_grit(
example_group, control_perts=control_perts, column_id_info=column_id_info
),
columns=["result"],
)
expected_result = {"perturbation": "MTOR-2", "group": "MTOR", "grit": 1.55075}
expected_result = pd.DataFrame(expected_result, index=["result"]).transpose()
assert_frame_equal(grit_result, expected_result, check_less_precise=True)
# Calculate grit will not work with singleton perturbations
# (no other perts in same group)
example_group = result.groupby(replicate_col_name).get_group(name=("AURKB-2"))
grit_result = pd.DataFrame(
calculate_grit(
example_group, control_perts=control_perts, column_id_info=column_id_info
),
columns=["result"],
)
expected_result = {"perturbation": "AURKB-2", "group": "AURKB", "grit": np.nan}
expected_result = pd.DataFrame(expected_result, index=["result"]).transpose()
assert_frame_equal(grit_result, expected_result, check_less_precise=True)
# Calculate grit will not work with the full dataframe
with pytest.raises(AssertionError) as ae:
result = calculate_grit(
similarity_melted_df,
control_perts=control_perts,
column_id_info=column_id_info,
)
assert "grit is calculated for each perturbation independently" in str(ae.value)
# Calculate grit will not work with when control barcodes are missing
with pytest.raises(AssertionError) as ae:
result = calculate_grit(
example_group,
control_perts=["DOES NOT EXIST", "THIS ONE NEITHER"],
column_id_info=column_id_info,
)
assert "Error! No control perturbations found." in str(ae.value)
def test_grit():
result = grit(
similarity_melted_df=similarity_melted_df,
control_perts=control_perts,
replicate_id=replicate_id,
group_id=group_id,
).sort_values(by="grit")
assert all([x in result.columns for x in ["perturbation", "group", "grit"]])
top_result = pd.DataFrame(
result.sort_values(by="grit", ascending=False)
.reset_index(drop=True)
.iloc[0, :],
)
expected_result = {"perturbation": "PTK2-2", "group": "PTK2", "grit": 4.61094}
expected_result = pd.DataFrame(expected_result, index=[0]).transpose()
assert_frame_equal(top_result, expected_result, check_less_precise=True)
# There are six singletons in this dataset
assert result.grit.isna().sum() == 6
# No perturbations should be duplicated
assert result.perturbation.duplicated().sum() == 0
# With this data, we do not expect the sum of grit to change
assert np.round(result.grit.sum(), 0) == 152.0
| 31.4625
| 85
| 0.708979
|