# tests/conftest.py (SimmonsRitchie/topojoin, MIT)
import pytest
from pathlib import Path
root = Path(__file__).parent.resolve()
@pytest.fixture(scope="session")
def csv_path() -> str:
"""
Returns a path to a CSV file. The file has a column called 'fips' representing unique county IDs. It joins
with a column called 'GEOID' in the topojson file at topo_path.
"""
return str(root / "fixtures/pa-county-pop.csv")
@pytest.fixture(scope="session")
def csv_path_non_matching():
"""
Returns a path to a CSV file. The file has a column called 'fips' representing unique county IDs. It joins
with a column called 'GEOID' in the topojson file at topo_path. However, this file is missing rows so it will not
match cleanly with the topojson.
"""
return str(root / "fixtures/pa-county-pop__non-matching-rows.csv")
@pytest.fixture(scope="session")
def topo_path():
    """Returns a path to a topojson file with a 'GEOID' column of unique county IDs."""
    return str(root / "fixtures/pa-county.json")
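
# A brief sketch of how a test module would consume these fixtures: pytest
# injects them by argument name, so a test only declares the fixtures it needs.
# (The test below is illustrative and not part of the suite.)
import os

def test_fixture_paths_exist(csv_path, topo_path):
    assert os.path.exists(csv_path)
    assert os.path.exists(topo_path)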

# examples/plot_2_mne_feature_distributions.py (ryanhammonds/bycycle, Apache-2.0)
"""
3. MNE Interface Cycle Feature Distributions
============================================
Compute bycycle feature distributions using MNE objects.
"""
####################################################################################################
# Import Packages and Load Data
# -----------------------------
#
# First let's import the packages we need. This example depends on mne.
####################################################################################################
import numpy as np
import matplotlib.pyplot as plt
from mne.io import read_raw_fif
from mne.datasets import sample
from mne import pick_channels
from neurodsp.plts import plot_time_series
from bycycle.group import compute_features_2d
from bycycle.plts import plot_feature_hist
####################################################################################################
# Frequencies of interest: the alpha band
f_alpha = (8, 15)
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = read_raw_fif(raw_fname, preload=True, verbose=False)
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=False, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
# Low-pass filter at 20 Hz, keeping the alpha band of interest
raw = raw.filter(l_freq=None, h_freq=20.)
# Settings for exploring example channels of data
chs = ['EEG 042', 'EEG 043', 'EEG 044']
t_start = 20000
t_stop = int(t_start + (10 * fs))
# Extract example channels to explore
sigs, times = raw.get_data(pick_channels(raw.ch_names, chs),
start=t_start, stop=t_stop, return_times=True)
####################################################################################################
#
# Plot time series for each recording
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now let's see how each signal looks in time. This looks like standard EEG
# data.
#
####################################################################################################
# Plot the signal
plot_time_series(times, [sig * 1e6 for sig in sigs], labels=chs, title='EEG Signal')
####################################################################################################
# Compute cycle-by-cycle features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Here we use the bycycle compute_features_2d function to compute the
# cycle-by-cycle features of the three signals.
#
####################################################################################################
# Set parameters for defining oscillatory bursts
threshold_kwargs = {'amp_fraction_threshold': 0.3,
'amp_consistency_threshold': 0.4,
'period_consistency_threshold': 0.5,
'monotonicity_threshold': 0.8,
'min_n_cycles': 3}
# Create a dictionary of cycle feature dataframes, corresponding to each channel
kwargs = dict(threshold_kwargs=threshold_kwargs, center_extrema='trough')
dfs = compute_features_2d(sigs, fs, f_alpha, axis=0,
compute_features_kwargs=kwargs)
dfs = {ch: df for df, ch in zip(dfs, chs)}
####################################################################################################
#
# Plot feature distributions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# As it turns out, none of the channels in the MNE example auditory and visual
# task shows waveform asymmetry. These data were collected from a healthy
# person while they listened to beeps or saw gratings on a screen,
# so this is not unexpected.
#
####################################################################################################
fig, axes = plt.subplots(figsize=(15, 15), nrows=2, ncols=2)
for ch, df in dfs.items():
# Rescale amplitude and period features
df['volt_amp'] = df['volt_amp'] * 1e6
df['period'] = df['period'] / fs * 1000
# Plot feature histograms
    plot_feature_hist(df, 'volt_amp', only_bursts=False, ax=axes[0][0], label=ch,
                      xlabel='Cycle amplitude (uV)', bins=np.arange(0, 40, 4))
plot_feature_hist(df, 'period', only_bursts=False, ax=axes[0][1], label=ch,
xlabel='Cycle period (ms)', bins=np.arange(0, 250, 25))
plot_feature_hist(df, 'time_rdsym', only_bursts=False, ax=axes[1][0], label=ch,
xlabel='Rise-decay asymmetry', bins=np.arange(0, 1, .1))
plot_feature_hist(df, 'time_ptsym', only_bursts=False, ax=axes[1][1], label=ch,
xlabel='Peak-trough asymmetry', bins=np.arange(0, 1, .1))
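
####################################################################################################
# To restrict the histograms to detected bursts only, each dataframe could first
# be filtered on bycycle's 'is_burst' column before plotting (a brief sketch):

burst_dfs = {ch: df[df['is_burst']] for ch, df in dfs.items()}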

# recipes/Python/101521_Using_translate_wbinary_files__like_finding/recipe-101521.py
# (tdiprima/code, MIT)
import string
#Translate in python has 2 pieces, a translation table and the translate call.
#The translation table is a list of 256 characters. Changing the order of the #characters is used for mapping
norm = string.maketrans('', '') #builds list of all characters
print len(norm) #256 characters
print string.maketrans('', '')[100] #is the letter d
print string.maketrans('', '')[101] #is the letter e
print string.maketrans('d','e')[100] #is now also the letter e
#The second piece of translate, is the translate function itself.
#The translate function has 3 parts:
#1)string to translate
#2)translation table -- always required
#3)deletion list
#Let's start simple and build
#use translate to get groups of characters
#This can be done because translate's 3rd arg is to delete characters
#build list of all characters
norm = string.maketrans('', '')
#delete letters
non_letters = string.translate(norm, norm, string.letters)
#then take the list of non_letters and remove digits
non_alnum = string.translate(non_letters, norm, string.digits)
#You'll notice the length shrinks appropriately as we delete
print len(norm),'\t256-(26*2 letters)=',len(non_letters),'\t204-10 digits=',len(non_alnum)
#Norm is a handy list to have around if all you are going to do is delete
#characters. It would be nice if translate assumed Norm if the translation table arg was null.
#To translate all non-text to a '#', you have to have a one to one mapping for
#each character in translate.
#Thus we make use of the python * operator to make a string of '#'
#of the appropriate length
trans_nontext=string.maketrans(non_alnum,'#'*len(non_alnum))
#A full program to examine strings in a binary file for Regents
# would look like this. We use regular expressions to convert all groups
# of '#' to a single '#'
import string,re
norm = string.maketrans('', '') #builds list of all characters
non_alnum = string.translate(norm, norm, string.letters+string.digits)
#now examine the binary file. If Regents is in it. It contains the copyright
ftp_file=open('f:/tmp/ftp.exe','rb').read()
trans_nontext=string.maketrans(non_alnum,'#'*len(non_alnum))
cleaned=string.translate(ftp_file, trans_nontext)
for i in re.sub('#+','#',cleaned).split('#'):
if i.find('Regents')!=-1:
print 'found it!',i
break
    if len(i)>5:
        print i
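
# For reference, a minimal Python 3 sketch of the same technique (assuming the
# same input file; str and bytes grew their own maketrans/translate methods, and
# binary data stays as bytes throughout):
#
# import re, string
# keep = (string.ascii_letters + string.digits).encode()
# non_alnum = bytes(b for b in range(256) if b not in keep)
# trans = bytes.maketrans(non_alnum, b'#' * len(non_alnum))
# cleaned = open('f:/tmp/ftp.exe', 'rb').read().translate(trans)
# for chunk in re.sub(b'#+', b'#', cleaned).split(b'#'):
#     if b'Regents' in chunk:
#         print('found it!', chunk)
#         break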

# manti_by/apps/gallery/urls.py (manti-by/m2, BSD-3-Clause)
from django.urls import path
from manti_by.apps.gallery import views
urlpatterns = [path("", views.index, name="gallery_list")]
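
# For context, a minimal hedged sketch of the view this URLconf expects; the
# real manti_by gallery view is not included in this listing:
#
# # manti_by/apps/gallery/views.py
# from django.shortcuts import render
#
# def index(request):
#     return render(request, "gallery/index.html")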

# hydra/core/config_store.py (mshvartsman/hydra, MIT)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from omegaconf import DictConfig, OmegaConf
from hydra.core.object_type import ObjectType
from hydra.core.singleton import Singleton
from hydra.plugins.config_source import ConfigLoadError
class ConfigStoreWithProvider:
def __init__(self, provider: str) -> None:
self.provider = provider
def __enter__(self) -> "ConfigStoreWithProvider":
return self
def store(
self,
name: str,
node: Any,
group: Optional[str] = None,
package: Optional[str] = None,
) -> None:
ConfigStore.instance().store(
group=group, name=name, node=node, package=package, provider=self.provider
)
def __exit__(self, exc_type: Any, exc_value: Any, exc_traceback: Any) -> Any:
...
@dataclass
class ConfigNode:
name: str
node: DictConfig
group: Optional[str]
package: Optional[str]
provider: Optional[str]
class ConfigStore(metaclass=Singleton):
@staticmethod
def instance(*args: Any, **kwargs: Any) -> "ConfigStore":
return Singleton.instance(ConfigStore, *args, **kwargs) # type: ignore
repo: Dict[str, Any]
def __init__(self) -> None:
self.repo = {}
def store(
self,
name: str,
node: Any,
group: Optional[str] = None,
package: Optional[str] = "_group_",
provider: Optional[str] = None,
) -> None:
"""
Stores a config node into the repository
:param name: config name
:param node: config node, can be DictConfig, ListConfig,
Structured configs and even dict and list
:param group: config group, subgroup separator is '/',
for example hydra/launcher
:param package: Config node parent hierarchy.
Child separator is '.', for example foo.bar.baz
:param provider: the name of the module/app providing this config.
Helps debugging.
"""
cur = self.repo
if group is not None:
for d in group.split("/"):
if d not in cur:
cur[d] = {}
cur = cur[d]
if not name.endswith(".yaml"):
name = f"{name}.yaml"
assert isinstance(cur, dict)
cfg = OmegaConf.structured(node)
cur[name] = ConfigNode(
name=name, node=cfg, group=group, package=package, provider=provider,
)
def load(self, config_path: str) -> ConfigNode:
ret = self._load(config_path)
# shallow copy to avoid changing the original stored ConfigNode
ret = copy.copy(ret)
assert isinstance(ret, ConfigNode)
# copy to avoid mutations to config effecting subsequent calls
ret.node = copy.deepcopy(ret.node)
return ret
def _load(self, config_path: str) -> ConfigNode:
idx = config_path.rfind("/")
if idx == -1:
ret = self._open(config_path)
if ret is None:
raise ConfigLoadError(f"Structured config not found {config_path}")
assert isinstance(ret, ConfigNode)
return ret
else:
path = config_path[0:idx]
name = config_path[idx + 1 :]
d = self._open(path)
if d is None or not isinstance(d, dict):
raise ConfigLoadError(f"Structured config not found {config_path}")
if name not in d:
raise ConfigLoadError(
f"Structured config {name} not found in {config_path}"
)
ret = d[name]
assert isinstance(ret, ConfigNode)
return ret
def get_type(self, path: str) -> ObjectType:
d = self._open(path)
if d is None:
return ObjectType.NOT_FOUND
if isinstance(d, dict):
return ObjectType.GROUP
else:
return ObjectType.CONFIG
def list(self, path: str) -> List[str]:
d = self._open(path)
if d is None:
raise IOError(f"Path not found {path}")
if not isinstance(d, dict):
raise IOError(f"Path points to a file : {path}")
return sorted(d.keys())
def _open(self, path: str) -> Any:
d: Any = self.repo
for frag in path.split("/"):
if frag == "":
continue
if frag in d:
d = d[frag]
else:
return None
return d
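
# A minimal usage sketch, runnable when this module is executed directly (the
# MySQLConfig schema and the "db" group are illustrative; note that store()
# appends ".yaml" to the name, so load() takes the suffixed path):
if __name__ == "__main__":

    @dataclass
    class MySQLConfig:
        """Illustrative config schema, not part of Hydra."""

        host: str = "localhost"
        port: int = 3306

    cs = ConfigStore.instance()
    cs.store(name="mysql", node=MySQLConfig, group="db")
    print(cs.load("db/mysql.yaml").node)  # DictConfig with the defaults above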

# airflow/providers/google/cloud/sensors/looker.py (JGoldman110/airflow, Apache-2.0)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google Cloud Looker sensors."""
from typing import TYPE_CHECKING, Optional
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.looker import JobStatus, LookerHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LookerCheckPdtBuildSensor(BaseSensorOperator):
"""
Check for the state of a previously submitted PDT materialization job.
:param materialization_id: Required. The materialization job ID to poll. (templated)
:param looker_conn_id: Required. The connection ID to use connecting to Looker.
:param cancel_on_kill: Optional. Flag which indicates whether cancel the hook's job or not,
when on_kill is called.
"""
template_fields = ["materialization_id"]
def __init__(
self, materialization_id: str, looker_conn_id: str, cancel_on_kill: bool = True, **kwargs
) -> None:
super().__init__(**kwargs)
self.materialization_id = materialization_id
self.looker_conn_id = looker_conn_id
self.cancel_on_kill = cancel_on_kill
self.hook: Optional[LookerHook] = None
def poke(self, context: "Context") -> bool:
self.hook = LookerHook(looker_conn_id=self.looker_conn_id)
# materialization_id is templated var pulling output from start task
status_dict = self.hook.pdt_build_status(materialization_id=self.materialization_id)
status = status_dict['status']
if status == JobStatus.ERROR.value:
msg = status_dict["message"]
raise AirflowException(
f'PDT materialization job failed. Job id: {self.materialization_id}. Message:\n"{msg}"'
)
elif status == JobStatus.CANCELLED.value:
raise AirflowException(
f'PDT materialization job was cancelled. Job id: {self.materialization_id}.'
)
elif status == JobStatus.UNKNOWN.value:
raise AirflowException(
f'PDT materialization job has unknown status. Job id: {self.materialization_id}.'
)
elif status == JobStatus.DONE.value:
self.log.debug(
"PDT materialization job completed successfully. Job id: %s.", self.materialization_id
)
return True
self.log.info("Waiting for PDT materialization job to complete. Job id: %s.", self.materialization_id)
return False
def on_kill(self):
if self.materialization_id and self.cancel_on_kill:
self.hook.stop_pdt_build(materialization_id=self.materialization_id)
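
# A hedged usage sketch inside a DAG (the task ids, connection id, and the XCom
# pull from an upstream "start_pdt_task" are illustrative assumptions):
#
# check_pdt = LookerCheckPdtBuildSensor(
#     task_id="check_pdt_build",
#     looker_conn_id="looker_default",
#     materialization_id="{{ ti.xcom_pull(task_ids='start_pdt_task') }}",
#     poke_interval=10,
# )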

# tests/bindings/python/test_annotator.py (awickens/libcellml, Apache-2.0)
#
# Tests the Component class bindings
#
import unittest
from test_resources import file_contents
class AnnotatorTestCase(unittest.TestCase):
def test_create_destroy(self):
from libcellml import Annotator
x = Annotator()
self.assertIsNotNone(x)
del x
def test_item(self):
from libcellml import Annotator, CellmlElementType, Model, Parser
annotator = Annotator()
model = Model()
parser = Parser()
model_string = file_contents("annotator/unique_ids.cellml")
model = parser.parseModel(model_string)
annotator.setModel(model)
self.assertEqual(CellmlElementType.UNDEFINED, annotator.item("not_an_id")[0])
self.assertEqual(CellmlElementType.UNDEFINED, annotator.item("not_an_id", 3)[0])
self.assertEqual(CellmlElementType.MAP_VARIABLES, annotator.item("map_variables_2")[0])
# For coverage purposes only.
annotator._itemCellmlElement("not_an_id", 0)
def test_type_based_retrieval(self):
from libcellml import Annotator, Model, Parser
annotator = Annotator()
model = Model()
parser = Parser()
model_string = file_contents("annotator/unique_ids.cellml")
model = parser.parseModel(model_string)
annotator.setModel(model)
v1v1 = (model.component("component2").variable("variable1"), model.component(
"component2").component("component3").variable("variable1"))
v2v2 = (model.component("component2").variable("variable2"), model.component(
"component2").component("component3").variable("variable2"))
self.assertEqual(model.name(), annotator.model("model_1").name())
self.assertEqual(model.name(), annotator.encapsulation("encapsulation_1").name())
self.assertEqual(model.component("component1").name(),
annotator.component("component_1").name())
self.assertEqual(model.component("component2").name(),
annotator.component("component_2").name())
self.assertEqual(model.component("component2").name(),
annotator.component("component_ref_1").name())
self.assertEqual(model.component("component2").component("component3").name(),
annotator.component("component_3").name())
self.assertEqual(model.component("component2").component("component3").name(),
annotator.componentRef("component_ref_2").name())
self.assertEqual(model.component("component1").importSource().url(),
annotator.importSource("import_1").url())
self.assertEqual(model.units("units1").name(),
annotator.units("units_1").name())
self.assertEqual(model.units("units1").importSource().url(),
annotator.importSource("import_2").url())
self.assertEqual(model.units("units2").name(),
annotator.units("units_2").name())
self.assertEqual(model.units("units2").name(),
annotator.unit("unit_1").units().name())
self.assertEqual(0, annotator.unit("unit_1").index())
self.assertEqual(model.component("component2").variable("variable1").name(),
annotator.variable("variable_1").name())
self.assertEqual(model.component("component2").variable("variable2").name(),
annotator.variable("variable_2").name())
self.assertEqual(model.component("component2").reset(0).variable().name(),
annotator.reset("reset_1").variable().name())
self.assertEqual(model.component("component2").reset(0).testVariable().name(),
annotator.reset("reset_1").testVariable().name())
self.assertEqual(model.component("component2").reset(0).testValue(),
annotator.testValue("test_value_1").testValue())
self.assertEqual(model.component("component2").reset(0).resetValue(),
annotator.resetValue("reset_value_1").resetValue())
self.assertEqual(model.component("component2").component("component3").variable("variable1").name(),
annotator.variable("variable_3").name())
self.assertEqual(model.component("component2").component("component3").variable("variable2").name(),
annotator.variable("variable_4").name())
self.assertEqual(v1v1[0].name(),
annotator.connection("connection_1").variable1().name())
self.assertEqual(v1v1[1].name(),
annotator.connection("connection_1").variable2().name())
self.assertEqual(v1v1[0].name(),
annotator.mapVariables("map_variables_1").variable1().name())
self.assertEqual(v1v1[1].name(),
annotator.mapVariables("map_variables_1").variable2().name())
self.assertEqual(v2v2[0].name(),
annotator.mapVariables("map_variables_2").variable1().name())
self.assertEqual(v2v2[1].name(),
annotator.mapVariables("map_variables_2").variable2().name())
self.assertIsNone(annotator.model("i_dont_exist"))
self.assertIsNone(annotator.component("i_dont_exist"))
self.assertIsNone(annotator.variable("i_dont_exist"))
self.assertIsNone(annotator.units("i_dont_exist"))
self.assertIsNone(annotator.unit("i_dont_exist"))
self.assertIsNone(annotator.reset("i_dont_exist"))
self.assertIsNone(annotator.resetValue("i_dont_exist"))
self.assertIsNone(annotator.testValue("i_dont_exist"))
self.assertIsNone(annotator.componentRef("i_dont_exist"))
self.assertIsNone(annotator.connection("i_dont_exist"))
self.assertIsNone(annotator.importSource("i_dont_exist"))
def test_ids(self):
from libcellml import Annotator, Parser
annotator = Annotator()
parser = Parser()
model = parser.parseModel(file_contents("annotator/unique_ids.cellml"))
annotator.setModel(model)
self.assertEqual(24, len(annotator.ids()))
def test_duplicate_count(self):
from libcellml import Annotator, Parser
annotator = Annotator()
parser = Parser()
model = parser.parseModel(file_contents("annotator/lots_of_duplicate_ids.cellml"))
annotator.setModel(model)
self.assertEqual(8, annotator.itemCount("duplicateId1"))
self.assertEqual(7, annotator.itemCount("duplicateId3"))
def test_has_model(self):
from libcellml import Annotator, Parser
annotator = Annotator()
parser = Parser()
self.assertFalse(annotator.hasModel())
model = parser.parseModel(file_contents("annotator/unique_ids.cellml"))
annotator.setModel(model)
self.assertTrue(annotator.hasModel())
def test_is_unique(self):
from libcellml import Annotator, Parser
annotator = Annotator()
parser = Parser()
model = parser.parseModel(file_contents("annotator/unique_ids.cellml"))
annotator.setModel(model)
self.assertTrue(annotator.isUnique("variable_3"))
model = parser.parseModel(file_contents("annotator/lots_of_duplicate_ids.cellml"))
annotator.setModel(model)
self.assertFalse(annotator.isUnique("duplicateId2"))
def test_assign_by_type(self):
from libcellml import Annotator, Parser, Variable
from libcellml import Unit, VariablePair
annotator = Annotator()
parser = Parser()
model_string = file_contents("annotator/no_ids.cellml")
model = parser.parseModel(model_string)
annotator.setModel(model)
c = model.component(0)
self.assertEqual("", model.id())
annotator.assignModelId(model)
self.assertEqual("b4da55", model.id())
self.assertEqual("", model.encapsulationId())
annotator.assignEncapsulationId(model)
self.assertEqual("b4da56", model.encapsulationId())
self.assertEqual("", c.id())
annotator.assignComponentId(c)
self.assertEqual("b4da57", c.id())
self.assertEqual("", c.encapsulationId())
annotator.assignComponentRefId(c)
self.assertEqual("b4da58", c.encapsulationId())
c2v1 = model.component("component2").variable("variable1")
c3v1 = model.component("component3").variable("variable1")
self.assertEqual("", Variable.equivalenceConnectionId(c2v1, c3v1))
annotator.assignConnectionId(VariablePair(c2v1, c3v1))
self.assertEqual("b4da59", Variable.equivalenceConnectionId(c2v1, c3v1))
self.assertEqual("", Variable.equivalenceMappingId(c2v1, c3v1))
annotator.assignMapVariablesId(VariablePair(c2v1, c3v1))
self.assertEqual("b4da5a", Variable.equivalenceMappingId(c2v1, c3v1))
c3v2 = model.component("component3").variable("variable2")
self.assertEqual("", c3v2.id())
annotator.assignVariableId(c3v2)
self.assertEqual("b4da5b", c3v2.id())
u = model.units(1)
self.assertEqual("", u.id())
annotator.assignUnitsId(u)
self.assertEqual("b4da5c", u.id())
r = model.component("component2").reset(0)
self.assertEqual("", r.id())
annotator.assignResetId(r)
self.assertEqual("b4da5d", r.id())
self.assertEqual("", r.testValueId())
annotator.assignTestValueId(r)
self.assertEqual("b4da5e", r.testValueId())
self.assertEqual("", r.resetValueId())
annotator.assignResetValueId(r)
self.assertEqual("b4da5f", r.resetValueId())
i = model.importSource(0)
self.assertEqual("", i.id())
annotator.assignImportSourceId(i)
self.assertEqual("b4da60", i.id())
self.assertEqual("", u.unitId(0))
annotator.assignUnitId(Unit(u, 0))
self.assertEqual("b4da61", u.unitId(0))
def test_auto_ids(self):
from libcellml import Annotator, Parser, Variable
annotator = Annotator()
parser = Parser()
model_string = file_contents("annotator/unique_ids.cellml")
model = parser.parseModel(model_string)
annotator.setModel(model)
annotator.clearAllIds()
annotator.assignAllIds()
self.assertEqual("b4da55", model.id())
self.assertEqual("b4da56", model.importSource(0).id())
self.assertEqual("b4da57", model.importSource(1).id())
self.assertEqual("b4da58", model.units(0).id())
self.assertEqual("b4da59", model.units(1).id())
self.assertEqual("b4da5a", model.units(2).id())
self.assertEqual("b4da5b", model.units(3).id())
self.assertEqual("b4da5c", model.units(1).unitId(0))
self.assertEqual("b4da5d", model.component(0).id())
self.assertEqual("b4da5e", model.component(1).id())
self.assertEqual("b4da5f", model.component(1).component(0).id())
self.assertEqual("b4da60", model.component(1).variable(0).id())
self.assertEqual("b4da61", model.component(1).variable(1).id())
self.assertEqual("b4da62", model.component(
1).component(0).variable(0).id())
self.assertEqual("b4da63", model.component(
1).component(0).variable(1).id())
self.assertEqual("b4da64", model.component(1).reset(0).id())
self.assertEqual("b4da65", model.component(1).reset(0).resetValueId())
self.assertEqual("b4da66", model.component(1).reset(0).testValueId())
c2v1 = model.component("component2").variable("variable1")
c2v2 = model.component("component2").variable("variable2")
c3v1 = model.component("component3").variable("variable1")
c3v2 = model.component("component3").variable("variable2")
self.assertEqual(
"b4da67", Variable.equivalenceConnectionId(c2v1, c3v1))
self.assertEqual(
"b4da67", Variable.equivalenceConnectionId(c2v2, c3v2))
self.assertEqual("b4da68", Variable.equivalenceMappingId(c2v1, c3v1))
self.assertEqual("b4da69", Variable.equivalenceMappingId(c2v2, c3v2))
self.assertEqual("b4da6a", model.component(
"component2").encapsulationId())
self.assertEqual("b4da6b", model.component(
"component3").encapsulationId())
self.assertEqual("b4da6c", model.encapsulationId())
def test_assign_id(self):
from libcellml import Annotator, Component, Model, Units
from libcellml import Unit, CellmlElementType
annotator = Annotator()
model = Model()
component1 = Component("c1")
component2 = Component("c2")
component3 = Component("c3")
component3.setId("id3")
units = Units("u1")
units.addUnit("volt")
model.addComponent(component1)
model.addComponent(component2)
component2.addComponent(component3)
model.addUnits(units)
annotator.setModel(model)
self.assertEqual("", component1.id())
self.assertEqual("", component2.id())
self.assertEqual("", units.unitId(0))
annotator.assignId(component1)
self.assertEqual("b4da55", component1.id())
self.assertEqual("", component2.id())
self.assertEqual("", units.unitId(0))
annotator.assignId(Unit(units, 0))
self.assertEqual("b4da55", component1.id())
self.assertEqual("", component2.id())
self.assertEqual("b4da56", units.unitId(0))
self.assertEqual("", annotator.assignId(None, CellmlElementType.UNDEFINED))
item = annotator.item("id3")
annotator.assignId(item)
self.assertEqual("b4da57", component3.id())
# For coverage only.
annotator._assignId(component2)
def test_auto_ids_group(self):
from libcellml import Annotator, Component, Model
from libcellml.enums import CellmlElementType_COMPONENT
annotator = Annotator()
model = Model()
component1 = Component("c1")
component2 = Component("c2")
component3 = Component("c3")
model.addComponent(component1)
model.addComponent(component2)
component2.addComponent(component3)
annotator.setModel(model)
self.assertEqual("", model.id())
self.assertEqual("", component1.id())
self.assertEqual("", component2.id())
self.assertEqual("", component3.id())
annotator.assignIds(CellmlElementType_COMPONENT)
self.assertEqual("", model.id())
self.assertEqual("b4da55", component1.id())
self.assertEqual("b4da56", component2.id())
self.assertEqual("b4da57", component3.id())
def test_auto_id_individual(self):
from libcellml import Annotator, CellmlElementType, Parser, Variable
from libcellml import Unit, VariablePair
annotator = Annotator()
parser = Parser()
model_string = file_contents("annotator/no_ids.cellml")
model = parser.parseModel(model_string)
annotator.setModel(model)
self.assertEqual("b4da55", annotator.assignId(model.component(0), CellmlElementType.COMPONENT))
self.assertEqual("b4da55", model.component(0).id())
self.assertEqual("b4da56", annotator.assignId(model.component("component2"), CellmlElementType.COMPONENT_REF))
self.assertEqual("b4da56", model.component("component2").encapsulationId())
self.assertEqual("b4da57", annotator.assignId(VariablePair(model.component("component2").variable("variable1"),
model.component("component2").variable(
"variable1").equivalentVariable(0)),
CellmlElementType.CONNECTION))
self.assertEqual("b4da57", Variable.equivalenceConnectionId(
model.component("component2").variable("variable1"),
model.component("component2").variable("variable1").equivalentVariable(0)))
self.assertEqual("b4da58", annotator.assignId(model.importSource(0)))
self.assertEqual("b4da58", model.importSource(0).id())
self.assertEqual("b4da59", annotator.assignId(VariablePair(model.component("component2").variable("variable2"),
model.component("component2").variable(
"variable2").equivalentVariable(0)),
CellmlElementType.MAP_VARIABLES))
self.assertEqual("b4da59", Variable.equivalenceMappingId(model.component("component2").variable("variable2"),
model.component("component2").variable(
"variable2").equivalentVariable(0)))
self.assertEqual("b4da5a", annotator.assignId(model, CellmlElementType.MODEL))
self.assertEqual("b4da5a", model.id())
self.assertEqual("b4da5b", annotator.assignId(model.component("component2").reset(0), CellmlElementType.RESET))
self.assertEqual("b4da5b", model.component("component2").reset(0).id())
self.assertEqual("b4da5c",
annotator.assignId(model.component("component2").reset(0), CellmlElementType.RESET_VALUE))
self.assertEqual("b4da5c", model.component("component2").reset(0).resetValueId())
self.assertEqual("b4da5d",
annotator.assignId(model.component("component2").reset(0), CellmlElementType.TEST_VALUE))
self.assertEqual("b4da5d", model.component("component2").reset(0).testValueId())
self.assertEqual("b4da5e", annotator.assignId(Unit(model.units(1), 0)))
self.assertEqual("b4da5e", model.units(1).unitId(0))
self.assertEqual("b4da5f", annotator.assignId(model.units(1)))
self.assertEqual("b4da5f", model.units(1).id())
self.assertEqual("b4da60", annotator.assignId(model.component(1).variable(0)))
self.assertEqual("b4da60", model.component(1).variable(0).id())
self.assertEqual("b4da61", annotator.assignId(model, CellmlElementType.ENCAPSULATION))
self.assertEqual("b4da61", model.encapsulationId())
def test_list_duplicate_ids(self):
from libcellml import Annotator, CellmlElementType, Parser
model_string = file_contents("annotator/lots_of_duplicate_ids.cellml")
parser = Parser()
annotator = Annotator()
model = parser.parseModel(model_string)
annotator.setModel(model)
id_list = annotator.duplicateIds()
expected_ids = ('duplicateId1', 'duplicateId2', 'duplicateId3', 'duplicateId4')
self.assertEqual(expected_ids, id_list)
# Get the collections by duplicated id.
c2v1 = model.component("component2").variable("variable1")
c2v2 = model.component("component2").variable("variable2")
c3v1 = model.component("component2").component("component3").variable("variable1")
c3v2 = model.component("component2").component("component3").variable("variable2")
c4v1 = model.component("component4").variable("variable1")
c4v2 = model.component("component4").variable("variable2")
expected_items = {
"duplicateId1": (
(CellmlElementType.UNITS, model.units("units2")),
(CellmlElementType.IMPORT, model.importSource(0)),
(CellmlElementType.MAP_VARIABLES, (c4v1, c2v1)),
(CellmlElementType.COMPONENT, model.component("component2")),
(CellmlElementType.CONNECTION, (c2v1, c3v1)),
(CellmlElementType.TEST_VALUE, model.component("component2").reset(0)),
(CellmlElementType.COMPONENT_REF, model.component("component2").component("component3")),
(CellmlElementType.VARIABLE,
model.component("component2").component("component3").variable("variable2")),
),
"duplicateId2": (
(CellmlElementType.MODEL, model),
(CellmlElementType.UNITS, model.units("units1")),
(CellmlElementType.UNITS, model.units("blob")),
(CellmlElementType.CONNECTION, (c4v2, c2v2)),
(CellmlElementType.VARIABLE, c4v2),
(CellmlElementType.COMPONENT_REF, model.component("component2")),
(CellmlElementType.RESET, model.component("component2").reset(0)),
(CellmlElementType.VARIABLE, c3v1),
),
"duplicateId3": (
(CellmlElementType.IMPORT, model.importSource(1)),
(CellmlElementType.UNITS, model.units("units3")),
(CellmlElementType.VARIABLE, c4v1),
(CellmlElementType.VARIABLE, c2v2),
(CellmlElementType.MAP_VARIABLES, (c2v2, c4v2)),
(CellmlElementType.COMPONENT, model.component("component2").component("component3")),
(CellmlElementType.ENCAPSULATION, model),
),
"duplicateId4": (
(CellmlElementType.UNIT, ((model.units("units2"), 0))),
(CellmlElementType.COMPONENT, model.component("component1")),
(CellmlElementType.COMPONENT, model.component("component4")),
(CellmlElementType.MAP_VARIABLES, (c2v1, c3v1)),
(CellmlElementType.VARIABLE, c2v1),
(CellmlElementType.MAP_VARIABLES, (c2v2, c4v2)),
(CellmlElementType.RESET_VALUE, model.component("component2").reset(0)),
)}
for id in expected_ids:
items_with_id = annotator.items(id)
count = 0
for item in items_with_id:
self.assertEqual(item[0], expected_items[id][count][0])
# SWIG copies the pointers so can't expect a comparison to be true. Not sure how to
# compare these ...
# self.assertEqual(item[1], expected_items[id][count][1])
count = count + 1
def test_raise_not_found_issue(self):
from libcellml import Annotator, Parser
annotator = Annotator()
parser = Parser()
message = 'Could not find an item with an id of \'i_dont_exist\' in the model.'
model = parser.parseModel(file_contents('annotator/unique_ids.cellml'))
annotator.setModel(model)
annotator.item('i_dont_exist')
self.assertEqual(1, annotator.issueCount())
self.assertEqual(message, annotator.issue(0).description())
def test_raise_non_unique_issue(self):
from libcellml import Annotator, Parser
annotator = Annotator()
parser = Parser()
non_unique_message = 'The id \'duplicateId\' occurs 29 times in the model so a unique item cannot be located.'
model = parser.parseModel(file_contents('annotator/duplicate_ids.cellml'))
annotator.setModel(model)
annotator.item('duplicateId')
self.assertEqual(1, annotator.issueCount())
self.assertEqual(non_unique_message, annotator.issue(0).description())
if __name__ == '__main__':
unittest.main()

# neurotin/psd/ratio.py (mscheltienne/neurotin-analysis, MIT)
import pandas as pd
from ..utils._docs import fill_doc
from .average import add_average_column
@fill_doc
def ratio(df_alpha, df_delta):
"""Compute the ratio of alpha/delta band power.
Parameters
----------
%(df_psd)s
Contains alpha-band PSD.
%(df_psd)s
Contains delta-band PSD.
Returns
-------
df : DataFrame
PSD ratio alpha/delta averaged by bin and channels. Columns:
participant : int - Participant ID
session : int - Session ID (1 to 15)
run : int - Run ID
phase : str - 'regulation' or 'non-regulation'
idx : ID of the phase within the run (0 to 9)
ratio : float - Averaged ratio alpha/delta
"""
if "avg" not in df_alpha.columns:
df_alpha = add_average_column(df_alpha)
if "avg" not in df_delta.columns:
df_alpha = add_average_column(df_delta)
# check keys
keys = ["participant", "session", "run", "phase", "idx"]
assert len(set(keys).intersection(df_alpha.columns)) == len(keys)
assert len(set(keys).intersection(df_delta.columns)) == len(keys)
assert sorted(df_alpha.columns) == sorted(df_delta.columns)
# container for new df with ratio of power
data = {key: [] for key in keys + ["ratio"]}
ratio = df_alpha["avg"] / df_delta["avg"]
ratio = ratio[ratio.notna()]
# fill new df dict
for i, r in ratio.iteritems():
alpha_ = df_alpha.loc[i]
delta_ = df_delta.loc[i]
# sanity-check
try:
assert alpha_["participant"] == delta_["participant"]
assert alpha_["session"] == delta_["session"]
assert alpha_["run"] == delta_["run"]
assert alpha_["phase"] == delta_["phase"]
assert alpha_["idx"] == delta_["idx"]
except AssertionError:
continue
data["participant"].append(alpha_["participant"])
data["session"].append(alpha_["session"])
data["run"].append(alpha_["run"])
data["phase"].append(alpha_["phase"])
data["idx"].append(alpha_["idx"])
data["ratio"].append(r)
# create df
df = pd.DataFrame.from_dict(data, orient="columns")
return df
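
# A brief usage sketch (df_alpha and df_delta would be the per-band PSD
# dataframes described in the docstring; the aggregation is illustrative):
#
# df = ratio(df_alpha, df_delta)
# df.groupby(["participant", "session"])["ratio"].mean()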

# tests/test_variable.py (ctgk/pygrad, MIT)
import numpy as np
import pytest
import numgrad as ng
def test_init_error():
with pytest.raises(ValueError):
ng.Variable(1, dtype=int)
def test_init():
ng.Variable([1, 2])
def test_init_pass_dtype():
assert ng.Variable(1, np.float32).dtype == np.float32
def test_default_dtype():
assert ng.Variable(1).dtype == np.float64
def test_non_default_dtype():
ng.config.dtype = np.float32
assert ng.Variable(1).dtype == np.float32
ng.config.dtype = np.float64
def test_ufunc():
a = ng.Variable([0, 1])
assert type(a + 0) == np.ndarray
@pytest.mark.parametrize('function, expect', [
(lambda: np.asarray(ng.Variable([0, 1])), np.array([0., 1.])),
(lambda: 0. in ng.Variable(0.), TypeError),
(lambda: 0. in ng.Variable([0.]), True),
(lambda: 1. in ng.Variable([[0., 1.], [2., 3.]]), True),
(lambda: -1. not in ng.Variable([[0., 1.], [2., 3.]]), True),
(lambda: float(ng.Variable(-1)), -1.),
(lambda: float(ng.Variable([0, -1])), TypeError),
(lambda: int(ng.Variable(-1)), -1),
(lambda: int(ng.Variable([0, -1])), TypeError),
(lambda: len(ng.Variable(-1)), TypeError),
(lambda: len(ng.Variable([0, -1])), 2),
(lambda: ng.Variable(0.).item(), 0.),
(lambda: ng.Variable([0.]).item(), 0.),
(lambda: ng.Variable([0., 1.]).item(), ValueError),
(lambda: ng.Variable(1).ndim, 0),
(lambda: ng.Variable([0, 1]).ndim, 1),
(lambda: ng.Variable(0).shape, tuple()),
(lambda: ng.Variable([0, 1]).shape, (2,)),
(lambda: ng.Variable(0).size, 1),
(lambda: ng.Variable([0, 1]).size, 2),
(lambda: ng.Variable(0.).tolist(), 0.),
(lambda: ng.Variable([0., 1.]).tolist(), [0., 1.]),
(lambda: ng.Variable([[0., 1.], [2., 3.]]).tolist(), [[0., 1.], [2., 3.]]),
])
def test_method_and_property(function, expect):
if isinstance(expect, type) and issubclass(expect, Exception):
with pytest.raises(expect):
function()
elif isinstance(expect, np.ndarray):
assert np.allclose(expect, function())
else:
assert function() == expect
@pytest.mark.parametrize('self, method, args', [
(ng.Variable([1, -1]), '__iadd__', 1),
(ng.Variable([1, -1]), '__isub__', 1),
(ng.Variable([1, -1]), '__imul__', 2),
(ng.Variable([1, -1]), '__itruediv__', 2),
])
def test_inplace(self, method, args):
if not isinstance(args, tuple):
args = (args,)
expect_id = id(self)
expect_id_of_data = id(self._data)
getattr(self, method)(*args)
assert expect_id == id(self)
assert expect_id_of_data == id(self._data)
with pytest.raises(ValueError):
with ng.Graph():
getattr(self, method)(*args)
if __name__ == '__main__':
pytest.main([__file__])
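# A short usage sketch distilled from the parametrized cases above; every
# behaviour shown is one the tests assert, nothing beyond that is assumed.
# v = ng.Variable([0., 1.])
# assert len(v) == 2 and v.shape == (2,) and v.size == 2
# assert v.tolist() == [0., 1.]
# assert float(ng.Variable(-1)) == -1.0  # scalar conversion works on 0-d variables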
| 1,026 | 0 | 182 |
68d5e93d27f4249eff91d5f5e7f3be29efc707ac | 489 | py | Python | examples/OpenCV/client_cv.py | BassmanBiff/NumpySocket | 67eb3a1bd7aaae4a08d17e8b76a7cb2f2e7bf2c9 | [
"MIT"
] | null | null | null | examples/OpenCV/client_cv.py | BassmanBiff/NumpySocket | 67eb3a1bd7aaae4a08d17e8b76a7cb2f2e7bf2c9 | [
"MIT"
] | null | null | null | examples/OpenCV/client_cv.py | BassmanBiff/NumpySocket | 67eb3a1bd7aaae4a08d17e8b76a7cb2f2e7bf2c9 | [
"MIT"
] | 1 | 2019-08-28T13:55:33.000Z | 2019-08-28T13:55:33.000Z | # From https://stackoverflow.com/questions/30988033/
# sending-live-video-frame-over-network-in-python-opencv
from numpysocket import NumpySocket
import cv2
npSocket = NumpySocket()
npSocket.startClient(9999)
# Receive and display frames until the user presses Q
while True:
# Capture frame-by-frame
frame = npSocket.recieveNumpy()
cv2.imshow('Frame', frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
npSocket.endServer()
print("Closing")
| 22.227273 | 61 | 0.707566 | # From https://stackoverflow.com/questions/30988033/
# sending-live-video-frame-over-network-in-python-opencv
from numpysocket import NumpySocket
import cv2
npSocket = NumpySocket()
npSocket.startClient(9999)
# Receive and display frames until the user presses Q
while True:
# Capture frame-by-frame
frame = npSocket.recieveNumpy()
cv2.imshow('Frame', frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
npSocket.endServer()
print("Closing")
| 0 | 0 | 0 |
85fc2248d4d3ed6a0a85011b45f719c4363bb0ff | 2,768 | py | Python | cliAssistant.py | dailyideas/naming-media-files-by-datetime | b678387887c4d801269c9c9302183472425ce39b | [
"MIT"
] | null | null | null | cliAssistant.py | dailyideas/naming-media-files-by-datetime | b678387887c4d801269c9c9302183472425ce39b | [
"MIT"
] | null | null | null | cliAssistant.py | dailyideas/naming-media-files-by-datetime | b678387887c4d801269c9c9302183472425ce39b | [
"MIT"
] | null | null | null | import datetime, os, sys
import subprocess
#### #### #### #### ####
#### Functions ####
#### #### #### #### ####
#### #### #### #### ####
#### Main ####
#### #### #### #### ####
try:
## Variables initialization
targetDirectory = GetTargetDirectory()
alternativeDate = GetAlternativeDate()
isRecursiveSearch = GetIsRecursiveSearch()
## Main
command = ["./main.exe", "--dir", targetDirectory]
if isinstance(alternativeDate, datetime.date):
command.extend( ["--date", alternativeDate.isoformat() ] )
if isRecursiveSearch is True:
command.append("-r")
subprocess.call(command)
except KeyboardInterrupt:
pass
finally:
ExitHandler()
| 34.6 | 154 | 0.634393 | import datetime, os, sys
import subprocess
#### #### #### #### ####
#### Functions ####
#### #### #### #### ####
def ExitHandler() -> None:
print("Bye")
input("Press any key to continue ...")
sys.exit() ## NOTE: try-except will catch sys.exit() as it raises SystemExit
def CheckAndRaiseExiting(userInput:str) -> None:
if userInput == "exit":
raise SystemExit
def TipsBeforeInput() -> None:
print("If you want to exit, Enter exit or press Ctrl-C")
def GetTargetDirectory() -> str:
while True:
TipsBeforeInput()
targetDirectory = input(r"Enter the location of media files for renaming (e.g. C:\Users\Public\Pictures): ")
targetDirectory = targetDirectory.strip()
CheckAndRaiseExiting(targetDirectory)
if os.path.isdir(targetDirectory):
return targetDirectory
print("The location does not exist. Try again.")
def GetAlternativeDate() -> datetime.date:
while True:
alternativeDate = input("Date (YYYY-MM-DD) for files naming if date & time information cannot be found. Leave it empty if you do not care: ")
alternativeDate = alternativeDate.strip()
CheckAndRaiseExiting(alternativeDate)
if alternativeDate == "":
print("You did not provide an alternative date. File names will be preceded by 00000000_000000 if date & time information cannot be found")
return None
try:
alternativeDate = datetime.date.fromisoformat(alternativeDate)
return alternativeDate
except ValueError:
print("Your input is not a valid date in YYYY-MM-DD format. Try again.")
def GetIsRecursiveSearch() -> bool:
while True:
isRecursiveSearch = input("Do you want the program to recursively search for files to rename in the target folder and all its sub-folders? (y/N)")
isRecursiveSearch = isRecursiveSearch.strip()
CheckAndRaiseExiting(isRecursiveSearch)
if isRecursiveSearch in ["y", "Y", "yes", "Yes", "YES"]:
return True
if isRecursiveSearch in ["", "n", "N", "no", "No", "NO"]:
return False
print("Please enter y or n")
#### #### #### #### ####
#### Main ####
#### #### #### #### ####
try:
## Variables initialization
targetDirectory = GetTargetDirectory()
alternativeDate = GetAlternativeDate()
isRecursiveSearch = GetIsRecursiveSearch()
## Main
command = ["./main.exe", "--dir", targetDirectory]
if isinstance(alternativeDate, datetime.date):
command.extend( ["--date", alternativeDate.isoformat() ] )
if isRecursiveSearch is True:
command.append("-r")
subprocess.call(command)
except KeyboardInterrupt:
pass
finally:
ExitHandler()
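# For illustration (hypothetical answers): with target directory
# C:\Users\Public\Pictures, alternative date 2021-06-01 and recursive search
# enabled, the command assembled above would be
# ["./main.exe", "--dir", "C:\\Users\\Public\\Pictures", "--date", "2021-06-01", "-r"]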
| 1,929 | 0 | 137 |
54e5a8165da0d8dfbe5e8c477e69f5e99c3172a7 | 871 | py | Python | opentsdb/tests/conftest.py | razvandimescu/opentsdb-py | 61c15302468769121f94323493e88cb51efcea15 | [
"MIT"
] | 48 | 2016-12-27T10:11:41.000Z | 2021-11-15T16:05:24.000Z | opentsdb/tests/conftest.py | razvandimescu/opentsdb-py | 61c15302468769121f94323493e88cb51efcea15 | [
"MIT"
] | 8 | 2017-10-08T16:20:30.000Z | 2022-02-23T08:36:52.000Z | opentsdb/tests/conftest.py | razvandimescu/opentsdb-py | 61c15302468769121f94323493e88cb51efcea15 | [
"MIT"
] | 17 | 2017-10-01T01:14:55.000Z | 2021-11-15T16:05:24.000Z | from os import environ
import pytest
from opentsdb import TSDBClient, TSDBConnectProtocols, Counter
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 21.775 | 96 | 0.774971 | from os import environ
import pytest
from opentsdb import TSDBClient, TSDBConnectProtocols, Counter
@pytest.fixture
def tsdb_host():
return environ.get('OPEN_TSDB_HOST', '127.0.0.1')
@pytest.fixture
def tsdb_port():
return int(environ.get('OPEN_TSDB_PORT', '4242'))
@pytest.fixture
def http_client(tsdb_host, tsdb_port):
return TSDBClient(tsdb_host, tsdb_port, host_tag=False)
@pytest.fixture
def http_client2(tsdb_host, tsdb_port):
return TSDBClient(tsdb_host, tsdb_port, host_tag=True)
class Metrics(TSDBClient):
PREDEFINED_METRIC = Counter('test.predefined.metric')
@pytest.fixture
def http_client3(tsdb_host, tsdb_port):
return Metrics(tsdb_host, tsdb_port, host_tag=True)
@pytest.fixture
def telnet_client(tsdb_host, tsdb_port):
return TSDBClient(tsdb_host, tsdb_port, protocol=TSDBConnectProtocols.TELNET, host_tag=True)
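# Hedged sketch of a test that would consume the fixtures above; the assertion
# is illustrative only, while the fixture names are the ones defined here.
# def test_clients_are_constructed(http_client, telnet_client):
#     assert http_client is not None
#     assert telnet_client is not None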
| 442 | 63 | 155 |
0cb89ef3105a3391063eea1ce036c19f90092b77 | 145 | py | Python | bitcoinExchange/exchange/apps.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | [
"MIT"
] | 1 | 2021-09-08T16:39:07.000Z | 2021-09-08T16:39:07.000Z | bitcoinExchange/exchange/apps.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | [
"MIT"
] | null | null | null | bitcoinExchange/exchange/apps.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16.111111 | 33 | 0.703448 | from django.apps import AppConfig
class ExchangeConfig(AppConfig):
name = 'exchange'
def ready(self):
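        # Importing the signals module registers its receivers once the app
        # loads -- the usual Django pattern for hooking up signal handlers.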
import exchange.signals
| 27 | 60 | 23 |
a6c8600552c317e6e49247d8616f175a7c726357 | 1,804 | py | Python | cuhk01/imgdump.py | hashknot/person-reid | 64a8c441388ea53555669986338af0b40bd854de | [
"MIT"
] | null | null | null | cuhk01/imgdump.py | hashknot/person-reid | 64a8c441388ea53555669986338af0b40bd854de | [
"MIT"
] | null | null | null | cuhk01/imgdump.py | hashknot/person-reid | 64a8c441388ea53555669986338af0b40bd854de | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
import numpy as np
import os
import random
import sys
from scipy.ndimage import imread
dataset = sys.argv[1]
outputdir = sys.argv[2] if len(sys.argv) == 3 else 'data'
minibatches = 10
minibatch_size = 500
split_channels = lambda x: np.array((x[:,:,0], x[:,:,1], x[:,:,2]))
images = os.listdir(dataset)
images.sort()
image_groups = [images[i:i+4] for i in xrange(0, len(images), 4)]
path = lambda p: os.path.join(dataset, p)
records = []
random.seed()
for image_group in image_groups:
random.shuffle(image_group)
a_file, b_file = image_group[:2]
a_array = split_channels(imread(path(a_file)))
b_array = split_channels(imread(path(b_file)))
records.append((a_array, b_array, np.uint8(1)))
a_file, b_file = image_group[2:]
a_array = split_channels(imread(path(a_file)))
b_array = split_channels(imread(path(b_file)))
records.append((a_array, b_array, np.uint8(1)))
images = os.listdir(dataset)
random.shuffle(images)
images_2 = os.listdir(dataset)
random.shuffle(images_2)
for a_file, b_file in zip(images, images_2):
a_array = split_channels(imread(path(a_file)))
b_array = split_channels(imread(path(b_file)))
    label = 1 if a_file[:4] == b_file[:4] else 0
    records.append((a_array, b_array, np.uint8(label)))
random.shuffle(records)
for i in xrange(minibatches):
outfile_path = os.path.join(outputdir, 'data_batch_{}.bin'.format(i+1))
with open(outfile_path, 'wb') as fh:
for j in xrange(i*minibatch_size, (i+1)*minibatch_size):
for r in records[j]:
r.tofile(fh)
outfile_path = os.path.join(outputdir, 'data_test.bin')
with open(outfile_path, 'wb') as fh:
for j in xrange(minibatches*minibatch_size, len(records)):
for r in records[j]:
r.tofile(fh)
| 27.333333 | 75 | 0.681264 | #!/usr/bin/env python2.7
import numpy as np
import os
import random
import sys
from scipy.ndimage import imread
dataset = sys.argv[1]
outputdir = sys.argv[2] if len(sys.argv) == 3 else 'data'
minibatches = 10
minibatch_size = 500
split_channels = lambda x: np.array((x[:,:,0], x[:,:,1], x[:,:,2]))
images = os.listdir(dataset)
images.sort()
image_groups = [images[i:i+4] for i in xrange(0, len(images), 4)]
path = lambda p: os.path.join(dataset, p)
records = []
random.seed()
for image_group in image_groups:
random.shuffle(image_group)
a_file, b_file = image_group[:2]
a_array = split_channels(imread(path(a_file)))
b_array = split_channels(imread(path(b_file)))
records.append((a_array, b_array, np.uint8(1)))
a_file, b_file = image_group[2:]
a_array = split_channels(imread(path(a_file)))
b_array = split_channels(imread(path(b_file)))
records.append((a_array, b_array, np.uint8(1)))
images = os.listdir(dataset)
random.shuffle(images)
images_2 = os.listdir(dataset)
random.shuffle(images_2)
for a_file, b_file in zip(images, images_2):
a_array = split_channels(imread(path(a_file)))
b_array = split_channels(imread(path(b_file)))
    label = 1 if a_file[:4] == b_file[:4] else 0
    records.append((a_array, b_array, np.uint8(label)))
random.shuffle(records)
for i in xrange(minibatches):
outfile_path = os.path.join(outputdir, 'data_batch_{}.bin'.format(i+1))
with open(outfile_path, 'wb') as fh:
for j in xrange(i*minibatch_size, (i+1)*minibatch_size):
for r in records[j]:
r.tofile(fh)
outfile_path = os.path.join(outputdir, 'data_test.bin')
with open(outfile_path, 'wb') as fh:
for j in xrange(minibatches*minibatch_size, len(records)):
for r in records[j]:
r.tofile(fh)
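# Layout note (sketch): each record written above is two channel-first uint8
# image arrays followed by one uint8 label, i.e. 3*H*W + 3*H*W + 1 bytes per
# record. A reader would need the dataset's image dimensions (assumed known,
# they are not stated in this script) to reshape the stream back into
# (a, b, label) triples with np.fromfile.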
| 0 | 0 | 0 |
e17f1b01d929a76c04c55e4b7778db57f587b103 | 1,428 | py | Python | log_request_id/session.py | adaniels21487/django-log-request-id | 54d1afc17ac292c6243dc05d11968fef3997e767 | [
"BSD-2-Clause"
] | 254 | 2015-01-16T05:47:19.000Z | 2022-03-27T22:42:24.000Z | log_request_id/session.py | adaniels21487/django-log-request-id | 54d1afc17ac292c6243dc05d11968fef3997e767 | [
"BSD-2-Clause"
] | 49 | 2015-01-08T08:46:27.000Z | 2022-03-10T10:20:13.000Z | log_request_id/session.py | adaniels21487/django-log-request-id | 54d1afc17ac292c6243dc05d11968fef3997e767 | [
"BSD-2-Clause"
] | 69 | 2015-01-07T15:10:31.000Z | 2022-03-21T07:54:20.000Z | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from requests import Session as BaseSession
from log_request_id import DEFAULT_NO_REQUEST_ID, OUTGOING_REQUEST_ID_HEADER_SETTING, REQUEST_ID_HEADER_SETTING, local
| 44.625 | 118 | 0.641457 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from requests import Session as BaseSession
from log_request_id import DEFAULT_NO_REQUEST_ID, OUTGOING_REQUEST_ID_HEADER_SETTING, REQUEST_ID_HEADER_SETTING, local
class Session(BaseSession):
def __init__(self, *args, **kwargs):
if hasattr(settings, OUTGOING_REQUEST_ID_HEADER_SETTING):
self.request_id_header = getattr(settings, OUTGOING_REQUEST_ID_HEADER_SETTING)
elif hasattr(settings, REQUEST_ID_HEADER_SETTING):
self.request_id_header = getattr(settings, REQUEST_ID_HEADER_SETTING)
else:
raise ImproperlyConfigured("The %s or %s settings must be configured in "
"order to use %s" % (
OUTGOING_REQUEST_ID_HEADER_SETTING,
REQUEST_ID_HEADER_SETTING, __name__
))
super(Session, self).__init__(*args, **kwargs)
def prepare_request(self, request):
"""Include the request ID, if available, in the outgoing request"""
try:
request_id = local.request_id
if self.request_id_header:
request.headers[self.request_id_header] = request_id
except AttributeError:
pass
return super(Session, self).prepare_request(request)
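# Hedged usage sketch: once the request-id settings are configured, this class
# is a drop-in replacement for requests.Session; the URL is a placeholder.
# session = Session()
# response = session.get('https://example.com/api')  # header added when a request id is set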
| 729 | 422 | 23 |
e544c1c41f6b5815b1af020f16d4d26968390842 | 6,956 | py | Python | mopidy_rehabradio/playback.py | rehabradio/mopidy-rehabradio | ac1b405768bd916994f97f2bb0185e8c02ccec0f | [
"Apache-2.0"
] | null | null | null | mopidy_rehabradio/playback.py | rehabradio/mopidy-rehabradio | ac1b405768bd916994f97f2bb0185e8c02ccec0f | [
"Apache-2.0"
] | null | null | null | mopidy_rehabradio/playback.py | rehabradio/mopidy-rehabradio | ac1b405768bd916994f97f2bb0185e8c02ccec0f | [
"Apache-2.0"
] | null | null | null | # future imports
from __future__ import unicode_literals
# stdlib imports
import logging
import time
import threading
# third-party imports
import pykka
from mopidy.core import CoreListener
logger = logging.getLogger(__name__)
class WebhookPlayback(pykka.ThreadingActor, CoreListener):
"""Control the tracklist and playback functionality of mopidy.
Fetches the head track, adds to tracklist, and starts playback.
If a timelapse is set, then the track is seeked to the given position.
"""
popped = False
queue = None
track = None
next_track = None
stop_update_thread = True
stop_track_thread = True
def on_start(self):
"""Grab the current head track, and add it to the tracklist.
Starts the play method.
"""
logger.info('ON START CALLED')
self.initiate()
def on_stop(self):
"""Stops the playback of the current track,
        and cleans up all the threads and the tracklist.
"""
# Stop playback
self.core.playback.stop()
# Stop any new timers
self.stop_update_thread = True
self.stop_track_thread = True
# Empty queue
self.core.tracklist.clear()
def on_event(self, event):
"""Fires functions base of mopidy tracklist events
"""
state = self.core.playback.state.get()
if event == 'tracklist_changed' and state == 'stopped':
logger.info('CALLING NEXT')
return self.next()
def play(self):
"""Starts playing the first track in the tracklist.
        If the track has a "time_position" value, seek the track to that position.
"""
logger.info('PLAY CALLED')
# Start track
self.core.playback.play()
        # Annoyingly, we can't start a track at a given time,
# So once the track has started we can seek it to the correct position
if self.track['time_position']:
self.seek()
self.stop_update_thread = False
self.stop_track_thread = False
self.update_thread()
self.track_thread()
def update_thread(self):
"""Sends updates to the server every 3 seconds
on the status of the playing track.
"""
# If stop_thread is set, then return causing the loop to break
if self.stop_update_thread:
return
# Ensure there is a track to report on
if self.core.playback.current_track.get():
# Ensure track has started and that it is also not about to end.
time_position = self.core.playback.time_position.get()
total = self.track['track']['duration_ms']
if 1000 < time_position < (total - 9000):
# Send updates to the server
kwargs = {
'track_id': self.track['id'],
'queue_id': self.queue,
'state': self.core.playback.state.get(),
'time_position': self.core.playback.time_position.get(),
}
self.session.update_head(kwargs)
# Loop method every 3 seconds
thread_timer = threading.Timer(3, self.update_thread)
thread_timer.start()
def track_thread(self):
"""Watches the track to know when to trigger fetching a the next track.
"""
# If stop_thread is set, then return causing the loop to break
if self.stop_track_thread:
return
if self.track.get('track'):
# Work out the time remaining on the track
if self.track['track']['duration_ms'] is not None:
t_end = self.track['track']['duration_ms']
t_current = self.core.playback.time_position.get()
time_til_end = t_end - t_current
            # If there are less than 5 seconds left on the track,
            # or if no track is currently playing,
            # add the next track to the tracklist
if time_til_end < 5000 or not self.core.playback.current_track.get():
# Stop updates
self.stop_update_thread = True
# Delete the current track from the server and fetch the next.
# popped param is set to ensure only one delete request is sent.
                    # Further requests should be fetches rather than deletes.
logger.info('POPPING TRACK')
if self.popped:
next_track = self.session.fetch_head()
else:
self.popped = True
kwargs = {'queue_id': self.queue}
next_track = self.session.pop_head(kwargs)
logger.info('############')
# If a track is found, added it
if next_track.get('track'):
self.next_track = next_track
self.queue = self.next_track['queue']
self.popped = False
# Exit loop
return
        # Loop method every second
thread_timer = threading.Timer(1, self.track_thread)
thread_timer.start()
| 34.954774 | 85 | 0.576768 | # future imports
from __future__ import unicode_literals
# stdlib imports
import logging
import time
import threading
# third-party imports
import pykka
from mopidy.core import CoreListener
logger = logging.getLogger(__name__)
class WebhookPlayback(pykka.ThreadingActor, CoreListener):
"""Control the tracklist and playback functionality of mopidy.
Fetches the head track, adds to tracklist, and starts playback.
If a timelapse is set, then the track is seeked to the given position.
"""
popped = False
queue = None
track = None
next_track = None
stop_update_thread = True
stop_track_thread = True
def __init__(self, config, core, session):
super(WebhookPlayback, self).__init__()
self.config = config
self.core = core
self.core.tracklist.consume = True
self.session = session
def on_start(self):
"""Grab the current head track, and add it to the tracklist.
Starts the play method.
"""
logger.info('ON START CALLED')
self.initiate()
    def initiate(self):
        """Loads in the top track of a given queue.
        Note: will loop itself every second until a track is loaded.
        """
        logger.info('INITIATE CALLED')
self.track = self.session.fetch_head()
if self.track.get('track') is None:
time.sleep(1)
return self.initiate()
self.queue = self.track['queue']
self.core.tracklist.add(uri=self.track['track']['uri'])
self.play()
def on_stop(self):
"""Stops the playback of the current track,
        and cleans up all the threads and the tracklist.
"""
# Stop playback
self.core.playback.stop()
# Stop any new timers
self.stop_update_thread = True
self.stop_track_thread = True
# Empty queue
self.core.tracklist.clear()
def on_event(self, event):
"""Fires functions base of mopidy tracklist events
"""
state = self.core.playback.state.get()
if event == 'tracklist_changed' and state == 'stopped':
logger.info('CALLING NEXT')
return self.next()
def play(self):
"""Starts playing the first track in the tracklist.
        If the track has a "time_position" value, seek the track to that position.
"""
logger.info('PLAY CALLED')
# Start track
self.core.playback.play()
        # Annoyingly, we can't start a track at a given time,
# So once the track has started we can seek it to the correct position
if self.track['time_position']:
self.seek()
self.stop_update_thread = False
self.stop_track_thread = False
self.update_thread()
self.track_thread()
    def seek(self):
        """Seeks a track to a given location.
        Note there is a 1.5 second delay to allow mopidy to settle.
        """
        logger.info('SEEK CALLED')
seek_time = self.track['time_position'] + 2000
# If the seeked time is longer than the tracks duration,
# then start the next track instead
if seek_time >= self.track['track']['duration_ms']:
return self.next()
# Delay required to allow mopidy to setup track
time.sleep(1.5)
self.core.playback.seek(seek_time)
    def next(self):
        """Plays the next track which is stored locally.
        If no track is found then it loops every second until a track is found.
        """
        logger.info('NEXT CALLED')
if self.next_track is None:
time.sleep(1)
return self.next()
self.track = self.next_track
self.next_track = None
self.core.tracklist.add(uri=self.track['track']['uri'])
self.play()
def update_thread(self):
"""Sends updates to the server every 3 seconds
on the status of the playing track.
"""
# If stop_thread is set, then return causing the loop to break
if self.stop_update_thread:
return
# Ensure there is a track to report on
if self.core.playback.current_track.get():
# Ensure track has started and that it is also not about to end.
time_position = self.core.playback.time_position.get()
total = self.track['track']['duration_ms']
if 1000 < time_position < (total - 9000):
# Send updates to the server
kwargs = {
'track_id': self.track['id'],
'queue_id': self.queue,
'state': self.core.playback.state.get(),
'time_position': self.core.playback.time_position.get(),
}
self.session.update_head(kwargs)
# Loop method every 3 seconds
thread_timer = threading.Timer(3, self.update_thread)
thread_timer.start()
def track_thread(self):
"""Watches the track to know when to trigger fetching a the next track.
"""
# If stop_thread is set, then return causing the loop to break
if self.stop_track_thread:
return
if self.track.get('track'):
# Work out the time remaining on the track
if self.track['track']['duration_ms'] is not None:
t_end = self.track['track']['duration_ms']
t_current = self.core.playback.time_position.get()
time_til_end = t_end - t_current
            # If there are less than 5 seconds left on the track,
            # or if no track is currently playing,
            # add the next track to the tracklist
if time_til_end < 5000 or not self.core.playback.current_track.get():
# Stop updates
self.stop_update_thread = True
# Delete the current track from the server and fetch the next.
# popped param is set to ensure only one delete request is sent.
                    # Further requests should be fetches rather than deletes.
logger.info('POPPING TRACK')
if self.popped:
next_track = self.session.fetch_head()
else:
self.popped = True
kwargs = {'queue_id': self.queue}
next_track = self.session.pop_head(kwargs)
logger.info('############')
# If a track is found, added it
if next_track.get('track'):
self.next_track = next_track
self.queue = self.next_track['queue']
self.popped = False
# Exit loop
return
        # Loop method every second
thread_timer = threading.Timer(1, self.track_thread)
thread_timer.start()
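# The polling style used by update_thread/track_thread above, reduced to a
# standalone sketch: a threading.Timer that re-arms itself until a stop flag
# is set. The names below are illustrative, not part of the mopidy/pykka API.
#
# import threading
# stop = False
# def poll():
#     if stop:
#         return
#     ...  # periodic work goes here
#     threading.Timer(3, poll).start()
# poll()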
| 1,605 | 0 | 108 |
5441bba384db1d128e7c42810c49df716c0a04a0 | 158 | py | Python | bling-dump.py | Webstume/scantoken | a85f2ada3d7f3237ab55f29079d04e5a8bf6bbd7 | [
"Apache-2.0"
] | null | null | null | bling-dump.py | Webstume/scantoken | a85f2ada3d7f3237ab55f29079d04e5a8bf6bbd7 | [
"Apache-2.0"
] | null | null | null | bling-dump.py | Webstume/scantoken | a85f2ada3d7f3237ab55f29079d04e5a8bf6bbd7 | [
"Apache-2.0"
] | 1 | 2020-10-28T20:42:18.000Z | 2020-10-28T20:42:18.000Z | from lxml import html
import requests
import sqlite3
c = sqlite3.connect('bling.db')
oldscore={}
for row in c.execute('SELECT * FROM users'):
print(row)
| 14.363636 | 44 | 0.721519 | from lxml import html
import requests
import sqlite3
c = sqlite3.connect('bling.db')
oldscore={}
for row in c.execute('SELECT * FROM users'):
print(row)
| 0 | 0 | 0 |
d898719794542f33de1c997341b8110887ee93f7 | 8,084 | py | Python | vgn/functions.py | becheran/vgn | 908159ff05ba3e3c53d154685164a5de5b2b94a4 | [
"MIT"
] | 5 | 2020-03-06T20:52:23.000Z | 2022-01-09T21:37:00.000Z | vgn/functions.py | becheran/vgn | 908159ff05ba3e3c53d154685164a5de5b2b94a4 | [
"MIT"
] | null | null | null | vgn/functions.py | becheran/vgn | 908159ff05ba3e3c53d154685164a5de5b2b94a4 | [
"MIT"
] | null | null | null | from vgn.exceptions import VgnGetError
from vgn.data_classes import *
import vgn.converter as conv
import datetime
import asyncio
import aiohttp
if __name__ == '__main__':
asyncio.run(main())
| 41.670103 | 120 | 0.599332 | from vgn.exceptions import VgnGetError
from vgn.data_classes import *
import vgn.converter as conv
import datetime
import asyncio
import aiohttp
class VGNClient:
async def __aenter__(self):
self._client_session = aiohttp.ClientSession()
return self
async def __aexit__(self, *args, **kwargs):
await self._client_session.__aexit__(*args, **kwargs)
@staticmethod
def _url(path):
return 'https://start.vag.de/dm/api/v1/' + path
async def _get(self, query) -> dict:
async with self._client_session.get(query) as resp:
if resp.status == 200:
return await resp.json()
else:
raise VgnGetError(f'Could not resolve query {query}. Returned {resp.status}')
async def api_version(self) -> str:
""" Version info from the VGN REST-API."""
query = self._url('haltestellen/VGN/location?lon=0&lat=0')
return (await self._get(query)).get('Metadata').get('Version')
async def all_stations(self) -> List[Station]:
""" List of all stations.
Returns:
list: List of stations for the VGN transport association.
"""
query = self._url(f'haltestellen/VGN')
return conv.to_stations((await self._get(query)).get('Haltestellen'))
async def stations(self, station_name: str) -> List[Station]:
""" List of stations for the specified station name.
Args:
station_name: Name of a station.
        Returns:
            list: List of station objects for the given station_name.
        """
        query = self._url(f'haltestellen/VGN?name={station_name}') if station_name else self._url('haltestellen/VGN')
return conv.to_stations((await self._get(query)).get('Haltestellen'))
async def nearby_stations(self, location: Coordinates, radius: int = 1000) -> List[Station]:
""" List stops close to a given location.
Args:
location: Search for stations close to this location.
radius (optional): Radius for search in meter
Returns:
list: List of station objects in radius of the given location.
"""
query = self._url(f'haltestellen/VGN/location?lon={location.longitude}&lat={location.latitude}&radius={radius}')
return conv.to_stations((await self._get(query)).get('Haltestellen'))
async def station_additional_information(self, stop_id: int) -> List[str]:
""" List of information text strings for a given stop.
Args:
stop_id (optional): The VGN stop identifier number.
Returns:
list: List of strings containing additional information for the given station.
"""
query = self._url(f'abfahrten/VGN/{stop_id}')
return (await self._get(query)).get('Sonderinformationen')
async def departure_schedule(self,
stop_id: int,
transport_type: List[TransportType] = [TransportType.BUS, TransportType.TRAM,
TransportType.SUBWAY],
timespan: int = 10,
timedelay: int = 5,
limit_result: int = 100) -> List[Departure]:
""" Departures for a specific stop.
Args:
stop_id: The VGN stop identifier number.
            transport_type: Restrict the results to the given means of transportation.
limit_result (optional): Limit amount of returned results. Default limit is 100.
timedelay (optional): Time delay for the request in minutes.
timespan (optional): Time window for the query in minutes.
Returns:
list: List of departures for the given station.
"""
if limit_result <= 0:
limit_result = 100
transport_type_str = ','.join(list(map(lambda x: x.value, transport_type)))
query = self._url(
f'abfahrten/VGN/{stop_id}'
f'?product={transport_type_str}'
f'×pan={timespan}'
f'&timedelay={timedelay}'
f'&limitcount={limit_result}')
return conv.to_departures((await self._get(query)).get('Abfahrten'))
async def departure_schedule_for_line(self,
stop_id: int,
line_name: str,
timespan: int = 10,
timedelay: int = 5,
limit_result: int = 100) -> List[Departure]:
""" List of Departures for a specific stop and line.
Args:
line_name: Name of the line. For example 'U2' for the underground line two.
stop_id: The VGN stop identifier number.
limit_result (optional): Limit amount of returned results. Default limit is 100.
timedelay (optional): Time delay for the request in minutes.
timespan (optional): Time window for the query in minutes.
Returns:
list: List of departures for the given station and line.
"""
if limit_result <= 0:
limit_result = 100
query = self._url(f'abfahrten/VGN/{stop_id}/{line_name}'
f'?timespan={timespan}'
f'&timedelay={timedelay}'
f'&limitcount={limit_result}')
return conv.to_departures((await self._get(query)).get('Abfahrten'))
async def rides(self, transport_type: TransportType, time_span: int = 60) -> List[Ride]:
""" All running and starting rides for a given transport type within a given time frame (default 60 minutes)
Args:
transport_type: Transportation type. For example Bus.
time_span (optional): Time window in minutes (default 60 minutes)
Returns:
list: List of rides for the given transport type within the time window.
"""
query = self._url(f'fahrten/{transport_type.value}?timespan={time_span}')
return conv.to_rides((await self._get(query)).get('Fahrten'))
async def route(self, transport_type: TransportType, ride_id: int) -> Route:
""" Route for a given transport type and ride number for the current operating day
Args:
transport_type: Transportation type. For example Bus.
ride_id: Ride number for the given transportation type
Returns:
Route: The route for the given ride_number
"""
query = self._url(f'fahrten/{transport_type.value}/{ride_id}')
return conv.to_route((await self._get(query)))
async def route_for_day(self, transport_type: TransportType, ride_id: int, day: datetime.date) -> Route:
""" Route for a given transport type, ride number and operating day.
Args:
transport_type: Transportation type. For example Bus.
ride_id: Ride number for the given transportation type.
day: Operating day date for the request.
Returns:
Route: The route for the given ride_number on the requested day.
"""
query = self._url(f'fahrten/{transport_type.value}/{day}/{ride_id}')
return conv.to_route((await self._get(query)))
async def main():
async with VGNClient() as vgn_client:
res = await asyncio.gather(
vgn_client.api_version(),
vgn_client.all_stations(),
vgn_client.departure_schedule(704),
vgn_client.departure_schedule_for_line(704, "U2"),
vgn_client.rides(TransportType.BUS, 30),
)
print(f'Api version: {res[0]}')
print(f'Stations in nbg: {str(len(res[1]))}')
print(f'Departures at plaerrer in nbg: {res[2]}')
print(f'Departures of underground line 2 at plaerrer in nbg: {res[3]}')
print(f'Bus departures in the next 30 minutes: {res[4]}')
if __name__ == '__main__':
asyncio.run(main())
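# Error-handling sketch (hedged): _get raises VgnGetError on non-200 responses,
# so a caller can wrap a query like this; stop id 704 reuses the demo above.
#
# async def safe_departures(client: VGNClient):
#     try:
#         return await client.departure_schedule(704)
#     except VgnGetError as exc:
#         print(f'query failed: {exc}')
#         return []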
| 1,081 | 6,757 | 46 |
9943bc8976a4b591060e0a0646265c5cdbf12a4e | 88 | py | Python | src/dcat_ap_no_validator_service/service/__init__.py | Informasjonsforvaltning/dcat-ap-no-validator-service | 45c1f762429427591840a15ece4b70617d2f3c8a | [
"Apache-2.0"
] | 1 | 2021-02-08T09:36:08.000Z | 2021-02-08T09:36:08.000Z | src/dcat_ap_no_validator_service/service/__init__.py | Informasjonsforvaltning/dcat-ap-no-validator-service | 45c1f762429427591840a15ece4b70617d2f3c8a | [
"Apache-2.0"
] | 63 | 2020-11-12T13:54:40.000Z | 2022-03-14T12:04:31.000Z | src/dcat_ap_no_validator_service/service/__init__.py | Informasjonsforvaltning/dcat-ap-no-validator-service | 45c1f762429427591840a15ece4b70617d2f3c8a | [
"Apache-2.0"
] | null | null | null | """Package for all services."""
from .validator_service import Config, ValidatorService
| 29.333333 | 55 | 0.795455 | """Package for all services."""
from .validator_service import Config, ValidatorService
| 0 | 0 | 0 |
2c22cabaf6d13aa9ba7757a44da8dd804f3e8ff6 | 1,614 | py | Python | rls/utils/sundry_utils.py | StepNeverStop/RLs | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | [
"Apache-2.0"
] | 371 | 2019-04-26T00:37:33.000Z | 2022-03-31T07:33:12.000Z | rls/utils/sundry_utils.py | BlueFisher/RLs | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | [
"Apache-2.0"
] | 47 | 2019-07-21T11:51:57.000Z | 2021-08-31T08:45:22.000Z | rls/utils/sundry_utils.py | BlueFisher/RLs | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | [
"Apache-2.0"
] | 102 | 2019-06-29T13:11:15.000Z | 2022-03-28T13:51:04.000Z | #!/usr/bin/env python3
# encoding: utf-8
import os
import random
from typing import NoReturn
import numpy as np
import torch as th
from rls.utils.display import colorize
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
def check_or_create(dicpath: str, name: str = '') -> NoReturn:
"""
    Check whether the directory exists; if not, create it.
"""
if not os.path.exists(dicpath):
os.makedirs(dicpath)
logger.info(colorize(
            ''.join([f'create {name} directory: ', dicpath]), color='green'))
def set_global_seeds(seed: int) -> NoReturn:
"""
Set the random seed of pytorch, numpy and random.
params:
seed: an integer refers to the random seed
"""
th.manual_seed(seed)
th.cuda.manual_seed_all(seed)
th.backends.cudnn.deterministic = True
np.random.seed(seed)
random.seed(seed)
| 23.391304 | 80 | 0.600372 | #!/usr/bin/env python3
# encoding: utf-8
import os
import random
from typing import NoReturn
import numpy as np
import torch as th
from rls.utils.display import colorize
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
def check_or_create(dicpath: str, name: str = '') -> NoReturn:
"""
    Check whether the directory exists; if not, create it.
"""
if not os.path.exists(dicpath):
os.makedirs(dicpath)
logger.info(colorize(
            ''.join([f'create {name} directory: ', dicpath]), color='green'))
def set_global_seeds(seed: int) -> NoReturn:
"""
Set the random seed of pytorch, numpy and random.
params:
seed: an integer refers to the random seed
"""
th.manual_seed(seed)
th.cuda.manual_seed_all(seed)
th.backends.cudnn.deterministic = True
np.random.seed(seed)
random.seed(seed)
class LinearAnnealing:
def __init__(self, x: float, x_: float, end: int):
"""
Params:
x: start value
x_: end value
end: annealing time
"""
assert end != 0, 'the time steps for annealing must larger than 0.'
self.x = x
self.x_ = x_
self.interval = (x_ - x) / end
def __call__(self, current: int) -> float:
"""
TODO: Annotation
"""
return max(self.x + self.interval * current, self.x_)
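# Worked example (values assumed): annealing 1.0 -> 0.1 over 1000 steps gives
# LinearAnnealing(1.0, 0.1, 1000)(500) == max(1.0 + (-0.9 / 1000) * 500, 0.1) == 0.55,
# and any step past 1000 is clamped to the end value 0.1.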
def nested_tuple(x):
ret = []
for i in x:
if isinstance(i, (tuple, list)):
ret.extend(nested_tuple(i))
else:
ret.append(i)
return tuple(ret)
| 171 | 496 | 46 |
9be4a1d169d9ecdf8d1741de65b4d8927bfa3dd7 | 5,685 | py | Python | mmdet/models/detectors/seq_ssd_dnt.py | ktw361/mmdetection_impl | d09f5320290699cdae0817c9f6a52e8e07c1e098 | [
"Apache-2.0"
] | 2 | 2021-05-09T15:49:35.000Z | 2021-05-22T02:16:14.000Z | mmdet/models/detectors/seq_ssd_dnt.py | ktw361/mmdetection_impl | d09f5320290699cdae0817c9f6a52e8e07c1e098 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/seq_ssd_dnt.py | ktw361/mmdetection_impl | d09f5320290699cdae0817c9f6a52e8e07c1e098 | [
"Apache-2.0"
] | 1 | 2021-05-09T15:49:43.000Z | 2021-05-09T15:49:43.000Z | import torch.nn as nn
from mmdet.core import bbox2result
from .. import builder
from ..registry import DETECTORS
from .seq_base import SeqBaseDetector
@DETECTORS.register_module
| 37.156863 | 85 | 0.581003 | import torch.nn as nn
from mmdet.core import bbox2result
from .. import builder
from ..registry import DETECTORS
from .seq_base import SeqBaseDetector
@DETECTORS.register_module
class SeqSSDDnT(SeqBaseDetector):
def __init__(self,
backbone,
neck=None,
bbox_head=None,
tracking_head=None,
temporal_module=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SeqSSDDnT, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.neck_first = True
if temporal_module is not None:
self.temporal_module = builder.build_temporal_module(
temporal_module)
if hasattr(self.temporal_module, 'neck_first'):
self.neck_first = self.temporal_module.neck_first
self.bbox_head = builder.build_head(bbox_head)
self.tracking_head = builder.build_head(tracking_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
super(SeqSSDDnT, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_temporal_module:
self.temporal_module.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck and self.neck_first:
x = self.neck(x)
return x
def forward_dummy(self, img):
# TODO zhifan
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
seq_len,
gt_bboxes,
gt_labels,
gt_trackids,
gt_bboxes_ignore=None):
batch = img.size(0) // seq_len
raw_x = self.extract_feat(img) # [[2*B, c1, h1, w1]*{7, 14, 15, 19, &extra}]
x = raw_x[2:]
if self.with_temporal_module:
x_seq = [v.view([seq_len, batch, *v.shape[1:]])
for v in x]
x, _ = self.temporal_module(x_seq, in_dict=None, is_train=True)
if self.with_neck and not self.neck_first:
x = self.neck(x)
        # Predict track
track_x_seq = [v.view([seq_len, batch, *v.shape[1:]])
for i, v in enumerate(raw_x) if i in (0, 1, 3)]
track_preds = self.tracking_head(track_x_seq)
outs = self.bbox_head(x)
# Some frames may have no annotation, disable calculation on them.
valid_gt_ind = [i for i, gt in enumerate(gt_bboxes) if len(gt) != 0]
new_outs = []
for out in outs:
new_out = [o[valid_gt_ind, ...].contiguous()
for o in out]
new_outs.append(new_out)
new_outs = tuple(new_outs)
gt_bboxes = [gt_bboxes[i] for i in valid_gt_ind]
gt_labels = [gt_labels[i] for i in valid_gt_ind]
gt_trackids = [gt_trackids[i] for i in valid_gt_ind]
img_metas = [img_metas[i] for i in valid_gt_ind]
loss_inputs = new_outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
loss_track = self.tracking_head.loss(
seq_len,
outs[0], track_preds, gt_bboxes, gt_trackids,
img_metas, self.train_cfg)
losses.update(loss_track)
return losses
def temporal_test(self, img, img_meta, seq_len, rescale=False):
x = self.extract_feat(img) # [[1*1, c1, h1, w1]*4]
x = x[2:]
out_dict = None
if self.with_temporal_module:
x_seq = [v.view([seq_len, 1, *v.shape[1:]])
for v in x]
x, out_dict = self.temporal_module(x_seq, in_dict=None, is_train=True)
if self.with_neck and not self.neck_first:
x = self.neck(x)
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results, out_dict
def simple_test(self, img, img_meta, in_dict=None, rescale=False):
x = self.extract_feat(img) # [[1*1, c1, h1, w1]*5]
out_dict = None
if self.with_temporal_module:
# During test, no reshape & permute
x, out_dict = self.temporal_module(x, in_dict=in_dict,
is_train=False)
if self.with_neck and not self.neck_first:
x = self.neck(x)
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results[0], out_dict
def aug_test(self, imgs, img_metas, rescale=False):
raise NotImplementedError
| 5,256 | 12 | 238 |
48a811bb993c0019431ed5bb84886410d9075233 | 831 | py | Python | logparser.py | ncos/gitparser | 27f814f8bb8ddfdde174ff1277c38f1cad56b12f | [
"MIT"
] | null | null | null | logparser.py | ncos/gitparser | 27f814f8bb8ddfdde174ff1277c38f1cad56b12f | [
"MIT"
] | null | null | null | logparser.py | ncos/gitparser | 27f814f8bb8ddfdde174ff1277c38f1cad56b12f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import operator
labels = {}
f = open('log.txt', 'r')
for line in f.readlines():
s = line.strip().split('| ')
name = s[0].split(' ')[0].strip()
if len(s) != 2:
safe_add(name, 'other')
continue
lbls = s[1].strip().split(';')
for label in lbls:
if len(label) < 1:
continue
safe_add(name, label.strip())
f.close()
for label in labels.keys():
print "\n" + label
for name, num in sorted(labels[label].items(), key=operator.itemgetter(1), reverse=True):
print "\t" + name + ": " + str(num)
| 18.466667 | 93 | 0.535499 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import operator
labels = {}
def safe_add(user, label):
if label not in labels.keys():
labels[label] = {}
if user not in labels[label].keys():
labels[label][user] = 1
return
labels[label][user] += 1
f = open('log.txt', 'r')
for line in f.readlines():
s = line.strip().split('| ')
name = s[0].split(' ')[0].strip()
if len(s) != 2:
safe_add(name, 'other')
continue
lbls = s[1].strip().split(';')
for label in lbls:
if len(label) < 1:
continue
safe_add(name, label.strip())
f.close()
for label in labels.keys():
print "\n" + label
for name, num in sorted(labels[label].items(), key=operator.itemgetter(1), reverse=True):
print "\t" + name + ": " + str(num)
| 186 | 0 | 23 |
c938ec918fe4df83bcdb7352aaff7d9528da5f95 | 1,192 | py | Python | cpo/lib/fyre/data/quick_burn_max_hours_data.py | IBM/cloud-pak-operations-cli | 45ddcefb4302801c9a833d1359ea4d740c384556 | [
"Apache-2.0"
] | 7 | 2021-12-07T09:16:24.000Z | 2022-03-08T12:38:54.000Z | cpo/lib/fyre/data/quick_burn_max_hours_data.py | IBM/cloud-pak-operations-cli | 45ddcefb4302801c9a833d1359ea4d740c384556 | [
"Apache-2.0"
] | 3 | 2021-11-26T09:43:03.000Z | 2021-12-14T08:04:53.000Z | cpo/lib/fyre/data/quick_burn_max_hours_data.py | IBM/cloud-pak-operations-cli | 45ddcefb4302801c9a833d1359ea4d740c384556 | [
"Apache-2.0"
] | 1 | 2022-03-10T07:14:49.000Z | 2022-03-10T07:14:49.000Z | # Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import click
from cpo.lib.fyre.types.ocp_quick_burn_max_hours_response import OCPQuickBurnMaxHoursResponse
| 38.451613 | 104 | 0.766779 | # Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import click
from cpo.lib.fyre.types.ocp_quick_burn_max_hours_response import OCPQuickBurnMaxHoursResponse
class QuickBurnMaxHoursData:
def __init__(self, ocp_quick_burn_max_hours_response: OCPQuickBurnMaxHoursResponse):
self._ocp_quick_burn_max_hours_response = ocp_quick_burn_max_hours_response
def format(self, use_json: bool = False):
if use_json:
click.echo(json.dumps(self._ocp_quick_burn_max_hours_response, indent="\t", sort_keys=True))
else:
click.echo(self._ocp_quick_burn_max_hours_response["quick_burn_max_hours"])
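# Hedged usage sketch; the response dict shape is inferred from the key read
# above and the hour value is made up for illustration.
# data = QuickBurnMaxHoursData({"quick_burn_max_hours": 36})
# data.format(use_json=True)  # pretty-printed JSON
# data.format()               # plain value: 36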
| 395 | 7 | 76 |
e1ef5151fe0aa8273bff8cce8b0bea40347a50f6 | 4,243 | py | Python | data/goslar/myinv.py | LIAG-S2/SAEM | 17af3a5015f37adb50f8b47086214aed90d1c4e5 | [
"Apache-2.0"
] | null | null | null | data/goslar/myinv.py | LIAG-S2/SAEM | 17af3a5015f37adb50f8b47086214aed90d1c4e5 | [
"Apache-2.0"
] | null | null | null | data/goslar/myinv.py | LIAG-S2/SAEM | 17af3a5015f37adb50f8b47086214aed90d1c4e5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 7 12:21:17 2017
@author: Rochlitz.R
"""
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
from custEM.meshgen.invmesh_tools import PrismWorld
from custEM.meshgen import meshgen_utils as mu
from custEM.inv.inv_utils import MultiFWD
xt, zt = np.loadtxt("topo.txt", unpack=True)
zt = np.abs(zt)
# %% define mesh parameters
dataname = 'GOS_raw_inversion_ByBz_B_Tx123'  # base name; ".npz" is appended on load
invmod = dataname + '_l40'
invmesh = 'Prisms'
dataR, dataI = [], []
errorR, errorI = [], []
with np.load(dataname+".npz", allow_pickle=True) as ALL:
freqs = list(ALL["freqs"])
tx = ALL["tx"]
print(tx)
DATA = ALL["DATA"]
rxs = [data["rx"] for data in DATA]
# tx_ids = [[int(txi) for txi in data["tx_ids"]] for data in DATA]
tx_ids = [data["tx_ids"] for data in DATA]
cmps = [data["cmp"] for data in DATA]
for i, data in enumerate(DATA):
dataR = np.concatenate([dataR, data["dataR"].ravel()])
dataI = np.concatenate([dataI, data["dataI"].ravel()])
errorR = np.concatenate([errorR, data["errorR"].ravel()])
errorI = np.concatenate([errorI, data["errorI"].ravel()])
skip_domains = [0, 1]
sig_bg = 3e-3
refm_size = 1.
rxs_resolved = mu.resolve_rx_overlaps(rxs, refm_size)
rx_tri = mu.refine_rx(rxs_resolved, refm_size, 30.)
bound = 200
minrx = min([min(data["rx"][:, 0]) for data in DATA])
maxrx = max([max(data["rx"][:, 0]) for data in DATA])
##############################################################################
# %% generate 2.5D prism inversion mesh
P = PrismWorld(name=invmesh,
x_extent=[minrx-bound, maxrx+bound],
x_reduction=500.,
y_depth=1500.,
z_depth=1200.,
n_prisms=200,
tx=[txi for txi in tx],
orthogonal_tx=[True] * len(tx),
#surface_rx=rx_tri,
prism_area=50000,
prism_quality=34,
x_dim=[-1e5, 1e5],
y_dim=[-1e5, 1e5],
z_dim=[-1e5, 1e5],
topo=topo_f,
)
P.PrismWorld.add_paths(rx_tri)
for rx in rxs:
P.PrismWorld.add_rx(rx)
# %%
P.PrismWorld.call_tetgen(tet_param='-pDq1.3aA', print_infos=False)
pgmesh = pg.load('meshes/mesh_create/' + invmesh + '.bms')
# pgmesh = P.xzmesh # is 3D
if 0:
ax, cb = pg.show(pgmesh)
for rx in rxs:
ax.plot(rx[:, 0], rx[:, 2], ".")
for txi in tx:
for txii in txi:
print(txii)
ax.plot(txii[0], txii[2], "mv")
# %% run inversion
mask = np.isfinite(dataR+dataI+errorR+errorI)
datavec = np.hstack((dataR[mask], dataI[mask]))
errorvec = np.hstack((errorR[mask], errorI[mask]))
relerror = np.abs(errorvec/datavec)
fop = MultiFWD(invmod, invmesh, pgmesh, list(freqs), cmps, tx_ids,
skip_domains, sig_bg, n_cores=140, ini_data=datavec,
data_mask=mask)
fop.setRegionProperties("*", limits=[1e-4, 1])
# set up inv
inv = pg.Inversion(verbose=True) # , debug=True)
inv.setForwardOperator(fop)
C = pg.matrix.GeostatisticConstraintsMatrix(mesh=pgmesh, I=[500, 80])
# fop.setConstraints(C)
dT = pg.trans.TransSymLog(1e-3)
inv.dataTrans = dT
# run inversion
invmodel = inv.run(datavec, relerror, lam=40, # zWeight=0.3,
startModel=sig_bg, maxIter=10,
verbose=True, robustData=True)
# %% save results
np.save(fop.inv_dir + 'inv_model.npy', invmodel)
res = 1. / invmodel
pgmesh['sigma'] = invmodel # np.load(fop.inv_dir + 'inv_model.npy')
pgmesh['res'] = res # np.load(fop.inv_dir + 'inv_model.npy')
# pgmesh.setDimension(3)
# pgmesh.swapCoordinates(1, 2)
pgmesh.exportVTK(fop.inv_dir + invmod + '_final_invmodel.vtk')
# %% plot inv model
fig, ax = plt.subplots(figsize=(14, 8))
ax2, cbar = pg.show(pgmesh, res, ax=ax, cMap="Spectral", colorBar=True,
logScale=True, cMin=5, cMax=5000,
xlabel='x [m]', ylabel='z [m]',
label=r'$\rho$ [$\Omega$m]', pad=0.8)
# cbar.ax.set_xlabel(r'$\sigma$ [S/m]', labelpad=4)
# ax.figure.savefig("out.pdf")
np.save(invmod+"-response.npy", inv.response)
fop.jacobian().save("jacobian.bmat")
| 31.198529 | 78 | 0.602168 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 7 12:21:17 2017
@author: Rochlitz.R
"""
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
from custEM.meshgen.invmesh_tools import PrismWorld
from custEM.meshgen import meshgen_utils as mu
from custEM.inv.inv_utils import MultiFWD
xt, zt = np.loadtxt("topo.txt", unpack=True)
zt = np.abs(zt)
def topo_f(x, y=None):
return np.interp(x, xt, zt)
# %% define mesh parameters
dataname = 'GOS_raw_inversion_ByBz_B_Tx123'  # base name; ".npz" is appended on load
invmod = dataname + '_l40'
invmesh = 'Prisms'
dataR, dataI = [], []
errorR, errorI = [], []
with np.load(dataname+".npz", allow_pickle=True) as ALL:
freqs = list(ALL["freqs"])
tx = ALL["tx"]
print(tx)
DATA = ALL["DATA"]
rxs = [data["rx"] for data in DATA]
# tx_ids = [[int(txi) for txi in data["tx_ids"]] for data in DATA]
tx_ids = [data["tx_ids"] for data in DATA]
cmps = [data["cmp"] for data in DATA]
for i, data in enumerate(DATA):
dataR = np.concatenate([dataR, data["dataR"].ravel()])
dataI = np.concatenate([dataI, data["dataI"].ravel()])
errorR = np.concatenate([errorR, data["errorR"].ravel()])
errorI = np.concatenate([errorI, data["errorI"].ravel()])
skip_domains = [0, 1]
sig_bg = 3e-3
refm_size = 1.
rxs_resolved = mu.resolve_rx_overlaps(rxs, refm_size)
rx_tri = mu.refine_rx(rxs_resolved, refm_size, 30.)
bound = 200
minrx = min([min(data["rx"][:, 0]) for data in DATA])
maxrx = max([max(data["rx"][:, 0]) for data in DATA])
##############################################################################
# %% generate 2.5D prism inversion mesh
P = PrismWorld(name=invmesh,
x_extent=[minrx-bound, maxrx+bound],
x_reduction=500.,
y_depth=1500.,
z_depth=1200.,
n_prisms=200,
tx=[txi for txi in tx],
orthogonal_tx=[True] * len(tx),
#surface_rx=rx_tri,
prism_area=50000,
prism_quality=34,
x_dim=[-1e5, 1e5],
y_dim=[-1e5, 1e5],
z_dim=[-1e5, 1e5],
topo=topo_f,
)
P.PrismWorld.add_paths(rx_tri)
for rx in rxs:
P.PrismWorld.add_rx(rx)
# %%
P.PrismWorld.call_tetgen(tet_param='-pDq1.3aA', print_infos=False)
pgmesh = pg.load('meshes/mesh_create/' + invmesh + '.bms')
# pgmesh = P.xzmesh # is 3D
if 0:
ax, cb = pg.show(pgmesh)
for rx in rxs:
ax.plot(rx[:, 0], rx[:, 2], ".")
for txi in tx:
for txii in txi:
print(txii)
ax.plot(txii[0], txii[2], "mv")
# %% run inversion
mask = np.isfinite(dataR+dataI+errorR+errorI)
datavec = np.hstack((dataR[mask], dataI[mask]))
errorvec = np.hstack((errorR[mask], errorI[mask]))
relerror = np.abs(errorvec/datavec)
fop = MultiFWD(invmod, invmesh, pgmesh, list(freqs), cmps, tx_ids,
skip_domains, sig_bg, n_cores=140, ini_data=datavec,
data_mask=mask)
fop.setRegionProperties("*", limits=[1e-4, 1])
# set up inv
inv = pg.Inversion(verbose=True) # , debug=True)
inv.setForwardOperator(fop)
C = pg.matrix.GeostatisticConstraintsMatrix(mesh=pgmesh, I=[500, 80])
# fop.setConstraints(C)
dT = pg.trans.TransSymLog(1e-3)
inv.dataTrans = dT
# run inversion
invmodel = inv.run(datavec, relerror, lam=40, # zWeight=0.3,
startModel=sig_bg, maxIter=10,
verbose=True, robustData=True)
# %% save results
np.save(fop.inv_dir + 'inv_model.npy', invmodel)
res = 1. / invmodel
pgmesh['sigma'] = invmodel # np.load(fop.inv_dir + 'inv_model.npy')
pgmesh['res'] = res # np.load(fop.inv_dir + 'inv_model.npy')
# pgmesh.setDimension(3)
# pgmesh.swapCoordinates(1, 2)
pgmesh.exportVTK(fop.inv_dir + invmod + '_final_invmodel.vtk')
# %% plot inv model
fig, ax = plt.subplots(figsize=(14, 8))
ax2, cbar = pg.show(pgmesh, res, ax=ax, cMap="Spectral", colorBar=True,
logScale=True, cMin=5, cMax=5000,
xlabel='x [m]', ylabel='z [m]',
label=r'$\rho$ [$\Omega$m]', pad=0.8)
# cbar.ax.set_xlabel(r'$\sigma$ [S/m]', labelpad=4)
# ax.figure.savefig("out.pdf")
np.save(invmod+"-response.npy", inv.response)
fop.jacobian().save("jacobian.bmat")
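# Post-run inspection sketch: the arrays saved above can be reloaded with the
# very same paths this script writes; no API beyond numpy is assumed.
# model = np.load(fop.inv_dir + 'inv_model.npy')
# response = np.load(invmod + "-response.npy")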
| 33 | 0 | 23 |
2ded969c18fb89b666664bb84245401e24294dd0 | 6,837 | py | Python | agent/tests/workload_managers/slurm/test_slurmctld_prolog.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
] | 2 | 2020-11-15T22:54:39.000Z | 2022-02-15T07:58:55.000Z | agent/tests/workload_managers/slurm/test_slurmctld_prolog.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
] | 2 | 2022-02-18T19:36:45.000Z | 2022-03-16T23:07:44.000Z | agent/tests/workload_managers/slurm/test_slurmctld_prolog.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
] | null | null | null | """
Test Prolog script.
"""
from unittest import mock
import pytest
from lm_agent.workload_managers.slurm.slurmctld_prolog import prolog as main
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.make_booking_request")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.settings")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.make_booking_request")
| 37.157609 | 94 | 0.774609 | """
Test Prolog script.
"""
from unittest import mock
import pytest
from lm_agent.workload_managers.slurm.slurmctld_prolog import prolog as main
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
async def test_main_error_in_get_required_licenses_for_job(
get_required_licenses_for_job_mock,
get_job_context_mock,
sys_mock,
):
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"job_licenses": "",
}
get_required_licenses_for_job_mock.side_effect = Exception
with pytest.raises(Exception):
await main()
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
async def test_main_error_in_get_config_from_backend(
get_config_from_backend_mock,
get_required_licenses_for_job_mock,
get_job_context_mock,
sys_mock,
):
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"job_licenses": "test.feature@flexlm:10",
}
bookings_mock = mock.MagicMock()
bookings_mock.product_feature = "test.feature"
bookings_mock.license_server_type = "flexlm"
bookings_mock.tokens = 10
get_required_licenses_for_job_mock.return_value = [bookings_mock]
get_config_from_backend_mock.side_effect = Exception
with pytest.raises(Exception):
await main()
get_required_licenses_for_job_mock.assert_called_once_with("test.feature@flexlm:10")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
async def test_main_error_in_reconcile(
update_report_mock,
get_config_from_backend_mock,
get_required_licenses_for_job_mock,
get_job_context_mock,
):
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"job_licenses": "test.feature@flexlm:10",
}
bookings_mock = mock.MagicMock()
bookings_mock.product_feature = "test.feature"
bookings_mock.license_server_type = "flexlm"
bookings_mock.tokens = 10
get_required_licenses_for_job_mock.return_value = [bookings_mock]
backend_return_mock = mock.MagicMock()
backend_return_mock.product = "test"
backend_return_mock.features = ["feature"]
get_config_from_backend_mock.return_value = [backend_return_mock]
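    # The reconcile step (update_report) blows up; main() must propagate the error.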
update_report_mock.side_effect = Exception
with pytest.raises(Exception):
await main()
get_required_licenses_for_job_mock.assert_called_once_with("test.feature@flexlm:10")
get_config_from_backend_mock.assert_awaited_once()
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.make_booking_request")
async def test_main(
make_booking_request_mock,
update_report_mock,
get_config_from_backend_mock,
get_required_licenses_for_job_mock,
get_job_context_mock,
sys_mock,
):
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"cluster_name": "cluster1",
"job_licenses": "test.feature@flexlm:10",
}
bookings_mock = mock.MagicMock()
bookings_mock.product_feature = "test.feature"
bookings_mock.license_server_type = "flexlm"
bookings_mock.tokens = 10
get_required_licenses_for_job_mock.return_value = [bookings_mock]
backend_return_mock = mock.MagicMock()
backend_return_mock.product = "test"
backend_return_mock.features = ["feature"]
get_config_from_backend_mock.return_value = [backend_return_mock]
await main()
get_config_from_backend_mock.assert_awaited_once()
get_required_licenses_for_job_mock.assert_called_once_with("test.feature@flexlm:10")
update_report_mock.assert_awaited_once()
make_booking_request_mock.assert_awaited_once()
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.settings")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.make_booking_request")
async def test_main_without_triggering_reconciliation(
make_booking_request_mock,
update_report_mock,
get_config_from_backend_mock,
get_required_licenses_for_job_mock,
get_job_context_mock,
settings_mock,
sys_mock,
):
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"cluster_name": "cluster1",
"job_licenses": "test.feature@flexlm:10",
}
bookings_mock = mock.MagicMock()
bookings_mock.product_feature = "test.feature"
bookings_mock.license_server_type = "flexlm"
bookings_mock.tokens = 10
get_required_licenses_for_job_mock.return_value = [bookings_mock]
backend_return_mock = mock.MagicMock()
backend_return_mock.product = "test"
backend_return_mock.features = ["feature"]
get_config_from_backend_mock.return_value = [backend_return_mock]
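    # With reconciliation disabled, a booking request is made but no report update.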
settings_mock.USE_RECONCILE_IN_PROLOG_EPILOG = False
await main()
get_config_from_backend_mock.assert_awaited_once()
get_required_licenses_for_job_mock.assert_called_once_with("test.feature@flexlm:10")
make_booking_request_mock.assert_awaited_once()
update_report_mock.assert_not_called()
| 4,470 | 0 | 110 |
11a6fdbc062928222df6546d356adc6bc8f20004 | 1,231 | py | Python | blogapp/contexts.py | Vaiterius/Miniblog-App | 09ef0c93399454da04a55117f6b38f62ea65acb8 | ["MIT"] | null | null | null | blogapp/contexts.py | Vaiterius/Miniblog-App | 09ef0c93399454da04a55117f6b38f62ea65acb8 | ["MIT"] | null | null | null | blogapp/contexts.py | Vaiterius/Miniblog-App | 09ef0c93399454da04a55117f6b38f62ea65acb8 | ["MIT"] | null | null | null |
| 30.775 | 77 | 0.708367 |
"""Context processors and other useful functions"""
from re import template
from flask import Blueprint, current_app as app
from datetime import datetime
from blogapp import fc
contexts_bp = Blueprint("contexts_bp", __name__)
@contexts_bp.app_context_processor
def datetime_processor():
"""Inject current date/time into each template before rendering"""
def get_datetime(time="default"):
if time == "year":
return str(datetime.now().year)
return str(datetime.now())
return dict(get_datetime=get_datetime)
@contexts_bp.app_context_processor
def form_constraints():
"""Inject form constraints into login/signup fields"""
return {
"min_name_length": fc["min_name_length"],
"max_name_length": fc["max_name_length"],
"min_username_length": fc["min_username_length"],
"max_username_length": fc["max_username_length"],
"min_pass_length": fc["min_pass_length"],
}
@contexts_bp.after_request
def after_request(response):
"""Ensure responses aren't cached"""
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
    return response
| 118 | 0 | 26 |
9981d08b07ef247bb97b93765a73202f94d5ed49 | 311 | py | Python | setup.py | ozars/pip-subdir-example | 00e36374d4efc6cda582979a27afa269e374e038 | ["CC0-1.0"] | null | null | null | setup.py | ozars/pip-subdir-example | 00e36374d4efc6cda582979a27afa269e374e038 | ["CC0-1.0"] | null | null | null | setup.py | ozars/pip-subdir-example | 00e36374d4efc6cda582979a27afa269e374e038 | ["CC0-1.0"] | null | null | null |
| 22.214286 | 75 | 0.672026 |
from setuptools import setup
from pathlib import Path
subpackage_path = (Path(__file__).parent / "deps" / "subpackage").resolve()
setup(
name="mainpackage",
version="0.1",
packages="mainpackage",
install_requires=[
f"subpackage @ git+file://{subpackage_path}#subpackage-0.1",
],
)
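# Usage sketch (editorial note, not part of the original file): installing the
# main package, e.g. with a plain `pip install .` from the repo root, makes pip
# resolve the "subpackage" requirement from the local git checkout under deps/
# via the git+file:// direct reference above.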
| 0 | 0 | 0 |
90d681cae5c2dce94c7ea35d9f3d524b06814158 | 24,958 | py | Python | src/config/device-manager/device_manager/device_manager.py | codilime/contrail-controller-arch | e87a974950fc1bbdc2b834212dbdfee5e94008de | ["Apache-2.0"] | null | null | null | src/config/device-manager/device_manager/device_manager.py | codilime/contrail-controller-arch | e87a974950fc1bbdc2b834212dbdfee5e94008de | ["Apache-2.0"] | null | null | null | src/config/device-manager/device_manager/device_manager.py | codilime/contrail-controller-arch | e87a974950fc1bbdc2b834212dbdfee5e94008de | ["Apache-2.0"] | 1 | 2020-07-04T12:08:02.000Z | 2020-07-04T12:08:02.000Z |
| 41.252893 | 92 | 0.577049 |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of managing physical router configuration
"""
# Import kazoo.client before monkey patching
from cfgm_common.zkclient import ZookeeperClient
from gevent import monkey
monkey.patch_all()
from cfgm_common.vnc_kombu import VncKombuClient
import cgitb
import sys
import argparse
import requests
import ConfigParser
import socket
import time
from pprint import pformat
from pysandesh.sandesh_base import *
from pysandesh.sandesh_logger import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.uve.virtual_network.ttypes import *
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType, \
NodeTypeNames, INSTANCE_ID_DEFAULT
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
import discoveryclient.client as client
from cfgm_common.exceptions import ResourceExhaustionError
from vnc_api.vnc_api import VncApi
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from db import DBBaseDM, BgpRouterDM, PhysicalRouterDM, PhysicalInterfaceDM,\
ServiceInstanceDM, LogicalInterfaceDM, VirtualMachineInterfaceDM, \
VirtualNetworkDM, RoutingInstanceDM, GlobalSystemConfigDM, \
GlobalVRouterConfigDM, FloatingIpDM, InstanceIpDM, DMCassandraDB, PortTupleDM
from physical_router_config import PushConfigState
from cfgm_common.dependency_tracker import DependencyTracker
from cfgm_common.utils import cgitb_hook
class DeviceManager(object):
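    # Maps each object type to the dependent object types that must be
    # re-evaluated when it changes; consumed by DependencyTracker below.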
_REACTION_MAP = {
'physical_router': {
'self': ['bgp_router',
'physical_interface',
'logical_interface'],
'bgp_router': [],
'physical_interface': [],
'logical_interface': [],
'virtual_network': [],
'global_system_config': [],
},
'global_system_config': {
'self': ['physical_router'],
'physical_router': [],
},
'bgp_router': {
'self': ['bgp_router', 'physical_router'],
'bgp_router': ['physical_router'],
'physical_router': [],
},
'physical_interface': {
'self': ['physical_router',
'physical_interface',
'logical_interface'],
'physical_router': ['logical_interface'],
'logical_interface': ['physical_router'],
'physical_interface': ['physical_router'],
'virtual_machine_interface': ['physical_interface'],
},
'logical_interface': {
'self': ['physical_router',
'physical_interface',
'virtual_machine_interface'],
'physical_interface': ['virtual_machine_interface'],
'virtual_machine_interface': ['physical_router',
'physical_interface'],
'physical_router': ['virtual_machine_interface']
},
'virtual_machine_interface': {
'self': ['logical_interface',
'physical_interface',
'virtual_network',
'floating_ip',
'instance_ip',
'port_tuple'],
'logical_interface': ['virtual_network'],
'virtual_network': ['logical_interface'],
'floating_ip': ['virtual_network'],
'instance_ip': ['virtual_network'],
'routing_instance': ['port_tuple','physical_interface'],
'port_tuple': ['physical_interface']
},
'service_instance': {
'self': ['port_tuple'],
'port_tuple':[],
},
'port_tuple':{
'self':['virtual_machine_interface','service_instance'],
'service_instance':['virtual_machine_interface'],
'virtual_machine_interface':['service_instance']
},
'virtual_network': {
'self': ['physical_router',
'virtual_machine_interface'],
'routing_instance': ['physical_router',
'virtual_machine_interface'],
'physical_router': [],
'virtual_machine_interface': ['physical_router'],
},
'routing_instance': {
'self': ['routing_instance',
'virtual_network',
'virtual_machine_interface'],
'routing_instance': ['virtual_network',
'virtual_machine_interface'],
'virtual_network': []
},
'floating_ip': {
'self': ['virtual_machine_interface'],
'virtual_machine_interface': [],
},
'instance_ip': {
'self': ['virtual_machine_interface'],
'virtual_machine_interface': [],
},
}
def __init__(self, args=None):
self._args = args
# Initialize discovery client
self._disc = None
if self._args.disc_server_ip and self._args.disc_server_port:
self._disc = client.DiscoveryClient(
self._args.disc_server_ip,
self._args.disc_server_port,
ModuleNames[Module.DEVICE_MANAGER])
PushConfigState.set_repush_interval(int(self._args.repush_interval))
PushConfigState.set_repush_max_interval(int(self._args.repush_max_interval))
PushConfigState.set_push_delay_per_kb(float(self._args.push_delay_per_kb))
PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
PushConfigState.set_push_delay_enable(bool(self._args.push_delay_enable))
self._sandesh = Sandesh()
# Reset the sandesh send rate limit value
if self._args.sandesh_send_rate_limit is not None:
SandeshSystem.set_sandesh_send_rate_limit(
self._args.sandesh_send_rate_limit)
module = Module.DEVICE_MANAGER
module_name = ModuleNames[module]
node_type = Module2NodeType[module]
node_type_name = NodeTypeNames[node_type]
self.table = "ObjectConfigNode"
instance_id = INSTANCE_ID_DEFAULT
hostname = socket.gethostname()
self._sandesh.init_generator(
module_name, hostname, node_type_name, instance_id,
self._args.collectors, 'to_bgp_context',
int(args.http_server_port),
['cfgm_common', 'device_manager.sandesh'], self._disc)
self._sandesh.set_logging_params(enable_local_log=args.log_local,
category=args.log_category,
level=args.log_level,
file=args.log_file,
enable_syslog=args.use_syslog,
syslog_facility=args.syslog_facility)
PhysicalRouterDM._sandesh = self._sandesh
ConnectionState.init(
self._sandesh, hostname, module_name, instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus, self.table)
# Retry till API server is up
connected = False
self.connection_state_update(ConnectionStatus.INIT)
while not connected:
try:
self._vnc_lib = VncApi(
args.admin_user, args.admin_password,
args.admin_tenant_name, args.api_server_ip,
args.api_server_port,
api_server_use_ssl=args.api_server_use_ssl)
connected = True
self.connection_state_update(ConnectionStatus.UP)
except requests.exceptions.ConnectionError as e:
# Update connection info
self.connection_state_update(ConnectionStatus.DOWN, str(e))
time.sleep(3)
except ResourceExhaustionError: # haproxy throws 503
time.sleep(3)
rabbit_servers = self._args.rabbit_server
rabbit_port = self._args.rabbit_port
rabbit_user = self._args.rabbit_user
rabbit_password = self._args.rabbit_password
rabbit_vhost = self._args.rabbit_vhost
rabbit_ha_mode = self._args.rabbit_ha_mode
self._db_resync_done = gevent.event.Event()
q_name = 'device_manager.%s' % (socket.gethostname())
self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode,
q_name, self._vnc_subscribe_callback,
self.config_log, rabbit_use_ssl =
self._args.rabbit_use_ssl,
kombu_ssl_version =
self._args.kombu_ssl_version,
kombu_ssl_keyfile =
self._args.kombu_ssl_keyfile,
kombu_ssl_certfile =
self._args.kombu_ssl_certfile,
kombu_ssl_ca_certs =
self._args.kombu_ssl_ca_certs)
self._cassandra = DMCassandraDB.getInstance(self, _zookeeper_client)
DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
for obj in GlobalSystemConfigDM.list_obj():
GlobalSystemConfigDM.locate(obj['uuid'], obj)
for obj in GlobalVRouterConfigDM.list_obj():
GlobalVRouterConfigDM.locate(obj['uuid'], obj)
for obj in VirtualNetworkDM.list_obj():
vn = VirtualNetworkDM.locate(obj['uuid'], obj)
if vn is not None and vn.routing_instances is not None:
for ri_id in vn.routing_instances:
ri_obj = RoutingInstanceDM.locate(ri_id)
for obj in BgpRouterDM.list_obj():
BgpRouterDM.locate(obj['uuid'], obj)
pr_obj_list = PhysicalRouterDM.list_obj()
pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
self._cassandra.handle_pr_deletes(pr_uuid_set)
for obj in PortTupleDM.list_obj():
PortTupleDM.locate(obj['uuid'],obj)
for obj in pr_obj_list:
pr = PhysicalRouterDM.locate(obj['uuid'], obj)
li_set = pr.logical_interfaces
vmi_set = set()
for pi_id in pr.physical_interfaces:
pi = PhysicalInterfaceDM.locate(pi_id)
if pi:
li_set |= pi.logical_interfaces
vmi_set |= pi.virtual_machine_interfaces
for li_id in li_set:
li = LogicalInterfaceDM.locate(li_id)
if li and li.virtual_machine_interface:
vmi_set |= set([li.virtual_machine_interface])
for vmi_id in vmi_set:
vmi = VirtualMachineInterfaceDM.locate(vmi_id)
si_obj_list = ServiceInstanceDM.list_obj()
si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
self._cassandra.handle_pnf_resource_deletes(si_uuid_set)
for obj in si_obj_list:
ServiceInstanceDM.locate(obj['uuid'], obj)
for obj in InstanceIpDM.list_obj():
InstanceIpDM.locate(obj['uuid'], obj)
for obj in FloatingIpDM.list_obj():
FloatingIpDM.locate(obj['uuid'], obj)
for vn in VirtualNetworkDM.values():
vn.update_instance_ip_map()
for pr in PhysicalRouterDM.values():
pr.set_config_state()
self._db_resync_done.set()
gevent.joinall(self._vnc_kombu.greenlets())
# end __init__
def connection_state_update(self, status, message=None):
ConnectionState.update(
conn_type=ConnType.APISERVER, name='ApiServer',
status=status, message=message or '',
server_addrs=['%s:%s' % (self._args.api_server_ip,
self._args.api_server_port)])
# end connection_state_update
def config_log(self, msg, level):
self._sandesh.logger().log(SandeshLogger.get_py_logger_level(level),
msg)
def _vnc_subscribe_callback(self, oper_info):
self._db_resync_done.wait()
dependency_tracker = None
try:
msg = "Notification Message: %s" % (pformat(oper_info))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
obj_type = oper_info['type'].replace('-', '_')
obj_class = DBBaseDM.get_obj_type_map().get(obj_type)
if obj_class is None:
return
if oper_info['oper'] == 'CREATE':
obj_dict = oper_info['obj_dict']
obj_id = obj_dict['uuid']
obj = obj_class.locate(obj_id, obj_dict)
dependency_tracker = DependencyTracker(
DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
elif oper_info['oper'] == 'UPDATE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
old_dt = None
if obj is not None:
old_dt = DependencyTracker(
DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
old_dt.evaluate(obj_type, obj)
else:
obj = obj_class.locate(obj_id)
obj.update()
dependency_tracker = DependencyTracker(
DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
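            # Merge resources tracked before the update so objects related
            # only under the old state are still refreshed.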
if old_dt:
for resource, ids in old_dt.resources.items():
if resource not in dependency_tracker.resources:
dependency_tracker.resources[resource] = ids
else:
dependency_tracker.resources[resource] = list(
set(dependency_tracker.resources[resource]) |
set(ids))
elif oper_info['oper'] == 'DELETE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
if obj is None:
return
dependency_tracker = DependencyTracker(
DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
obj_class.delete(obj_id)
else:
# unknown operation
self.config_log('Unknown operation %s' % oper_info['oper'],
level=SandeshLevel.SYS_ERR)
return
if obj is None:
self.config_log('Error while accessing %s uuid %s' % (
obj_type, obj_id))
return
except Exception:
string_buf = cStringIO.StringIO()
cgitb_hook(file=string_buf, format="text")
self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)
if not dependency_tracker:
return
for vn_id in dependency_tracker.resources.get('virtual_network', []):
vn = VirtualNetworkDM.get(vn_id)
if vn is not None:
vn.update_instance_ip_map()
for pr_id in dependency_tracker.resources.get('physical_router', []):
pr = PhysicalRouterDM.get(pr_id)
if pr is not None:
pr.set_config_state()
# end _vnc_subscribe_callback
def parse_args(args_str):
'''
Eg. python device_manager.py --rabbit_server localhost
-- rabbit_port 5672
-- rabbit_user guest
-- rabbit_password guest
--cassandra_server_list 10.1.2.3:9160
--api_server_ip 10.1.2.3
--api_server_port 8082
--api_server_use_ssl False
--zk_server_ip 10.1.2.3
--zk_server_port 2181
--collectors 127.0.0.1:8086
--disc_server_ip 127.0.0.1
--disc_server_port 5998
--http_server_port 8090
--log_local
--log_level SYS_DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--cluster_id <testbed-name>
--repush_interval 15
--repush_max_interval 300
--push_delay_per_kb 0.01
--push_delay_max 100
--push_delay_enable True
[--reset_config]
'''
# Source any specified config/ini file
    # Turn off help, so we get all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file", action='append',
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'rabbit_server': 'localhost',
'rabbit_port': '5672',
'rabbit_user': 'guest',
'rabbit_password': 'guest',
'rabbit_vhost': None,
'rabbit_ha_mode': False,
'cassandra_server_list': '127.0.0.1:9160',
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'zk_server_ip': '127.0.0.1',
'zk_server_port': '2181',
'collectors': None,
'disc_server_ip': None,
'disc_server_port': None,
'http_server_port': '8096',
'log_local': False,
'log_level': SandeshLevel.SYS_DEBUG,
'log_category': '',
'log_file': Sandesh._DEFAULT_LOG_FILE,
'use_syslog': False,
'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
'cluster_id': '',
'repush_interval': '15',
'repush_max_interval': '600',
'push_delay_per_kb': '0.01',
'push_delay_max': '100',
'push_delay_enable': 'True',
'sandesh_send_rate_limit': SandeshSystem.get_sandesh_send_rate_limit(),
'rabbit_use_ssl': False,
'kombu_ssl_version': '',
'kombu_ssl_keyfile': '',
'kombu_ssl_certfile': '',
'kombu_ssl_ca_certs': '',
}
secopts = {
'use_certs': False,
'keyfile': '',
'certfile': '',
'ca_certs': '',
'ifmap_certauth_port': "8444",
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain',
}
cassandraopts = {
'cassandra_user': None,
'cassandra_password': None
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read(args.conf_file)
defaults.update(dict(config.items("DEFAULTS")))
if ('SECURITY' in config.sections() and
'use_certs' in config.options('SECURITY')):
if config.getboolean('SECURITY', 'use_certs'):
secopts.update(dict(config.items("SECURITY")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
if 'CASSANDRA' in config.sections():
cassandraopts.update(dict(config.items('CASSANDRA')))
# Override with CLI options
    # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(secopts)
defaults.update(ksopts)
defaults.update(cassandraopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--cassandra_server_list",
help="List of cassandra servers in IP Address:Port format",
nargs='+')
parser.add_argument(
"--reset_config", action="store_true",
help="Warning! Destroy previous configuration and start clean")
parser.add_argument("--api_server_ip",
help="IP address of API server")
parser.add_argument("--api_server_port",
help="Port of API server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument("--zk_server_ip",
help="IP address:port of zookeeper server")
parser.add_argument("--collectors",
help="List of VNC collectors in ip:port format",
nargs="+")
parser.add_argument("--disc_server_ip",
help="IP address of the discovery server")
parser.add_argument("--disc_server_port",
help="Port of the discovery server")
parser.add_argument("--http_server_port",
help="Port of local HTTP server")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--use_syslog", action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--admin_user",
help="Name of keystone admin user")
parser.add_argument("--admin_password",
help="Password of keystone admin user")
parser.add_argument("--admin_tenant_name",
help="Tenant name for keystone admin user")
parser.add_argument("--cluster_id",
help="Used for database keyspace separation")
parser.add_argument("--repush_interval",
help="time interval for config re push")
parser.add_argument("--repush_max_interval",
help="max time interval for config re push")
parser.add_argument("--push_delay_per_kb",
help="time delay between two successful commits per kb config size")
parser.add_argument("--push_delay_max",
help="max time delay between two successful commits")
parser.add_argument("--push_delay_enable",
help="enable delay between two successful commits")
parser.add_argument("--cassandra_user",
help="Cassandra user name")
parser.add_argument("--cassandra_password",
help="Cassandra password")
parser.add_argument("--sandesh_send_rate_limit", type=int,
help="Sandesh send rate limit in messages/sec")
args = parser.parse_args(remaining_argv)
if type(args.cassandra_server_list) is str:
args.cassandra_server_list = args.cassandra_server_list.split()
if type(args.collectors) is str:
args.collectors = args.collectors.split()
return args
# end parse_args
def main(args_str=None):
global _zookeeper_client
if not args_str:
args_str = ' '.join(sys.argv[1:])
args = parse_args(args_str)
if args.cluster_id:
client_pfx = args.cluster_id + '-'
zk_path_pfx = args.cluster_id + '/'
else:
client_pfx = ''
zk_path_pfx = ''
_zookeeper_client = ZookeeperClient(client_pfx+"device-manager",
args.zk_server_ip)
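    # Only the winner of the zookeeper master election runs run_device_manager.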
_zookeeper_client.master_election(zk_path_pfx+"/device-manager",
os.getpid(), run_device_manager,
args)
# end main
def run_device_manager(args):
device_manager = DeviceManager(args)
# end run_device_manager
def server_main():
cgitb.enable(format='text')
main()
# end server_main
if __name__ == '__main__':
server_main()
| 11,439 | 3,556 | 92 |
4ed26aa71fa95806a78963b45e018b32b1b16344 | 2,599 | py | Python | contrib/python-sdk/test/test_job.py | hongpu-corp/pai | 23b812b1bae7bc9c9a14b88393411144dde8293e | ["MIT"] | 2 | 2020-08-27T05:21:14.000Z | 2020-09-29T14:34:09.000Z | contrib/python-sdk/test/test_job.py | hongpu-corp/pai | 23b812b1bae7bc9c9a14b88393411144dde8293e | ["MIT"] | 178 | 2020-12-10T19:40:01.000Z | 2022-02-27T09:53:15.000Z | contrib/python-sdk/test/test_job.py | hongpu-corp/pai | 23b812b1bae7bc9c9a14b88393411144dde8293e | ["MIT"] | 1 | 2020-11-17T08:19:48.000Z | 2020-11-17T08:19:48.000Z |
| 49.980769 | 128 | 0.711043 |
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from basic_test import OrderedUnitTestCase, separated
from openpaisdk import to_screen
class TestJobResource(OrderedUnitTestCase):
def test_job_resource_parser(self):
from openpaisdk.job import JobResource
from openpaisdk import __flags__
self.assertDictEqual(__flags__.resources_requirements, JobResource(None).as_dict)
self.assertDictEqual(__flags__.resources_requirements, JobResource().as_dict)
self.assertDictEqual(__flags__.resources_requirements, JobResource({}).as_dict)
dic = dict(cpu=-1, gpu=-2, memoryMB=-1024)
for key, value in dic.items():
self.assertEqual(value, JobResource(dic).as_dict[key])
dic['mem'] = '-2gb'
self.assertEqual(-2048, JobResource(dic).as_dict["memoryMB"])
dic['mem'] = '-3g'
self.assertEqual(-3072, JobResource(dic).as_dict["memoryMB"])
dic['mem'] = 10240
self.assertEqual(10240, JobResource(dic).as_dict["memoryMB"])
self.assertEqual({"a": 1}, JobResource(dic).add_port("a").as_dict["ports"])
def test_job_resource_list(self):
from openpaisdk.job import JobResource
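        # Each "gpu,cpu,mem" string is expected to parse into a full resource dict.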
samples = {
"3,3,3g": dict(gpu=3, cpu=3, memoryMB=3072, ports={}),
"3,1, 2g": dict(gpu=3, cpu=1, memoryMB=2048, ports={}),
}
keys = list(samples.keys())
rets = JobResource.parse_list(keys)
for k, r in zip(keys, rets):
self.assertDictEqual(r, samples[k])
| 1,288 | 22 | 77 |
38731772eeac59e3d9a1d21beb19182530f71cbe | 4,912 | py | Python | runtool/tests/test_job_dispatching/test_job_dispatcher.py | Schmedu/gluon-ts-tools | bf05de991a23c740d61679dfa50a078c88518e14 | ["Apache-2.0"] | 1 | 2021-03-31T11:49:25.000Z | 2021-03-31T11:49:25.000Z | runtool/tests/test_job_dispatching/test_job_dispatcher.py | Schmedu/gluon-ts-tools | bf05de991a23c740d61679dfa50a078c88518e14 | ["Apache-2.0"] | 1 | 2021-04-01T12:28:02.000Z | 2021-04-01T12:28:02.000Z | runtool/tests/test_job_dispatching/test_job_dispatcher.py | Schmedu/gluon-ts-tools | bf05de991a23c740d61679dfa50a078c88518e14 | ["Apache-2.0"] | 2 | 2021-04-29T17:49:48.000Z | 2022-01-31T09:39:46.000Z |
| 32.966443 | 115 | 0.674878 |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
from pathlib import Path
from unittest.mock import Mock, patch
import botocore
import pytest
from runtool.dispatcher import JobDispatcher, group_by_instance_type
RESPONSE = {
"TrainingJobArn": "arn:aws:sagemaker:eu-west-1:012345678901:training-job/test-60a848663fa1",
"ResponseMetadata": {
"RequestId": "00924112-abcd-4aed-6d4d-28190dba0b68",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "00924112-abcd-4aed-6d4d-28190dba0b68",
"content-type": "application/x-amz-json-1.1",
"content-length": "92",
"date": "Tue, 16 Mar 2021 11:19:06 GMT",
},
"RetryAttempts": 0,
},
}
def load_file(name):
path = Path(__file__).parent / "test_data" / f"{name}.json"
with open(path) as file_pointer:
return json.load(file_pointer)
def test_group_by_instance():
dispatcher = JobDispatcher(None)
job = lambda instance, name: {
"ResourceConfig": {"InstanceType": instance},
"name": name,
}
assert group_by_instance_type([job(1, 1), job(2, 2), job(2, 3)]) == [
[job(1, 1)],
[job(2, 2), job(2, 3)],
]
def client_side_effects(behaviour: list):
"""
    Emulates the behaviour of a `boto3.Sagemaker.client` for
mocking purposes. The return value of this function is to
be used as a `unittest.mock().return_value`.
Takes a list of responses which will happen in sequence from
last to first.
If an item in the list is the string "busy", a
`ResourceLimitExceeded` exception is triggered.
If an item in the list is the string "throttle", a
`ResourceLimitExceeded` exception is triggered.
Otherwise the item will be returned.
>>> side_effects = client_side_effects([{}, "throttle", "busy"])
>>> side_effects()
Traceback (most recent call last):
...
botocore.exceptions.ClientError: An error occurred (ResourceLimitExceeded) when calling the operation: Unknown
>>> side_effects()
Traceback (most recent call last):
...
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the operation: Unknown
>>> side_effects()
{}
"""
def client_side_effect(*args, **kwargs):
current_result = behaviour.pop()
if current_result == "busy":
raise botocore.exceptions.ClientError(
{"Error": {"Code": "ResourceLimitExceeded"}}, ""
)
if current_result == "throttle":
raise botocore.exceptions.ClientError(
{"Error": {"Code": "ThrottlingException"}}, ""
)
return current_result
return client_side_effect
def run_dispatch(responses, file_name, expected=None):
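    # Helper: drive JobDispatcher against a scripted mock client and compare results.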
client = Mock()
client.create_training_job.side_effect = client_side_effects(responses)
dispatcher = JobDispatcher(client)
assert dispatcher.dispatch(load_file(file_name)) == expected
return client
@patch.object(JobDispatcher, "timeout_with_printer")
@patch("time.sleep", return_value=None)
def test_dispatch_success(patched_sleep, mock_timeout_with_printer):
client = run_dispatch(
[RESPONSE] * 2,
"two_trainingjobs",
{
"test-106f177b0569": RESPONSE,
"test-823eea803d1e": RESPONSE,
},
)
assert client.create_training_job.call_count == 2
assert mock_timeout_with_printer.call_count == 0
@patch.object(JobDispatcher, "timeout_with_printer")
@patch("time.sleep", return_value=None)
def test_dispatch_resources_busy_and_throttled(
patched_sleep, mock_timeout_with_printer
):
client = run_dispatch(
[RESPONSE, "throttle", "busy", RESPONSE],
"two_trainingjobs",
{
"test-106f177b0569": RESPONSE,
"test-823eea803d1e": RESPONSE,
},
)
assert client.create_training_job.call_count == 4
assert mock_timeout_with_printer.call_count == 2
@patch.object(JobDispatcher, "timeout_with_printer")
@patch("time.sleep", return_value=None)
def test_dispatch_retry_limit_reached(
patched_sleep, mock_timeout_with_printer
):
with pytest.raises(botocore.exceptions.ClientError):
# default of 10 retries, thus 11 retries should cause exception
run_dispatch(["throttle"] * 11, "two_trainingjobs")
| 2,110 | 0 | 162 |
ad4b427cd9126b20fc2c02a86d6159a529a5e8ee | 3,148 | py | Python | examples/full_binary_tree.py | ccoakley/dbcbet | 0110f7f02c9cec21d550ac59750bf6db9ee8e095 | ["MIT"] | 2 | 2020-04-22T20:19:56.000Z | 2020-11-10T16:40:10.000Z | examples/full_binary_tree.py | ccoakley/dbcbet | 0110f7f02c9cec21d550ac59750bf6db9ee8e095 | ["MIT"] | null | null | null | examples/full_binary_tree.py | ccoakley/dbcbet | 0110f7f02c9cec21d550ac59750bf6db9ee8e095 | ["MIT"] | null | null | null |
| 31.79798 | 92 | 0.640407 |
"""A full binary tree example"""
from dbcbet.dbcbet import pre, post, inv, bet, finitize, finitize_method
from dbcbet.helpers import state, argument_types
def full_tree_invariant(self):
return self._leaf() or self._full()
def is_full(self):
return self._full()
def is_leaf(self):
return self._leaf()
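# Contract invariant: every node is either a leaf or has both subtrees.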
@inv(full_tree_invariant)
class FullBinaryTree(object):
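    # Bounded-exhaustive testing: constructor values are drawn from this finite set.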
@finitize_method([1,2,3,4])
def __init__(self, value):
self.value = value
self.left_subtree = None
self.right_subtree = None
def leaf(self):
return self._leaf()
def _leaf(self):
return self.left_subtree is None and self.right_subtree is None
def full(self):
return self._full()
def _full(self):
return self.left_subtree is not None and self.right_subtree is not None
def nodes(self):
return 1 if self.leaf() else self.left_subtree.nodes() + self.right_subtree.nodes()
@pre(argument_types("examples.FullBinaryTree"))
def add_left_subtree(self, left_subtree):
self.left_subtree = left_subtree
@pre(argument_types("examples.FullBinaryTree"))
def add_right_subtree(self, right_subtree):
self.right_subtree = right_subtree
@pre(argument_types("examples.FullBinaryTree", "examples.FullBinaryTree"))
@pre(state(is_leaf))
@post(state(is_full))
def add_subtrees(self, left, right):
self.left_subtree = left
self.right_subtree = right
@pre(state(is_full))
@pre(argument_types("examples.FullBinaryTree"))
def replace_left_subtree(self, left_subtree):
self.left_subtree = left_subtree
@pre(state(is_full))
@pre(argument_types("examples.FullBinaryTree"))
def replace_right_subtree(self, right_subtree):
self.right_subtree = right_subtree
def __str__(self):
return self._s("")
def __hash__(self):
return hash((self.value, self.left_subtree, self.right_subtree))
def __eq__(self, other):
partial = True
if self.left_subtree is not None and other.left_subtree is not None:
partial = partial and (self.left_subtree == other.left_subtree)
else:
partial = partial and self.left_subtree is None and other.left_subtree is None
if self.right_subtree is not None and other.right_subtree is not None:
partial = partial and (self.right_subtree == other.right_subtree)
else:
partial = partial and self.right_subtree is None and other.right_subtree is None
partial = partial and (self.value == other.value)
return partial
def _s(self, pad):
ret = ""
ret += "\n"+pad
ret += " value: " + self.value
ret += " # nodes: " + self.nodes()
if self.left_subtree:
ret += '\n' + pad
ret += " left subtree: " + self.left_subtree._s(pad + " ")
assert self.right_subtree
ret += '\n' + pad
ret += " right subtree: " + self.right_subtree._s(pad + " ")
ret += "\n"
return ret
if __name__ == "__main__":
bet(FullBinaryTree).run()
| 1,967 | 844 | 91 |
9330e26c51ef23651b439dc4c82f55ebd5f26cc1 | 1,807 | py | Python | utils/train_test_split.py | abhatta1234/face_analysis_pytorch | 2abe930c0ca02a1fd819d4710fd9bff392f32f58 | ["MIT"] | 27 | 2020-05-19T16:51:42.000Z | 2022-02-28T05:00:16.000Z | utils/train_test_split.py | abhatta1234/face_analysis_pytorch | 2abe930c0ca02a1fd819d4710fd9bff392f32f58 | ["MIT"] | 3 | 2020-04-09T04:46:24.000Z | 2020-10-21T18:57:05.000Z | utils/train_test_split.py | abhatta1234/face_analysis_pytorch | 2abe930c0ca02a1fd819d4710fd9bff392f32f58 | ["MIT"] | 10 | 2020-05-11T19:50:30.000Z | 2022-03-16T11:49:52.000Z |
| 29.145161 | 81 | 0.65689 |
import argparse
import random
import numpy as np
def get_classes_count(array):
array = np.sort(array.astype("int"))
classes_dict = {}
classes, classes_count = np.unique(array, return_counts=True)
for i in range(len(classes)):
classes_dict[classes[i]] = classes_count[i]
return classes_dict
def split(image_list, percent):
image_paths = np.loadtxt(image_list, dtype=np.str)
np.random.shuffle(image_paths)
indices = np.asarray(
random.sample(
list(np.linspace(0, len(image_paths) - 1, len(image_paths))),
int(percent * len(image_paths)),
)
).astype("int")
train_set = np.delete(image_paths, indices, axis=0)
test_set = image_paths[indices]
assert len(train_set) + len(test_set) == len(image_paths)
train_output = image_list[:-4] + "_train.txt"
val_output = image_list[:-4] + "_val.txt"
print(f"Races in train: {get_classes_count(train_set[:, 1])}")
print(f"Gender in train: {get_classes_count(train_set[:, 2])}")
print(f"Age in train: {get_classes_count(train_set[:, 3])}")
print(f"Races in test: {get_classes_count(test_set[:, 1])}")
print(f"Gender in test: {get_classes_count(test_set[:, 2])}")
print(f"Age in test: {get_classes_count(test_set[:, 3])}")
np.savetxt(train_output, train_set, fmt="%s")
np.savetxt(val_output, test_set, fmt="%s")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create train and test splits.")
parser.add_argument("-i", "--image_list", help="List of images to split.")
parser.add_argument(
"-p", "--percent", help="Percent of data used to test.", default=0.1
)
args = parser.parse_args()
np.random.seed(0)
random.seed(0)
split(args.image_list, args.percent)
| 1,295 | 0 | 46 |
c0047c3c04d44f7662b9fa3ff0cf4b5ad252eeba | 657 | py | Python | LeetCode/TopKfrequent.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | ["MIT"] | 1 | 2021-02-27T06:12:55.000Z | 2021-02-27T06:12:55.000Z | LeetCode/TopKfrequent.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | ["MIT"] | 1 | 2021-02-02T08:52:17.000Z | 2021-02-03T08:19:12.000Z | LeetCode/TopKfrequent.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | ["MIT"] | null | null | null |
| 16.425 | 44 | 0.48554 |
import heapq
def kFrequent(arr, k):
n = len(arr)
freq = list()
d = dict()
for i in arr:
d[i] = d.get(i, 0) + 1
for i in d:
freq.append(d[i])
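    # Keep a min-heap of size k holding the k largest frequencies seen so far.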
heap = freq[:k]
heapq.heapify(heap)
for i in range(k, len(freq)):
if heap[0] < freq[i]:
heapq.heapreplace(heap, freq[i])
li = list()
key = list(d.keys())
values = list(d.values())
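    # Map each heap frequency back to its element; zero out the used slot so
    # duplicate frequencies resolve to distinct keys.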
for i in range(len(heap)-1, -1, -1):
temp = heap[i]
li.append(key[values.index(temp)])
s = values.index(temp)
values[s] = 0
return li
arr = [3, 0, 1, 0]
k = 1
li = kFrequent(arr, k)
print(li)
| 562 | 0 | 23 |
527c822161319075c66ba376a2c71f2a3789aa82 | 981 | py | Python | src/main.py | 2118-full-stack-python-codo-a-codo/clase-22 | 6b3db51406e85e343d00539638dcea9306236a51 | ["MIT"] | null | null | null | src/main.py | 2118-full-stack-python-codo-a-codo/clase-22 | 6b3db51406e85e343d00539638dcea9306236a51 | ["MIT"] | null | null | null | src/main.py | 2118-full-stack-python-codo-a-codo/clase-22 | 6b3db51406e85e343d00539638dcea9306236a51 | ["MIT"] | 13 | 2021-06-18T22:31:00.000Z | 2021-06-25T21:58:41.000Z |
| 19.235294 | 89 | 0.61264 |
from math.utils import *
def main():
"""
main() -> None
"""
myVariable = complex()
print(myVariable)
sumatoria(3)
print(calVolumenParalelepipedo(2, 3, 10))
print(sumatoria(3))
print(sumatoriaLambda(3))
return None
sumatoriaLambda = lambda x: (x * (x + 1)) / 2
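# Gauss' closed form: 1 + 2 + ... + x == x * (x + 1) / 2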
# print(resultado)
def count_substring(string, sub_string):
"""
    Counts how many times sub_string appears
    in the string
    Args:
        string: (string)
        sub_string: (string)
    return : int
"""
return string.count(sub_string)
if __name__ == "__main__":
main()
string = "Hola Codo a Codo" # input().strip()
sub_string = "codo" # input().strip()
count = count_substring(string, sub_string)
print(count)
str = "este es un string que tiene varias coincidencias de strings con el sub-str"
sub_str = "string"
print("La palabra [", sub_str, "] aparece ", count_substring(str, sub_str), " veces")
| 0 | 0 | 0 |
f94bb7d82e48623ee2ee98f8725951e7e565525b | 6,908 | py | Python | poezio/tabs/listtab.py | louiz/poezio | 8edef603fb12e1f79719f09dd0eed7c7f399c8fc | ["Zlib"] | 1 | 2021-10-10T18:20:02.000Z | 2021-10-10T18:20:02.000Z | poezio/tabs/listtab.py | louiz/poezio | 8edef603fb12e1f79719f09dd0eed7c7f399c8fc | ["Zlib"] | null | null | null | poezio/tabs/listtab.py | louiz/poezio | 8edef603fb12e1f79719f09dd0eed7c7f399c8fc | ["Zlib"] | null | null | null |
"""
a = 10
b = 11
c = 12
func = jitter()
assert func() == (20, 22)
| 10.35 | 25 | 0.487923 | from numba import *
a = 10
b = 11
c = 12
def jitter():
a = 20
b = 21
c = 22
@jit(object_())
def func():
return a, c
return func
func = jitter()
assert func() == (20, 22)
| 99 | 0 | 23 |
07b3fe050dd8aba94b1a7ac455bd9bc8824e2813 | 6,908 | py | Python | poezio/tabs/listtab.py | louiz/poezio | 8edef603fb12e1f79719f09dd0eed7c7f399c8fc | [
"Zlib"
] | 1 | 2021-10-10T18:20:02.000Z | 2021-10-10T18:20:02.000Z | poezio/tabs/listtab.py | louiz/poezio | 8edef603fb12e1f79719f09dd0eed7c7f399c8fc | [
"Zlib"
] | null | null | null | poezio/tabs/listtab.py | louiz/poezio | 8edef603fb12e1f79719f09dd0eed7c7f399c8fc | [
"Zlib"
] | null | null | null | """
A generic tab that displays a series of items in a scrollable, searchable,
sortable list. It should be inherited to actually provide methods that
insert items in the list and that let the user interact with them.
"""
import curses
import collections
import logging
from typing import Dict, Callable
from poezio import windows
from poezio.core.structs import Command
from poezio.decorators import refresh_wrapper
from poezio.tabs import Tab
log = logging.getLogger(__name__)
| 33.862745 | 79 | 0.613926 | """
A generic tab that displays a series of items in a scrollable, searchable,
sortable list. It should be inherited to actually provide methods that
insert items in the list and that let the user interact with them.
"""
import curses
import collections
import logging
from typing import Dict, Callable
from poezio import windows
from poezio.core.structs import Command
from poezio.decorators import refresh_wrapper
from poezio.tabs import Tab
log = logging.getLogger(__name__)
class ListTab(Tab):
plugin_commands = {} # type: Dict[str, Command]
plugin_keys = {} # type: Dict[str, Callable]
def __init__(self, core, name, help_message, header_text, cols):
"""Parameters:
name: The name of the tab
help_message: The default help message displayed instead of the
input
header_text: The text displayed on the header line, at the top of
the tab
cols: a tuple of 2-tuples. e.g. (('column1_name', number),
('column2_name', number))
"""
Tab.__init__(self, core)
self.state = 'normal'
self._error_message = ''
self.name = name
columns = collections.OrderedDict()
for col, num in cols:
columns[col] = num
self.list_header = windows.ColumnHeaderWin(list(columns))
self.listview = windows.ListWin(columns)
self.info_header = windows.MucListInfoWin(header_text)
self.default_help_message = windows.HelpText(help_message)
self.input = self.default_help_message
self.key_func["KEY_DOWN"] = self.move_cursor_down
self.key_func["KEY_UP"] = self.move_cursor_up
self.key_func['^I'] = self.completion
self.key_func["/"] = self.on_slash
self.key_func['KEY_LEFT'] = self.list_header.sel_column_left
self.key_func['KEY_RIGHT'] = self.list_header.sel_column_right
self.key_func[' '] = self.sort_by
self.register_command('close', self.close, shortdesc='Close this tab.')
self.resize()
self.update_keys()
self.update_commands()
def get_columns_sizes(self):
"""
Must be implemented in subclasses. Must return a dict like this:
{'column1_name': size1,
'column2_name': size2}
Where the size are calculated based on the size of the tab etc
"""
raise NotImplementedError
def refresh(self):
if self.need_resize:
self.resize()
log.debug(' TAB Refresh: %s', self.__class__.__name__)
if self.size.tab_degrade_y:
display_info_win = False
else:
display_info_win = True
self.info_header.refresh(window=self.listview)
if display_info_win:
self.info_win.refresh()
self.refresh_tab_win()
self.list_header.refresh()
self.listview.refresh()
self.input.refresh()
def resize(self):
if self.size.tab_degrade_y:
info_win_height = 0
tab_win_height = 0
else:
info_win_height = self.core.information_win_size
tab_win_height = Tab.tab_win_height()
self.info_header.resize(
1, self.width, self.height - 2 - info_win_height - tab_win_height,
0)
column_size = self.get_columns_sizes()
self.list_header.resize_columns(column_size)
self.list_header.resize(1, self.width, 0, 0)
self.listview.resize_columns(column_size)
self.listview.resize(
self.height - 3 - info_win_height - tab_win_height, self.width, 1,
0)
self.input.resize(1, self.width, self.height - 1, 0)
def on_slash(self):
"""
'/' is pressed, activate the input
"""
curses.curs_set(1)
self.input = windows.CommandInput("", self.reset_help_message,
self.execute_slash_command)
self.input.resize(1, self.width, self.height - 1, 0)
self.input.do_command("/") # we add the slash
def close(self, arg=None):
self.core.close_tab(self)
def set_error(self, msg, code, body):
"""
If there's an error (retrieving the values etc)
"""
self._error_message = 'Error: %(code)s - %(msg)s: %(body)s' % {
'msg': msg,
'body': body,
'code': code
}
self.info_header.message = self._error_message
self.info_header.refresh()
curses.doupdate()
def sort_by(self):
if self.list_header.get_order():
self.listview.sort_by_column(
col_name=self.list_header.get_sel_column(), asc=False)
self.list_header.set_order(False)
self.list_header.refresh()
else:
self.listview.sort_by_column(
col_name=self.list_header.get_sel_column(), asc=True)
self.list_header.set_order(True)
self.list_header.refresh()
self.core.doupdate()
@refresh_wrapper.always
def reset_help_message(self, _=None):
if self.closed:
return True
curses.curs_set(0)
self.input = self.default_help_message
self.input.resize(1, self.width, self.height - 1, 0)
return True
def execute_slash_command(self, txt):
if txt.startswith('/'):
self.input.key_enter()
self.execute_command(txt)
return self.reset_help_message()
def completion(self):
if isinstance(self.input, windows.Input):
self.complete_commands(self.input)
def on_input(self, key, raw):
res = self.input.do_command(key, raw=raw)
if res:
return not isinstance(self.input, windows.Input)
if not raw and key in self.key_func:
return self.key_func[key]()
def on_info_win_size_changed(self):
if self.core.information_win_size >= self.height - 3:
return
self.info_header.resize(
1, self.width, self.height - 2 - self.core.information_win_size -
Tab.tab_win_height(), 0)
self.listview.resize(
self.height - 3 - self.core.information_win_size -
Tab.tab_win_height(), self.width, 1, 0)
def on_lose_focus(self):
self.state = 'normal'
def on_gain_focus(self):
self.state = 'current'
curses.curs_set(0)
def on_scroll_up(self):
return self.listview.scroll_up()
def on_scroll_down(self):
return self.listview.scroll_down()
def move_cursor_up(self):
self.listview.move_cursor_up()
self.listview.refresh()
self.core.doupdate()
def move_cursor_down(self):
self.listview.move_cursor_down()
self.listview.refresh()
self.core.doupdate()
def matching_names(self):
return [(2, self.name)]
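# A hedged subclassing sketch (tab and column names invented here; assumes Tab
# exposes self.width, as the resize() code above suggests):
#   class RoomListTab(ListTab):
#       def __init__(self, core):
#           cols = (('name', 2), ('users', 1))
#           super().__init__(core, 'Rooms', 'Press / to search', 'Rooms', cols)
#       def get_columns_sizes(self):
#           return {'name': self.width // 2, 'users': self.width // 2}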
| 3,257 | 3,143 | 23 |
ebba62b7baa76e44699e25199dc269d3a03448b5 | 363 | py | Python | board/urls.py | ChoiEunji0114/mju_festival | a28a6600b51925288a861babefe3f2b1f01f68c6 | [
"MIT"
] | 7 | 2019-05-09T15:18:17.000Z | 2019-05-16T15:46:25.000Z | board/urls.py | ChoiEunji0114/mju_festival | a28a6600b51925288a861babefe3f2b1f01f68c6 | [
"MIT"
] | 7 | 2020-06-05T20:48:43.000Z | 2022-02-10T07:15:09.000Z | board/urls.py | ChoiEunji0114/mju_festival | a28a6600b51925288a861babefe3f2b1f01f68c6 | [
"MIT"
] | 3 | 2019-05-12T07:30:48.000Z | 2019-05-15T12:33:05.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='board_home'),
path('new/', views.new, name='board_new'),
path('detail/<int:board_id>', views.detail, name='board_detail'),
path('delete/<int:board_id>', views.delete, name='board_delete'),
path('edit/<int:board_id>', views.edit, name='board_edit'),
] | 36.3 | 69 | 0.669421 | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='board_home'),
path('new/', views.new, name='board_new'),
path('detail/<int:board_id>', views.detail, name='board_detail'),
path('delete/<int:board_id>', views.delete, name='board_delete'),
path('edit/<int:board_id>', views.edit, name='board_edit'),
] | 0 | 0 | 0 |
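# A short usage note (illustrative only): because every route above is named,
# other code can build URLs with django.urls.reverse, e.g.
#   reverse('board_detail', args=[board.id]) # -> 'detail/<id>' under this app's prefix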
042f25bed6eb41f3b3c624060da54487a5e11a4f | 1,010 | py | Python | day09_2.py | filippocorradino/advent_of_code_2020 | be46aabf8c58e4e96f541f3555646e1a4875861a | [
"MIT"
] | 1 | 2020-12-01T09:28:43.000Z | 2020-12-01T09:28:43.000Z | day09_2.py | filippocorradino/advent_of_code_2020 | be46aabf8c58e4e96f541f3555646e1a4875861a | [
"MIT"
] | null | null | null | day09_2.py | filippocorradino/advent_of_code_2020 | be46aabf8c58e4e96f541f3555646e1a4875861a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
Advent of Code 2020 - Day 9 - Challenge 2
https://adventofcode.com/2020/day/9
Solution: 35602097
"""
__author__ = "Filippo Corradino"
__email__ = "filippo.corradino@gmail.com"
from day09_1 import find_invalid
if __name__ == "__main__":
main()
| 25.25 | 68 | 0.60297 | #!/usr/bin/env python
# encoding: utf-8
"""
Advent of Code 2020 - Day 9 - Challenge 2
https://adventofcode.com/2020/day/9
Solution: 35602097
"""
__author__ = "Filippo Corradino"
__email__ = "filippo.corradino@gmail.com"
from day09_1 import find_invalid
def main(ifile='inputs/day_09_input.txt', preamble=25):
with open(ifile) as file:
xmas = [int(line) for line in file]
value = find_invalid(xmas, preamble)
inf = 0
while inf < len(xmas)-1:
sup = inf + 2
currentsum = sum(xmas[inf:sup])
while currentsum < value and sup < len(xmas):
sup += 1
currentsum += xmas[sup-1]
if currentsum == value:
min_value = min(xmas[inf:sup])
max_value = max(xmas[inf:sup])
result = min_value + max_value
print(f"\nSum of minimum and maximum value: {result}\n")
return result
inf += 1
raise RuntimeError("Did not find a valid sum")
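# A tiny worked example of the expanding-window scan above, using the classic
# AoC day 9 sample (assumed input) where the invalid value is 127 and the list
# starts [35, 20, 15, 25, 47, 40, ...]: the window settles on [15, 25, 47, 40]
# (sum 127), so the printed result is min + max = 15 + 47 = 62.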
if __name__ == "__main__":
main()
| 689 | 0 | 23 |
19f9dec0a5382290dfe8ea07a917e0029022e9b7 | 2,249 | py | Python | jupyterlab/PreviewSearchPage.py | Larz60p/MakerProjectApril2019 | 2fd4d68aa66c1f4ad3b01f6a9589a078319280d7 | [
"MIT"
] | 1 | 2019-04-25T22:53:52.000Z | 2019-04-25T22:53:52.000Z | jupyterlab/PreviewSearchPage.py | Larz60p/MakerProjectApril2019 | 2fd4d68aa66c1f4ad3b01f6a9589a078319280d7 | [
"MIT"
] | null | null | null | jupyterlab/PreviewSearchPage.py | Larz60p/MakerProjectApril2019 | 2fd4d68aa66c1f4ad3b01f6a9589a078319280d7 | [
"MIT"
] | null | null | null | # PreviewSearchPage.py
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import BusinessPaths
import time
import PrettifyPage
import CreateDict
import json
import sys
if __name__ == '__main__':
PreviewSearchPage()
| 34.075758 | 121 | 0.649622 | # PreviewSearchPage.py
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import BusinessPaths
import time
import PrettifyPage
import CreateDict
import json
import sys
class PreviewSearchPage:
def __init__(self):
self.bpath = BusinessPaths.BusinessPaths()
self.pp = PrettifyPage.PrettifyPage()
self.cd = CreateDict.CreateDict()
self.analyze_page()
def start_browser(self):
caps = webdriver.DesiredCapabilities().FIREFOX
caps["marionette"] = True
self.browser = webdriver.Firefox(capabilities=caps)
def stop_browser(self):
self.browser.close()
def save_page(self, filename):
soup = BeautifulSoup(self.browser.page_source, "lxml")
with filename.open('w') as fp:
fp.write(self.pp.prettify(soup, 2))
def analyze_page(self):
self.start_browser()
self.get_search_page('Andover')
self.stop_browser()
def get_search_page(self, searchitem):
# pick city with multiple pages
url = self.bpath.base_url
self.browser.get(url)
time.sleep(2)
print(f'Main Page URL: {self.browser.current_url}')
self.browser.find_element(By.XPATH, '/html/body/div[2]/div[4]/div/form/div/div/span[1]/select/option[3]').click()
searchbox = self.browser.find_element(By.XPATH, '//*[@id="query"]')
searchbox.clear()
searchbox.send_keys(searchitem)
self.browser.find_element(By.XPATH, '/html/body/div[2]/div[4]/div/form/div/div/span[3]/button').click()
time.sleep(2)
print(f'Results Page 1 URL: {self.browser.current_url}')
# get page 2
# find next page button and click
self.browser.find_element(By.XPATH, '/html/body/div[2]/div/div[2]/div[3]/div[2]/div/span[1]/a/icon').click()
time.sleep(2)
print(f'Results Page 2 URL: {self.browser.current_url}')
# Get url of a detail page
self.browser.find_element(By.XPATH, '/html/body/div[2]/div/div[2]/table/tbody/tr[1]/td[1]/a').click()
time.sleep(2)
print(f'Detail Page URL: {self.browser.current_url}')
if __name__ == '__main__':
PreviewSearchPage()
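# A hedged robustness sketch: the fixed time.sleep(2) pauses above could be
# swapped for explicit waits (standard Selenium API; the 'query' id is taken
# from the XPath used in get_search_page):
#   from selenium.webdriver.support.ui import WebDriverWait
#   from selenium.webdriver.support import expected_conditions as EC
#   WebDriverWait(browser, 10).until(
#       EC.presence_of_element_located((By.ID, 'query')))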
| 1,777 | 3 | 192 |
ef3ca4c47347792c652cb4a02c6e84dcd3261f80 | 2,454 | py | Python | app/room/lobby/lobby_events_models.py | hmajid2301/banter-bus-core-api | 79a304965b58f0cb131e0770ffc3bd734ec4dc60 | [
"Apache-2.0"
] | null | null | null | app/room/lobby/lobby_events_models.py | hmajid2301/banter-bus-core-api | 79a304965b58f0cb131e0770ffc3bd734ec4dc60 | [
"Apache-2.0"
] | null | null | null | app/room/lobby/lobby_events_models.py | hmajid2301/banter-bus-core-api | 79a304965b58f0cb131e0770ffc3bd734ec4dc60 | [
"Apache-2.0"
] | null | null | null | from typing import List, Union
from pydantic import BaseModel, validator
from app.event_models import EventModel
JOIN_ROOM = "JOIN_ROOM"
REJOIN_ROOM = "REJOIN_ROOM"
ROOM_JOINED = "ROOM_JOINED"
NEW_ROOM_JOINED = "NEW_ROOM_JOINED"
KICK_PLAYER = "KICK_PLAYER"
PLAYER_KICKED = "PLAYER_KICKED"
PLAYER_DISCONNECTED = "PLAYER_DISCONNECTED"
HOST_DISCONNECTED = "HOST_DISCONNECTED"
START_GAME = "START_GAME"
GAME_STARTED = "GAME_STARTED"
| 19.171875 | 43 | 0.685412 | from typing import List, Union
from pydantic import BaseModel, validator
from app.event_models import EventModel
JOIN_ROOM = "JOIN_ROOM"
REJOIN_ROOM = "REJOIN_ROOM"
ROOM_JOINED = "ROOM_JOINED"
NEW_ROOM_JOINED = "NEW_ROOM_JOINED"
KICK_PLAYER = "KICK_PLAYER"
PLAYER_KICKED = "PLAYER_KICKED"
PLAYER_DISCONNECTED = "PLAYER_DISCONNECTED"
HOST_DISCONNECTED = "HOST_DISCONNECTED"
START_GAME = "START_GAME"
GAME_STARTED = "GAME_STARTED"
class JoinRoom(EventModel):
nickname: str
avatar: Union[str, bytes]
room_code: str
@validator("avatar", pre=True)
def base64_string_to_bytes(cls, value):
if isinstance(value, str):
return value.encode()
return value
@property
def event_name(self):
return JOIN_ROOM
class RejoinRoom(EventModel):
player_id: str
@property
def event_name(self):
return REJOIN_ROOM
class Player(BaseModel):
nickname: str
avatar: Union[str, bytes]
@validator("avatar", pre=True)
def base64_bytes_to_string(cls, value):
if isinstance(value, bytes):
return value.decode()
return value
class RoomJoined(EventModel):
host_player_nickname: str
players: List[Player]
@property
def event_name(self):
return ROOM_JOINED
class NewRoomJoined(EventModel):
player_id: str
@property
def event_name(self):
return NEW_ROOM_JOINED
class KickPlayer(EventModel):
kick_player_nickname: str
player_id: str
room_code: str
@property
def event_name(self):
return KICK_PLAYER
class PlayerKicked(EventModel):
nickname: str
@property
def event_name(self):
return PLAYER_KICKED
class PlayerDisconnected(EventModel):
nickname: str
avatar: Union[str, bytes]
@validator("avatar", pre=True)
def base64_bytes_to_string(cls, value):
if isinstance(value, bytes):
return value.decode()
return value
@property
def event_name(self):
return PLAYER_DISCONNECTED
class HostDisconnected(EventModel):
new_host_nickname: str
@property
def event_name(self):
return HOST_DISCONNECTED
class StartGame(EventModel):
player_id: str
game_name: str
room_code: str
@property
def event_name(self):
return START_GAME
class GameStarted(EventModel):
game_name: str
@property
def event_name(self):
return GAME_STARTED
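# A minimal usage sketch, assuming EventModel extends pydantic's BaseModel
# (see app.event_models); the validator normalises str avatars to bytes:
#   room = JoinRoom(nickname="alice", avatar="aGk=", room_code="ABCDE")
#   assert isinstance(room.avatar, bytes) and room.event_name == JOIN_ROOM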
| 616 | 1,142 | 253 |
af00604e8cec5ece69bda0fcc4b6b77604e5f984 | 2,070 | py | Python | Session09_AWSSagemakerAndLargeScaleModelTraining/utils_cifar.py | garima-mahato/TSAI_EMLO1.0 | f1478572a20988296831e70d6cf1dac9b36e7573 | [
"Apache-2.0"
] | null | null | null | Session09_AWSSagemakerAndLargeScaleModelTraining/utils_cifar.py | garima-mahato/TSAI_EMLO1.0 | f1478572a20988296831e70d6cf1dac9b36e7573 | [
"Apache-2.0"
] | null | null | null | Session09_AWSSagemakerAndLargeScaleModelTraining/utils_cifar.py | garima-mahato/TSAI_EMLO1.0 | f1478572a20988296831e70d6cf1dac9b36e7573 | [
"Apache-2.0"
] | null | null | null | import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
classes = ('beaver','dolphin','otter','seal','whale','aquarium fish','flatfish','ray','shark','trout','orchids','poppies','roses','sunflowers','tulips','bottles','bowls','cans','cups','plates','apples','mushrooms','oranges','pears','sweet peppers','clock','computer keyboard','lamp','telephone','television','bed','chair','couch','table','wardrobe','bee','beetle','butterfly','caterpillar','cockroach','bear','leopard','lion','tiger','wolf','bridge','castle','house','road','skyscraper','cloud','forest','mountain','plain','sea','camel','cattle','chimpanzee','elephant','kangaroo','fox','porcupine','possum','raccoon','skunk','crab','lobster','snail','spider','worm','baby','boy','girl','man','woman','crocodile','dinosaur','lizard','snake','turtle','hamster','mouse','rabbit','shrew','squirrel','maple','oak','palm','pine','willow','bicycle','bus','motorcycle','pickup truck','train','lawn-mower','rocket','streetcar','tank','tractor')
# function to show an image | 57.5 | 936 | 0.637681 | import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
classes = ('beaver','dolphin','otter','seal','whale','aquarium fish','flatfish','ray','shark','trout','orchids','poppies','roses','sunflowers','tulips','bottles','bowls','cans','cups','plates','apples','mushrooms','oranges','pears','sweet peppers','clock','computer keyboard','lamp','telephone','television','bed','chair','couch','table','wardrobe','bee','beetle','butterfly','caterpillar','cockroach','bear','leopard','lion','tiger','wolf','bridge','castle','house','road','skyscraper','cloud','forest','mountain','plain','sea','camel','cattle','chimpanzee','elephant','kangaroo','fox','porcupine','possum','raccoon','skunk','crab','lobster','snail','spider','worm','baby','boy','girl','man','woman','crocodile','dinosaur','lizard','snake','turtle','hamster','mouse','rabbit','shrew','squirrel','maple','oak','palm','pine','willow','bicycle','bus','motorcycle','pickup truck','train','lawn-mower','rocket','streetcar','tank','tractor')
def _get_transform():
return transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_train_data_loader():
transform = _get_transform()
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
return torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
def get_test_data_loader():
transform = _get_transform()
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transform)
return torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
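# A hedged usage sketch combining the helpers above (the label-to-name order
# of the 'classes' tuple is assumed by this module):
#   import torchvision.utils
#   images, labels = next(iter(get_test_data_loader()))
#   imshow(torchvision.utils.make_grid(images))
#   print(' '.join(classes[int(l)] for l in labels))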
# function to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0))) | 870 | 0 | 95 |
557db076fc25c985a349f5da185ef3f69dff348f | 435 | py | Python | suspicious/migrations/0003_suspicious_mac.py | NumanIbnMazid/magfetch | fb6297fd3b2277bf48289ff95d1ed4f071b9aded | [
"MIT"
] | 1 | 2020-03-13T07:09:34.000Z | 2020-03-13T07:09:34.000Z | suspicious/migrations/0003_suspicious_mac.py | NumanIbnMazid/magfetch | fb6297fd3b2277bf48289ff95d1ed4f071b9aded | [
"MIT"
] | 8 | 2020-02-11T23:52:58.000Z | 2022-03-11T23:42:09.000Z | suspicious/migrations/0003_suspicious_mac.py | NumanIbnMazid/magfetch | fb6297fd3b2277bf48289ff95d1ed4f071b9aded | [
"MIT"
] | 1 | 2020-03-13T07:09:35.000Z | 2020-03-13T07:09:35.000Z | # Generated by Django 2.1.7 on 2019-04-26 09:16
from django.db import migrations, models
| 22.894737 | 102 | 0.616092 | # Generated by Django 2.1.7 on 2019-04-26 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('suspicious', '0002_suspicious_ip'),
]
operations = [
migrations.AddField(
model_name='suspicious',
name='mac',
field=models.CharField(blank=True, max_length=150, null=True, verbose_name='mac address'),
),
]
| 0 | 321 | 23 |
4d1420ceee345823dfa047e8767832ea59517104 | 121 | py | Python | keras_efficientnets/__init__.py | devinwang/keras-efficientnets | 692776594b798ec2a174c55f45e06dfe362d9cb1 | [
"MIT"
] | 1 | 2019-08-09T21:06:38.000Z | 2019-08-09T21:06:38.000Z | keras_efficientnets/__init__.py | devinwang/keras-efficientnets | 692776594b798ec2a174c55f45e06dfe362d9cb1 | [
"MIT"
] | null | null | null | keras_efficientnets/__init__.py | devinwang/keras-efficientnets | 692776594b798ec2a174c55f45e06dfe362d9cb1 | [
"MIT"
] | null | null | null | from keras_efficientnets.efficientnet import *
from keras_efficientnets.config import BlockArgs
__version__ = '0.1.6.1'
| 24.2 | 48 | 0.826446 | from keras_efficientnets.efficientnet import *
from keras_efficientnets.config import BlockArgs
__version__ = '0.1.6.1'
| 0 | 0 | 0 |
0984ccd85e47d788cdbe4d321f70bfd7214ca50f | 663 | py | Python | pikapi/spiders/__init__.py | yeshl/pikapi | 3aff872c02c432991376bfd04284b97a185c91ab | [
"Apache-2.0"
] | null | null | null | pikapi/spiders/__init__.py | yeshl/pikapi | 3aff872c02c432991376bfd04284b97a185c91ab | [
"Apache-2.0"
] | null | null | null | pikapi/spiders/__init__.py | yeshl/pikapi | 3aff872c02c432991376bfd04284b97a185c91ab | [
"Apache-2.0"
] | null | null | null | from pikapi.spiders.spider_by_browser import *
from pikapi.spiders.spider_by_cookie import *
from pikapi.spiders.spider_by_req import *
all_providers = [
SpiderXiladaili,
SpiderYqie,
SpiderZdaye,
SpiderSuperfastip,
SpiderXsdaili,
SpiderCrossincode,
SpiderTxt,
SpiderKxdaili,
SpiderJiangxianli,
SpiderProxylistplus,
SpiderProxyListen,
SpiderIp3366,
Spider31f,
SpiderFeilong,
SpiderIphai,
Spider89ip,
SpiderCnProxy,
SpiderData5u,
SpiderMrhinkydink,
SpiderKuaidaili,
SpiderIpaddress,
SpiderXici,
Spider66ipcn,
Spider66ip,
SpiderGoubanjia,
SpiderCoolProxy,
]
| 18.416667 | 46 | 0.71644 | from pikapi.spiders.spider_by_browser import *
from pikapi.spiders.spider_by_cookie import *
from pikapi.spiders.spider_by_req import *
all_providers = [
SpiderXiladaili,
SpiderYqie,
SpiderZdaye,
SpiderSuperfastip,
SpiderXsdaili,
SpiderCrossincode,
SpiderTxt,
SpiderKxdaili,
SpiderJiangxianli,
SpiderProxylistplus,
SpiderProxyListen,
SpiderIp3366,
Spider31f,
SpiderFeilong,
SpiderIphai,
Spider89ip,
SpiderCnProxy,
SpiderData5u,
SpiderMrhinkydink,
SpiderKuaidaili,
SpiderIpaddress,
SpiderXici,
Spider66ipcn,
Spider66ip,
SpiderGoubanjia,
SpiderCoolProxy,
]
| 0 | 0 | 0 |
84439ad92b590765aa87bac53211938c3317f694 | 2,273 | py | Python | vis_row_depth.py | tim885/DeepDepthRefiner | a59f376b5b0ff01b0d166ec8d946a20c81a6b190 | [
"MIT"
] | 4 | 2020-05-25T02:53:49.000Z | 2022-03-03T03:11:38.000Z | data/vis_row_depth.py | YoungXIAO13/DeepDepthRefiner | 055380e99f94206b5a098debca6c93aa274f9d29 | [
"MIT"
] | null | null | null | data/vis_row_depth.py | YoungXIAO13/DeepDepthRefiner | 055380e99f94206b5a098debca6c93aa274f9d29 | [
"MIT"
] | 2 | 2020-03-31T18:07:41.000Z | 2021-06-26T23:57:03.000Z | import argparse
import os
from scipy.io import loadmat
import numpy as np
import cv2
import matplotlib
matplotlib.use('agg') # use matplotlib without GUI support
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/home/xuchong/Projects/occ_edge_order/data/dataset_real/NYUv2/data/val_occ_order_raycasting_woNormal_avgROI_1mm')
parser.add_argument('--gt_depth', type=str, default='/space_sdd/NYU/nyuv2_depth.npy')
parser.add_argument('--refine_dir', type=str,
default='/space_sdd/NYU/depth_refine/depth1_grad1_occ0.1_change1_1e-5/eigen/depth_npy')
opt = parser.parse_args()
# load rgb list
img_list = sorted([name for name in os.listdir(opt.data_dir) if name.endswith("-rgb.png")])
# load gt depth
gt_depths = np.load(opt.gt_depth)
# load initial depth map list
init_depths = read_eigen()
# load refined depth map list
refine_list = sorted(os.listdir(opt.refine_dir))
eigen_crop = [21, 461, 25, 617]
index = 120
row = 300
img = cv2.imread(os.path.join(opt.data_dir, img_list[index]), -1)
print('img shape is {}'.format(img.shape))
gt_depth = gt_depths[index][21:461, 25:617]
print('gt depth shape is {}'.format(gt_depth.shape))
init_depth = init_depths[index][21:461, 25:617]
print('init depth shape is {}'.format(init_depth.shape))
refine_depth = np.load(os.path.join(opt.refine_dir, refine_list[index]))[21:461, 25:617]
print('refine depth shape is {}'.format(refine_depth.shape))
# draw the figure
fig, (ax1, ax2) = plt.subplots(nrows=2)
img[row - 3: row + 3, :, :] = (img[row - 3: row + 3, :, :] + 255) / 2
ax1.imshow(img)
t = np.arange(592)
ax2.plot(t, gt_depth[row, t], 'r-', t, init_depth[row, t], 'b-', t, refine_depth[row, t], 'g-')
asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
ax2.set_aspect(asp)
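# (Note on the trick above: set_aspect() takes a data-units ratio, so dividing
# ax2's xlim/ylim span ratio by ax1's scales the line plot's axes box to match
# the image panel's on-screen proportions.)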
fig.savefig('vis_row_depth.eps')
plt.close(fig)
| 30.716216 | 166 | 0.703036 | import argparse
import os
from scipy.io import loadmat
import numpy as np
import cv2
import matplotlib
matplotlib.use('agg') # use matplotlib without GUI support
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/home/xuchong/Projects/occ_edge_order/data/dataset_real/NYUv2/data/val_occ_order_raycasting_woNormal_avgROI_1mm')
parser.add_argument('--gt_depth', type=str, default='/space_sdd/NYU/nyuv2_depth.npy')
parser.add_argument('--refine_dir', type=str,
default='/space_sdd/NYU/depth_refine/depth1_grad1_occ0.1_change1_1e-5/eigen/depth_npy')
opt = parser.parse_args()
# load rgb list
img_list = sorted([name for name in os.listdir(opt.data_dir) if name.endswith("-rgb.png")])
# load gt depth
gt_depths = np.load(opt.gt_depth)
# load initial depth map list
def read_eigen():
ours = loadmat('/space_sdd/NYU/depth_predictions/eigen_nyud_depth_predictions.mat')
ours = ours['fine_predictions']
ours = ours.transpose((2, 0, 1))
out = []
for line in ours:
line = cv2.resize(line, (640, 480))
out.append(line)
out = np.array(out)
return out
init_depths = read_eigen()
# load refined depth map list
refine_list = sorted(os.listdir(opt.refine_dir))
eigen_crop = [21, 461, 25, 617]
index = 120
row = 300
img = cv2.imread(os.path.join(opt.data_dir, img_list[index]), -1)
print('img shape is {}'.format(img.shape))
gt_depth = gt_depths[index][21:461, 25:617]
print('gt depth shape is {}'.format(gt_depth.shape))
init_depth = init_depths[index][21:461, 25:617]
print('init depth shape is {}'.format(init_depth.shape))
refine_depth = np.load(os.path.join(opt.refine_dir, refine_list[index]))[21:461, 25:617]
print('refine depth shape is {}'.format(refine_depth.shape))
# draw the figure
fig, (ax1, ax2) = plt.subplots(nrows=2)
img[row - 3: row + 3, :, :] = (img[row - 3: row + 3, :, :] + 255) / 2
ax1.imshow(img)
t = np.arange(592)
ax2.plot(t, gt_depth[row, t], 'r-', t, init_depth[row, t], 'b-', t, refine_depth[row, t], 'g-')
asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
ax2.set_aspect(asp)
fig.savefig('vis_row_depth.eps')
plt.close(fig)
| 300 | 0 | 22 |
89bc650b351296268570ec4daf49e811a3b26cc7 | 4,301 | py | Python | datasets/mvn_pretrain.py | ZhengPeng7/PSxMVN | a8787946bdb2c97643041d24f7be53ae128f8494 | [
"MIT"
] | 5 | 2021-12-07T03:11:02.000Z | 2022-01-22T15:52:19.000Z | datasets/mvn_pretrain.py | ZhengPeng7/PSxMVN | a8787946bdb2c97643041d24f7be53ae128f8494 | [
"MIT"
] | 2 | 2021-12-18T07:24:25.000Z | 2022-03-31T08:43:48.000Z | datasets/mvn_pretrain.py | ZhengPeng7/GLCNet | 5ec7d4eb0ddece3f789df0b25f414bc4b5ac1d9e | [
"MIT"
] | null | null | null | import os
import os.path as osp
import re
import numpy as np
from numpy import array, int32
from scipy.io import loadmat
from .base import BaseDataset
| 37.077586 | 105 | 0.516624 | import os
import os.path as osp
import re
import numpy as np
from numpy import array, int32
from scipy.io import loadmat
from .base import BaseDataset
class MVN_pretrain(BaseDataset):
def __init__(self, root, transforms, split):
self.name = "MVN_pretrain"
self.img_prefix = osp.join(root, "frames")
self.anno_preloaded = 'data/anno_loaded/{}.txt'.format(split)
super(MVN_pretrain, self).__init__(root, transforms, split)
def _load_queries(self):
if os.path.exists(self.anno_preloaded):
print('Loading preloaded one...')
with open(self.anno_preloaded, 'r') as fin:
queries = eval(fin.read())
return queries
query_info = osp.join(self.root, "query_info.txt")
with open(query_info, "rb") as f:
raw = f.readlines()
queries = []
for line in raw:
linelist = str(line, "utf-8").split(" ")
pid = int(linelist[0])
x, y, w, h = (
float(linelist[1]),
float(linelist[2]),
float(linelist[3]),
float(linelist[4]),
)
roi = np.array([x, y, x + w, y + h]).astype(np.int32)
roi = np.clip(roi, 0, None) # several coordinates are negative
img_name = linelist[5].strip() + ".jpg"
queries.append(
{
"img_name": img_name,
"img_path": osp.join(self.img_prefix.replace('frames', 'query_portraits'), img_name),
"boxes": roi[np.newaxis, :],
"pids": np.array([pid]),
}
)
print('-----------\n-----------\n{} pid:'.format(self.split))
with open('data/anno_loaded/{}.txt'.format(self.split), 'w') as fout:
fout.write(str(queries))
return queries
def _load_split_img_names(self):
"""
Load the image names for the specific split.
"""
assert self.split in ("train", "gallery")
if self.split == "train":
imgs = loadmat(osp.join(self.root, "frame_train.mat"))["img_index_train"]
else:
imgs = loadmat(osp.join(self.root, "frame_test.mat"))["img_index_test"]
return [img[0][0] + ".jpg" for img in imgs]
def _load_annotations(self):
if self.split == "query":
return self._load_queries()
if os.path.exists(self.anno_preloaded):
print('Loading preloaded one...')
with open(self.anno_preloaded, 'r') as fin:
annotations = eval(fin.read())
pids = []
for i in annotations:
pids.append(i['pids'][0])
np.savetxt('data/anno_loaded/pids_{}.txt'.format(self.split), pids)
return annotations
annotations = []
imgs = self._load_split_img_names()
print('Loading annotations >>>')
for idx_img, img_name in enumerate(imgs):
if idx_img % 1000 == 0:
print('{}/{},\t'.format(idx_img, len(imgs)), end='')
anno_path = osp.join(self.root, "annotations", img_name.replace('.jpg', ''))
anno = loadmat(anno_path)
box_key = "box_new"
if box_key not in anno.keys():
box_key = "anno_file"
if box_key not in anno.keys():
box_key = "anno_previous"
rois = anno[box_key][:, 1:]
ids = anno[box_key][:, 0]
rois = np.clip(rois, 0, None) # several coordinates are negative
assert len(rois) == len(ids)
rois[:, 2:] += rois[:, :2]
ids[ids == -2] = 5555 # assign pid = 5555 for unlabeled people
annotations.append(
{
"img_name": img_name,
"img_path": osp.join(self.img_prefix, img_name),
"boxes": rois.astype(np.int32),
# FIXME: (training pids) 1, 2,..., 478, 480, 481, 482, 483, 932, 5555
"pids": ids.astype(np.int32),
}
)
with open('data/anno_loaded/{}.txt'.format(self.split), 'w') as fout:
fout.write(str(annotations))
print('\n<<< End')
return annotations
| 3,598 | 526 | 23 |
0b0ed57b1d31e159aa68e36722ffa323f273b4a6 | 4,375 | py | Python | pyge/gameObjects/primitives.py | Jonathan-Andrews/pyGE-Python-Game-Engine | 747e38b9a9f752bfee89bef54417be6723329e90 | [
"MIT"
] | 1 | 2020-03-02T18:34:18.000Z | 2020-03-02T18:34:18.000Z | pyge/gameObjects/primitives.py | Jonathan-Andrews/pyGE-Python-Game-Engine | 747e38b9a9f752bfee89bef54417be6723329e90 | [
"MIT"
] | null | null | null | pyge/gameObjects/primitives.py | Jonathan-Andrews/pyGE-Python-Game-Engine | 747e38b9a9f752bfee89bef54417be6723329e90 | [
"MIT"
] | 1 | 2020-05-10T14:03:58.000Z | 2020-05-10T14:03:58.000Z | """
A list of functions that return gameObject (Object2D) instances of primitive shapes.
"""
from math import cos, sin, pi
from .object2d import Object2D
# ----------------------------------------------------------------------------------------
def draw_square(x:float, y:float, height:float = 1, width:float = 1, fill:bool = False):
"""
    Returns an Object2D instance that draws a square (a rectangle when height != width)
    Arguments:
        x : float : The x starting point of the square, i.e. the bottom left corner.
        y : float : The y starting point of the square, i.e. the bottom left corner.
height : float : The height of the square.
width : float : The width of the square.
fill : bool : Should the shape be filled.
"""
# Calculate the other x and y cords.
cords = [[x, y]]
cords.append([x+width, y])
cords.append([x+width, y+height])
cords.append([x, y+height])
if fill:
return Object2D(cords, [[0,1,2],[0,3,2]], draw_type='triangles')
else:
return Object2D(cords, [[0,1],[1,2],[2,3],[3,0]], draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_triangle(cords:list, fill=False):
"""
    Returns an Object2D instance that draws a triangle
Arguments:
cords : float : The x and y cords for each vertex of the triangle, should look like [[x1,y1]...]
fill : bool : Should the shape be filled.
"""
if len(cords) > 3:
raise TypeError("At primitives.draw_triangle(): The length of the given cords is greater than 3, a triangle should only have 3 cords.")
if fill:
return Object2D(cords, [[0,1,2]], draw_type='triangles')
else:
return Object2D(cords, [[0,1],[1,2],[2,0]], draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_circle(center_x:float, center_y:float, radius:float = 0.3, segments:int = 360, fill:bool=False):
"""
    Returns an Object2D instance that draws a circle
Arguments:
center_x : float : The x cord for the center of the circle.
center_y : float : The y cord for the center of the circle.
radius : float : The radius of the circle.
segments : int : How many segments to make the circle from.
fill : bool : Should the shape be filled.
"""
edges = []
cords = []
for i in range(segments):
theta = (2 * pi * i)/segments # Get the current angle
x = radius * cos(theta) + center_x # Get the x cord
y = radius * sin(theta) + center_y # Get the y cord
cords.append([x, y])
if fill:
cords.insert(0, [center_x, center_y])
for i in range(len(cords)-2):
edges.append([0, i+1, i+2])
edges.append([0, segments, 1]) # Fixes a little glitch
return Object2D(cords, edges, draw_type='triangles')
else:
for i in range(len(cords)-1):
edges.append([i, i+1])
edges.append([segments-1,0]) # Fixes a little glitch
return Object2D(cords, edges, draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_arc(center_x:float, center_y:float, radius:float = 0.3, arc_angle:float = 90, start_angle:float = 0, segments:int = 360, fill:bool=False):
"""
    Returns an Object2D instance that draws an arc; angles should be given in degrees, not radians.
Arguments:
center_x : float : The x cord for the center of the circle.
center_y : float : The y cord for the center of the circle.
radius : float : The radius of the circle.
arc_angle : float : The angle of the arc.
start_angle : float : The angle from where the arc should start from.
segments : int : How many segments to make the circle from.
fill : bool : Should the shape be filled.
"""
edges = []
cords = []
for i in range(segments):
        theta = ((arc_angle * pi * i / 180) / segments) + (start_angle * pi / 180) # Get the current angle (both angles converted from degrees to radians)
x = radius * cos(theta) + center_x # Get the x cord
y = radius * sin(theta) + center_y # Get the y cord
cords.append([x, y])
if fill:
cords.insert(0, [center_x, center_y-(center_y-cords[0][1])])
for i in range(len(cords)-2):
edges.append([0, i+1, i+2])
return Object2D(cords, edges, draw_type='triangles')
else:
for i in range(len(cords)-1):
edges.append([i, i+1])
return Object2D(cords, edges, draw_type='lines')
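# A hedged usage sketch (Object2D and the 'lines'/'triangles' draw types come
# from .object2d, whose rendering API is assumed here):
#   square = draw_square(-0.5, -0.5, height=1, width=1, fill=True)
#   circle = draw_circle(0.0, 0.0, radius=0.3, segments=32)
#   arc = draw_arc(0.0, 0.0, radius=0.5, arc_angle=90, start_angle=45)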
| 33.914729 | 148 | 0.585829 | """
A list of functions that return gameObject (Object2D) instances of primitive shapes.
"""
from math import cos, sin, pi
from .object2d import Object2D
# ----------------------------------------------------------------------------------------
def draw_square(x:float, y:float, height:float = 1, width:float = 1, fill:bool = False):
"""
    Returns an Object2D instance that draws a square (a rectangle when height != width)
    Arguments:
        x : float : The x starting point of the square, i.e. the bottom left corner.
        y : float : The y starting point of the square, i.e. the bottom left corner.
height : float : The height of the square.
width : float : The width of the square.
fill : bool : Should the shape be filled.
"""
# Calculate the other x and y cords.
cords = [[x, y]]
cords.append([x+width, y])
cords.append([x+width, y+height])
cords.append([x, y+height])
if fill:
return Object2D(cords, [[0,1,2],[0,3,2]], draw_type='triangles')
else:
return Object2D(cords, [[0,1],[1,2],[2,3],[3,0]], draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_triangle(cords:list, fill=False):
"""
    Returns an Object2D instance that draws a triangle
Arguments:
cords : float : The x and y cords for each vertex of the triangle, should look like [[x1,y1]...]
fill : bool : Should the shape be filled.
"""
if len(cords) > 3:
raise TypeError("At primitives.draw_triangle(): The length of the given cords is greater than 3, a triangle should only have 3 cords.")
if fill:
return Object2D(cords, [[0,1,2]], draw_type='triangles')
else:
return Object2D(cords, [[0,1],[1,2],[2,0]], draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_circle(center_x:float, center_y:float, radius:float = 0.3, segments:int = 360, fill:bool=False):
"""
    Returns an Object2D instance that draws a circle
Arguments:
center_x : float : The x cord for the center of the circle.
center_y : float : The y cord for the center of the circle.
radius : float : The radius of the circle.
segments : int : How many segments to make the circle from.
fill : bool : Should the shape be filled.
"""
edges = []
cords = []
for i in range(segments):
theta = (2 * pi * i)/segments # Get the current angle
x = radius * cos(theta) + center_x # Get the x cord
y = radius * sin(theta) + center_y # Get the y cord
cords.append([x, y])
if fill:
cords.insert(0, [center_x, center_y])
for i in range(len(cords)-2):
edges.append([0, i+1, i+2])
edges.append([0, segments, 1]) # Fixes a little glitch
return Object2D(cords, edges, draw_type='triangles')
else:
for i in range(len(cords)-1):
edges.append([i, i+1])
edges.append([segments-1,0]) # Fixes a little glitch
return Object2D(cords, edges, draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_arc(center_x:float, center_y:float, radius:float = 0.3, arc_angle:float = 90, start_angle:float = 0, segments:int = 360, fill:bool=False):
"""
    Returns an Object2D instance that draws an arc; angles should be given in degrees, not radians.
Arguments:
center_x : float : The x cord for the center of the circle.
center_y : float : The y cord for the center of the circle.
radius : float : The radius of the circle.
arc_angle : float : The angle of the arc.
start_angle : float : The angle from where the arc should start from.
segments : int : How many segments to make the circle from.
fill : bool : Should the shape be filled.
"""
edges = []
cords = []
for i in range(segments):
        theta = ((arc_angle * pi * i / 180) / segments) + (start_angle * pi / 180) # Get the current angle (both angles converted from degrees to radians)
x = radius * cos(theta) + center_x # Get the x cord
y = radius * sin(theta) + center_y # Get the y cord
cords.append([x, y])
if fill:
cords.insert(0, [center_x, center_y-(center_y-cords[0][1])])
for i in range(len(cords)-2):
edges.append([0, i+1, i+2])
return Object2D(cords, edges, draw_type='triangles')
else:
for i in range(len(cords)-1):
edges.append([i, i+1])
return Object2D(cords, edges, draw_type='lines')
| 0 | 0 | 0 |
1ccc3c0d1862b99f87b021ff7667ea871018f267 | 27,305 | py | Python | mds_plugin/network.py | mike-lischke/mysql-shell-plugins | d7d15591dd8e70f7f5ef8ea579e0797eff30fa0a | [
"Apache-2.0",
"CC0-1.0"
] | 11 | 2022-03-02T11:04:16.000Z | 2022-03-29T05:28:23.000Z | mds_plugin/network.py | mike-lischke/mysql-shell-plugins | d7d15591dd8e70f7f5ef8ea579e0797eff30fa0a | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2022-03-25T15:12:16.000Z | 2022-03-31T18:59:22.000Z | mds_plugin/network.py | mike-lischke/mysql-shell-plugins | d7d15591dd8e70f7f5ef8ea579e0797eff30fa0a | [
"Apache-2.0",
"CC0-1.0"
] | 3 | 2022-03-24T11:32:12.000Z | 2022-03-25T20:40:14.000Z | # Copyright (c) 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Sub-Module to manage OCI Networking"""
from mysqlsh.plugin_manager import plugin_function
from mds_plugin import core, configuration
# cSpell:ignore vcns
def format_load_balancer_listing(items, current=None) -> str:
"""Formats a given list of objects in a human readable form
Args:
items: Either a list of objects or a single object
current (str): OCID of the current item
Returns:
        The load balancers formatted as str
"""
    # If a single load balancer was given, wrap it in a list
if not type(items) is list:
items = [items]
# return objects in READABLE text output
out = ""
id = 1
for i in items:
index = f"*{id:>3} " if current == i.id else f"{id:>4} "
ips = ""
for ip in i.ip_addresses:
            ips += ip.ip_address + ("*, " if ip.is_public else ", ")  # star marks public IPs
if len(ips) > 2:
ips = ips[0:-2]
out += (index +
core.fixed_len(i.display_name, 24, ' ', True) +
core.fixed_len(i.lifecycle_state, 8, ' ') +
core.fixed_len(f"{ips}", 24, '\n'))
id += 1
return out
@plugin_function('mds.list.networks')
def list_networks(**kwargs):
"""Lists all networks of the given compartment
Args:
**kwargs: Additional options
Keyword Args:
public_subnet (bool): Whether only public or private subnets should be
considered
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
return_formatted (bool): If set to true, a list object is returned.
check_privileges (bool): Checks if the user has privileges for the
subnet
Returns:
a network object
"""
public_subnet = kwargs.get("public_subnet")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
return_formatted = kwargs.get("return_formatted", True)
check_privileges = kwargs.get("check_privileges", False)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# List the virtual networks
vcns = virtual_network.list_vcns(
compartment_id=compartment_id).data
# Filter out all sub-nets that are not conforming to the
# public_subnet options
if public_subnet is not None:
# Loop over VCNs to see if access is granted
good_vcns = []
for vcn in vcns:
try:
if network_has_subnet(
network=vcn, compartment_id=compartment_id,
config=config,
public_subnet=public_subnet,
check_privileges=check_privileges):
good_vcns.append(vcn)
except oci.exceptions.ServiceError as e:
pass
vcns = good_vcns
if return_formatted:
return format_network_listing(vcns)
else:
return oci.util.to_dict(vcns)
except ValueError as e:
print(f"ERROR: {str(e)}")
return
@plugin_function('mds.get.network')
def get_network(**kwargs):
"""Returns a network object
If multiple or no networks are available in the current compartment,
let the user select a different compartment
Args:
**kwargs: Additional options
Keyword Args:
network_name (str): The display_name of the network
network_id (str): The OCID of the network
public_subnet (bool): Whether only public or private subnets should be
considered
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Whether to query the user for input
ignore_current (bool): Whether to ignore the current
Returns:
a network object
"""
network_name = kwargs.get("network_name")
network_id = kwargs.get("network_id")
public_subnet = kwargs.get("public_subnet")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
ignore_current = kwargs.get("ignore_current", False)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
from mds_plugin import compartment
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# If a specific network was specified, return this network
if network_id:
vcn = virtual_network.get_vcn(vcn_id=network_id).data
return vcn
# Loop until the user selected a compartment with vcns
vcns = []
rejected_vcns = []
while len(vcns) == 0:
try:
# List the virtual networks, filter by network_name if given
vcns = virtual_network.list_vcns(
compartment_id=compartment_id,
display_name=network_name).data
# Filter out rejected VCNs
vcns = [n for n in vcns if n not in rejected_vcns]
if len(vcns) == 0:
network_comp = compartment.get_compartment_by_id(
compartment_id=compartment_id, config=config)
print(f"The compartment {network_comp.name} does not "
"contain a suitable virtual network.")
if interactive:
print("Please select another compartment.\n")
else:
return
compartment_id = compartment.get_compartment_id(
compartment_id=compartment_id, config=config)
if compartment_id == None:
print("Operation cancelled.")
return
else:
# Filter out all sub-nets that are not conforming to the
# public_subnet options
if public_subnet is not None:
# Loop over VCNs to see if access is granted
good_vcns = []
for vcn in vcns:
newly_rejected_vcns = []
try:
if network_has_subnet(
network=vcn,
compartment_id=compartment_id,
config=config,
public_subnet=public_subnet):
good_vcns.append(vcn)
else:
newly_rejected_vcns.append(vcn)
except oci.exceptions.ServiceError as e:
if e.status == 404:
newly_rejected_vcns.append(vcn)
rejected_vcns = rejected_vcns + newly_rejected_vcns
vcns = good_vcns
except oci.exceptions.ServiceError as e:
if e.code == "NotAuthorizedOrNotFound":
print(f'You do not have privileges to list the '
f'networks of this compartment.')
else:
print(f'Could not list networks of compartment '
f'{network_comp.name}\n')
print(
f'ERROR: {e.message}. (Code: {e.code}; '
f'Status: {e.status})')
vcns = []
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
vcns = []
# If there is a single network in this compartment, return this
# one if it matches the network_name (if given)
if len(vcns) == 1 and not ignore_current:
return vcns[0]
if not interactive:
print("Error: There are multiple virtual networks in this "
"compartment.")
return
# Let the user choose from the list
vcn = core.prompt_for_list_item(
item_list=vcns, prompt_caption=("Please enter the name or index "
"of the virtual network: "),
item_name_property="display_name",
print_list=True)
return vcn
except oci.exceptions.ServiceError as e:
if e.code == "NotAuthorizedOrNotFound":
print(f'You do not have privileges to access this network.')
else:
print(f'Could not get the network.')
print(
f'ERROR: {e.message}. (Code: {e.code}; '
f'Status: {e.status})')
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
@plugin_function('mds.list.subnets')
def list_subnets(**kwargs):
"""Lists all subnets of the given network
Args:
**kwargs: Additional options
Keyword Args:
        network_id (str): The OCID of the parent network
public_subnet (bool): Whether only public subnets should be considered
        availability_domain (str): The name of the availability domain
ignore_current_network (bool): Whether to ignore the current network
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Whether to query the user for input
return_formatted (bool): If set to true, a list object is returned.
Returns:
A list of subnets
"""
network_id = kwargs.get("network_id")
public_subnet = kwargs.get("public_subnet")
# availability_domain = kwargs.get("availability_domain")
ignore_current_network = kwargs.get("ignore_current_network")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
return_formatted = kwargs.get("return_formatted", True)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
# compartment_id = configuration.get_current_compartment_id(
# compartment_id=compartment_id, config=config)
if not ignore_current_network:
network_id = configuration.get_current_network_id(
network_id=network_id, config=config)
import oci.exceptions
from mds_plugin import compartment
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# If a subnet_id was given, return the subnet of that subnet_id
# if subnet_id is not None:
# try:
# return virtual_network.get_subnet(subnet_id=subnet_id).data
# except oci.exceptions.ServiceError as e:
# print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
# return
# except (ValueError, oci.exceptions.ClientError) as e:
# print(f'ERROR: {e}')
# return
network = get_network(network_id=network_id,
compartment_id=compartment_id, config=config,
public_subnet=public_subnet, interactive=interactive)
if network is None:
return
network_name = network.display_name if network.display_name else \
network.id
network_compartment = network.compartment_id
# Get the compartment
compartment = compartment.get_compartment_by_id(
compartment_id=network_compartment, config=config)
if compartment is None:
return
# If no availability_domain was specified, use a random one
# if availability_domain is None:
# availability_domain = compartment.get_availability_domain(
# compartment_id=compartment_id,
# availability_domain=availability_domain, config=config)
subnets = virtual_network.list_subnets(
compartment_id=network_compartment, vcn_id=network.id).data
# Filter subnets by Availability Domain, None means the subnet
# spans across all Availability Domains
# subnets = [s for s in subnets
# if s.availability_domain == availability_domain or
# s.availability_domain is None]
# Filter out all sub-nets that are not conforming to the
# public_subnet options
if public_subnet is not None and public_subnet:
out = "All public "
subnets = [s for s in subnets
if subnet_is_public(subnet=s, config=config)]
elif public_subnet is not None and not public_subnet:
out = "All private "
subnets = [s for s in subnets
if not subnet_is_public(subnet=s, config=config)]
else:
out = "All "
out += f"subnets of Network '{network_name}' in compartment " + \
f"'{compartment.name}':\n\n"
if return_formatted:
return out + format_subnet_listing(subnets)
else:
return oci.util.to_dict(subnets)
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
return
@plugin_function('mds.get.subnet')
def get_subnet(**kwargs):
"""Returns a subnet object
If multiple or no networks are available in the current compartment,
let the user select a different compartment
Args:
**kwargs: Additional options
Keyword Args:
subnet_name (str): The display_name of the subnet
subnet_id (str): The OCID of the subnet
        network_id (str): The OCID of the parent network
public_subnet (bool): Whether only public subnets should be considered
        availability_domain (str): The name of the availability domain
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Whether to query the user for input
Returns:
a subnet object
"""
subnet_name = kwargs.get("subnet_name")
subnet_id = kwargs.get("subnet_id")
network_id = kwargs.get("network_id")
public_subnet = kwargs.get("public_subnet")
availability_domain = kwargs.get("availability_domain")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
network_id = configuration.get_current_network_id(
network_id=network_id, config=config)
except ValueError as e:
print(f"ERROR: {str(e)}")
return
import oci.exceptions
from mds_plugin import compartment
import re
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# If a subnet_id was given, return the subnet of that subnet_id
if subnet_id:
try:
return virtual_network.get_subnet(subnet_id=subnet_id).data
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
return
# If no network_id was given, query the user for one
network = get_network(
network_id=network_id,
compartment_id=compartment_id, config=config,
public_subnet=public_subnet, interactive=interactive)
if network is None:
return
network_id = network.id
compartment_id = network.compartment_id
# If no availability_domain was specified, use a random one
if availability_domain is None:
availability_domain_obj = compartment.get_availability_domain(
compartment_id=compartment_id,
random_selection=True,
availability_domain=availability_domain, config=config,
interactive=False, return_python_object=True)
availability_domain = availability_domain_obj.name
try:
subnets = virtual_network.list_subnets(
compartment_id=compartment_id, vcn_id=network_id).data
# Filter subnets by Availability Domain, None means the subnet
# spans across all Availability Domains
subnets = [s for s in subnets
if s.availability_domain == availability_domain or
s.availability_domain is None]
# Filter out all sub-nets that are not conforming to the
# public_subnet options
if public_subnet:
subnets = [s for s in subnets
if subnet_is_public(subnet=s, config=config)]
elif public_subnet is not None and not public_subnet:
subnets = [s for s in subnets
if not subnet_is_public(subnet=s, config=config)]
# If there are several subnets, let the user choose
if len(subnets) == 0:
return
elif len(subnets) == 1:
# If there is exactly 1 subnet, return that
return subnets[0]
print("\nPlease choose a subnet:\n")
i = 1
for s in subnets:
s_name = re.sub(r'[\n\r]', ' ',
s.display_name[:22] + '..'
if len(s.display_name) > 24
else s.display_name)
print(f"{i:>4} {s_name:24} {s.cidr_block:15}")
i += 1
print()
return core.prompt_for_list_item(
item_list=subnets, prompt_caption=(
"Please enter the name or index of the subnet: "),
item_name_property="display_name",
given_value=subnet_name)
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except Exception as e:
print(f'ERROR: {e}')
return
@plugin_function('mds.list.loadBalancers', shell=True, cli=True, web=True)
def list_load_balancers(**kwargs):
"""Lists load balancers
This function will list all load balancers of the compartment with the
given compartment_id.
Args:
**kwargs: Optional parameters
Keyword Args:
compartment_id (str): OCID of the parent compartment
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
return_type (str): "STR" will return a formatted string, "DICT" will
return the object converted to a dict structure and "OBJ" will
return the OCI Python object for internal plugin usage
raise_exceptions (bool): If set to true exceptions are raised
Returns:
Based on return_type
"""
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
return_type = kwargs.get(
"return_type", # In interactive mode, default to formatted str return
core.RETURN_STR if interactive else core.RETURN_DICT)
raise_exceptions = kwargs.get(
"raise_exceptions", # On internal call (RETURN_OBJ), raise exceptions
True if return_type == core.RETURN_OBJ else not interactive)
try:
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
try:
# Initialize the Object Store client
load_balancer_cl = core.get_oci_load_balancer_client(config=config)
# List the load balancers
load_balancers = load_balancer_cl.list_load_balancers(
compartment_id=compartment_id).data
# Filter out all deleted items
load_balancers = [
                lb for lb in load_balancers if lb.lifecycle_state != "DELETED"]
return core.oci_object(
oci_object=load_balancers,
return_type=return_type,
format_function=format_load_balancer_listing)
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise
print(f'ERROR: {e}') | 38.135475 | 85 | 0.587255 | # Copyright (c) 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Sub-Module to manage OCI Networking"""
from mysqlsh.plugin_manager import plugin_function
from mds_plugin import core, configuration
# cSpell:ignore vcns
def subnet_is_public(subnet, config):
import oci.exceptions
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# Get Routing Table
try:
rt = virtual_network.get_route_table(subnet.route_table_id).data
is_public = False
for rr in rt.route_rules:
if "internetgateway" in rr.network_entity_id:
is_public = True
break
return is_public
except oci.exceptions.ServiceError as e:
        if e.status == 404:
            return False
        # Any other service error is unexpected here, so re-raise it
        raise
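# Illustrative aside: subnet_is_public() above treats a subnet as public
# when any route rule points at an internet gateway. A minimal sketch of
# that check over plain dicts (real OCI route rules are SDK model objects):
def _demo_route_table_is_public(route_rules):
    # route_rules is assumed to be a list of dicts carrying a
    # 'network_entity_id' OCID string
    return any('internetgateway' in r.get('network_entity_id', '')
               for r in route_rules)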
def subnet_privilege_check(subnet, config):
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
try:
        # get_subnet raises if the user lacks privileges for the subnet
        virtual_network.get_subnet(subnet.id)
        return True
    except Exception:
        return False
def network_has_subnet(network, compartment_id, config, public_subnet=None,
check_privileges=False):
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# Get Subnets
subnets = virtual_network.list_subnets(
compartment_id=compartment_id, vcn_id=network.id).data
if public_subnet is not None:
if public_subnet:
subnets = [s for s in subnets
if subnet_is_public(subnet=s, config=config)]
else:
subnets = [s for s in subnets
if not subnet_is_public(subnet=s, config=config)]
return len(subnets) > 0
def format_network_listing(vcns, current_network_id=None):
import re
out = ""
# return compartments in READABLE text output
i = 1
for v in vcns:
        # Shorten to 32 chars max, remove linebreaks
name = re.sub(r'[\n\r]', ' ',
v.display_name[:32] + '..'
if len(v.display_name) > 34
else v.display_name)
        # Shorten to 30 chars max, remove linebreaks
domain_name = v.vcn_domain_name if v.vcn_domain_name else ''
domain_name = re.sub(r'[\n\r]', ' ',
domain_name[:30] + '..'
if len(domain_name) > 32
else domain_name)
index = f"*{i:>3}" if current_network_id == v.id else f"{i:>4}"
out += f"{index} {name:34} {domain_name:32} {v.cidr_block:19} " \
f"{v.lifecycle_state}\n"
i += 1
if len(vcns) == 0:
out = "No virtual networks available in this compartment.\n"
return out
def format_subnet_listing(subnets):
import re
out = ""
i = 1
for s in subnets:
s_name = re.sub(r'[\n\r]', ' ',
s.display_name[:22] + '..'
if len(s.display_name) > 24
else s.display_name)
availability_domain = \
s.availability_domain if s.availability_domain else ''
availability_domain = re.sub(r'[\n\r]', ' ',
availability_domain[:22] + '..'
if len(availability_domain) > 24
else availability_domain)
domain_name = s.subnet_domain_name if s.subnet_domain_name else ''
domain_name = re.sub(r'[\n\r]', ' ',
domain_name[:42] + '..'
if len(domain_name) > 44
else domain_name)
out += f"{i:>4} {s_name:24} {s.cidr_block:19} {availability_domain:24} "
out += f"{domain_name:44}\n"
i += 1
if len(subnets) == 0:
out = "No subnets available in this virtual network.\n"
return out
def format_load_balancer_listing(items, current=None) -> str:
"""Formats a given list of objects in a human readable form
Args:
items: Either a list of objects or a single object
current (str): OCID of the current item
Returns:
        The load balancers formatted as str
"""
    # If a single load balancer was given, wrap it in a list
    if not isinstance(items, list):
items = [items]
# return objects in READABLE text output
out = ""
id = 1
for i in items:
index = f"*{id:>3} " if current == i.id else f"{id:>4} "
ips = ""
for ip in i.ip_addresses:
            ips += ip.ip_address + ("*, " if ip.is_public else ", ")
if len(ips) > 2:
ips = ips[0:-2]
out += (index +
core.fixed_len(i.display_name, 24, ' ', True) +
core.fixed_len(i.lifecycle_state, 8, ' ') +
core.fixed_len(f"{ips}", 24, '\n'))
id += 1
return out
@plugin_function('mds.list.networks')
def list_networks(**kwargs):
"""Lists all networks of the given compartment
Args:
**kwargs: Additional options
Keyword Args:
public_subnet (bool): Whether only public or private subnets should be
considered
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
        return_formatted (bool): If set to true, a formatted string is
            returned, otherwise a list of dicts.
check_privileges (bool): Checks if the user has privileges for the
subnet
Returns:
        A formatted string or a list of network dicts, based on return_formatted
"""
public_subnet = kwargs.get("public_subnet")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
return_formatted = kwargs.get("return_formatted", True)
check_privileges = kwargs.get("check_privileges", False)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# List the virtual networks
vcns = virtual_network.list_vcns(
compartment_id=compartment_id).data
        # Keep only the networks that contain a subnet conforming to the
        # public_subnet option
if public_subnet is not None:
# Loop over VCNs to see if access is granted
good_vcns = []
for vcn in vcns:
try:
if network_has_subnet(
network=vcn, compartment_id=compartment_id,
config=config,
public_subnet=public_subnet,
check_privileges=check_privileges):
good_vcns.append(vcn)
except oci.exceptions.ServiceError as e:
pass
vcns = good_vcns
if return_formatted:
return format_network_listing(vcns)
else:
return oci.util.to_dict(vcns)
except ValueError as e:
print(f"ERROR: {str(e)}")
return
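# Hedged usage sketch only: the OCID below is a placeholder and the call
# still needs a valid OCI config at runtime.
def _example_list_networks():
    return list_networks(
        compartment_id="ocid1.compartment.oc1..example-placeholder",
        public_subnet=True,        # only VCNs that contain a public subnet
        return_formatted=False)    # plain dicts instead of a table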
@plugin_function('mds.get.network')
def get_network(**kwargs):
"""Returns a network object
If multiple or no networks are available in the current compartment,
let the user select a different compartment
Args:
**kwargs: Additional options
Keyword Args:
network_name (str): The display_name of the network
network_id (str): The OCID of the network
public_subnet (bool): Whether only public or private subnets should be
considered
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Whether to query the user for input
        ignore_current (bool): Whether to ignore the current network
Returns:
a network object
"""
network_name = kwargs.get("network_name")
network_id = kwargs.get("network_id")
public_subnet = kwargs.get("public_subnet")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
ignore_current = kwargs.get("ignore_current", False)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
from mds_plugin import compartment
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# If a specific network was specified, return this network
if network_id:
vcn = virtual_network.get_vcn(vcn_id=network_id).data
return vcn
# Loop until the user selected a compartment with vcns
vcns = []
rejected_vcns = []
while len(vcns) == 0:
try:
# List the virtual networks, filter by network_name if given
vcns = virtual_network.list_vcns(
compartment_id=compartment_id,
display_name=network_name).data
# Filter out rejected VCNs
vcns = [n for n in vcns if n not in rejected_vcns]
if len(vcns) == 0:
network_comp = compartment.get_compartment_by_id(
compartment_id=compartment_id, config=config)
print(f"The compartment {network_comp.name} does not "
"contain a suitable virtual network.")
if interactive:
print("Please select another compartment.\n")
else:
return
compartment_id = compartment.get_compartment_id(
compartment_id=compartment_id, config=config)
                    if compartment_id is None:
print("Operation cancelled.")
return
else:
                    # Keep only the networks that contain a subnet
                    # conforming to the public_subnet option
if public_subnet is not None:
# Loop over VCNs to see if access is granted
good_vcns = []
for vcn in vcns:
newly_rejected_vcns = []
try:
if network_has_subnet(
network=vcn,
compartment_id=compartment_id,
config=config,
public_subnet=public_subnet):
good_vcns.append(vcn)
else:
newly_rejected_vcns.append(vcn)
except oci.exceptions.ServiceError as e:
if e.status == 404:
newly_rejected_vcns.append(vcn)
rejected_vcns = rejected_vcns + newly_rejected_vcns
vcns = good_vcns
except oci.exceptions.ServiceError as e:
if e.code == "NotAuthorizedOrNotFound":
print(f'You do not have privileges to list the '
f'networks of this compartment.')
else:
print(f'Could not list networks of compartment '
                          f'{compartment_id}\n')
print(
f'ERROR: {e.message}. (Code: {e.code}; '
f'Status: {e.status})')
vcns = []
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
vcns = []
# If there is a single network in this compartment, return this
# one if it matches the network_name (if given)
if len(vcns) == 1 and not ignore_current:
return vcns[0]
if not interactive:
print("Error: There are multiple virtual networks in this "
"compartment.")
return
# Let the user choose from the list
vcn = core.prompt_for_list_item(
item_list=vcns, prompt_caption=("Please enter the name or index "
"of the virtual network: "),
item_name_property="display_name",
print_list=True)
return vcn
except oci.exceptions.ServiceError as e:
if e.code == "NotAuthorizedOrNotFound":
print(f'You do not have privileges to access this network.')
else:
print(f'Could not get the network.')
print(
f'ERROR: {e.message}. (Code: {e.code}; '
f'Status: {e.status})')
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
@plugin_function('mds.list.subnets')
def list_subnets(**kwargs):
"""Lists all subnets of the given network
Args:
**kwargs: Additional options
Keyword Args:
network_id (str): The OCID of the parent network_id
public_subnet (bool): Whether only public subnets should be considered
        availability_domain (str): The name of the availability_domain
ignore_current_network (bool): Whether to ignore the current network
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Whether to query the user for input
        return_formatted (bool): If set to true, a formatted string is
            returned, otherwise a list of dicts.
Returns:
        A formatted string or a list of subnet dicts, based on return_formatted
"""
network_id = kwargs.get("network_id")
public_subnet = kwargs.get("public_subnet")
# availability_domain = kwargs.get("availability_domain")
ignore_current_network = kwargs.get("ignore_current_network")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
return_formatted = kwargs.get("return_formatted", True)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
# compartment_id = configuration.get_current_compartment_id(
# compartment_id=compartment_id, config=config)
if not ignore_current_network:
network_id = configuration.get_current_network_id(
network_id=network_id, config=config)
import oci.exceptions
from mds_plugin import compartment
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# If a subnet_id was given, return the subnet of that subnet_id
# if subnet_id is not None:
# try:
# return virtual_network.get_subnet(subnet_id=subnet_id).data
# except oci.exceptions.ServiceError as e:
# print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
# return
# except (ValueError, oci.exceptions.ClientError) as e:
# print(f'ERROR: {e}')
# return
network = get_network(network_id=network_id,
compartment_id=compartment_id, config=config,
public_subnet=public_subnet, interactive=interactive)
if network is None:
return
network_name = network.display_name if network.display_name else \
network.id
network_compartment = network.compartment_id
# Get the compartment
compartment = compartment.get_compartment_by_id(
compartment_id=network_compartment, config=config)
if compartment is None:
return
# If no availability_domain was specified, use a random one
# if availability_domain is None:
# availability_domain = compartment.get_availability_domain(
# compartment_id=compartment_id,
# availability_domain=availability_domain, config=config)
subnets = virtual_network.list_subnets(
compartment_id=network_compartment, vcn_id=network.id).data
# Filter subnets by Availability Domain, None means the subnet
# spans across all Availability Domains
# subnets = [s for s in subnets
# if s.availability_domain == availability_domain or
# s.availability_domain is None]
        # Filter out all subnets that do not conform to the
        # public_subnet option
if public_subnet is not None and public_subnet:
out = "All public "
subnets = [s for s in subnets
if subnet_is_public(subnet=s, config=config)]
elif public_subnet is not None and not public_subnet:
out = "All private "
subnets = [s for s in subnets
if not subnet_is_public(subnet=s, config=config)]
else:
out = "All "
out += f"subnets of Network '{network_name}' in compartment " + \
f"'{compartment.name}':\n\n"
if return_formatted:
return out + format_subnet_listing(subnets)
else:
return oci.util.to_dict(subnets)
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
return
@plugin_function('mds.get.subnet')
def get_subnet(**kwargs):
"""Returns a subnet object
If multiple or no networks are available in the current compartment,
let the user select a different compartment
Args:
**kwargs: Additional options
Keyword Args:
subnet_name (str): The display_name of the subnet
subnet_id (str): The OCID of the subnet
network_id (str): The OCID of the parent network_id
public_subnet (bool): Whether only public subnets should be considered
        availability_domain (str): The name of the availability_domain
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): Whether to query the user for input
Returns:
a subnet object
"""
subnet_name = kwargs.get("subnet_name")
subnet_id = kwargs.get("subnet_id")
network_id = kwargs.get("network_id")
public_subnet = kwargs.get("public_subnet")
availability_domain = kwargs.get("availability_domain")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
network_id = configuration.get_current_network_id(
network_id=network_id, config=config)
except ValueError as e:
print(f"ERROR: {str(e)}")
return
import oci.exceptions
from mds_plugin import compartment
import re
# Create VirtualNetworkClient
virtual_network = core.get_oci_virtual_network_client(
config=config)
# If a subnet_id was given, return the subnet of that subnet_id
if subnet_id:
try:
return virtual_network.get_subnet(subnet_id=subnet_id).data
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except (ValueError, oci.exceptions.ClientError) as e:
print(f'ERROR: {e}')
return
# If no network_id was given, query the user for one
network = get_network(
network_id=network_id,
compartment_id=compartment_id, config=config,
public_subnet=public_subnet, interactive=interactive)
if network is None:
return
network_id = network.id
compartment_id = network.compartment_id
# If no availability_domain was specified, use a random one
if availability_domain is None:
availability_domain_obj = compartment.get_availability_domain(
compartment_id=compartment_id,
random_selection=True,
availability_domain=availability_domain, config=config,
interactive=False, return_python_object=True)
availability_domain = availability_domain_obj.name
try:
subnets = virtual_network.list_subnets(
compartment_id=compartment_id, vcn_id=network_id).data
# Filter subnets by Availability Domain, None means the subnet
# spans across all Availability Domains
subnets = [s for s in subnets
if s.availability_domain == availability_domain or
s.availability_domain is None]
        # Filter out all subnets that do not conform to the
        # public_subnet option
if public_subnet:
subnets = [s for s in subnets
if subnet_is_public(subnet=s, config=config)]
elif public_subnet is not None and not public_subnet:
subnets = [s for s in subnets
if not subnet_is_public(subnet=s, config=config)]
# If there are several subnets, let the user choose
if len(subnets) == 0:
return
elif len(subnets) == 1:
# If there is exactly 1 subnet, return that
return subnets[0]
print("\nPlease choose a subnet:\n")
i = 1
for s in subnets:
s_name = re.sub(r'[\n\r]', ' ',
s.display_name[:22] + '..'
if len(s.display_name) > 24
else s.display_name)
print(f"{i:>4} {s_name:24} {s.cidr_block:15}")
i += 1
print()
return core.prompt_for_list_item(
item_list=subnets, prompt_caption=(
"Please enter the name or index of the subnet: "),
item_name_property="display_name",
given_value=subnet_name)
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except Exception as e:
print(f'ERROR: {e}')
return
@plugin_function('mds.list.loadBalancers', shell=True, cli=True, web=True)
def list_load_balancers(**kwargs):
"""Lists load balancers
This function will list all load balancers of the compartment with the
given compartment_id.
Args:
**kwargs: Optional parameters
Keyword Args:
compartment_id (str): OCID of the parent compartment
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
return_type (str): "STR" will return a formatted string, "DICT" will
return the object converted to a dict structure and "OBJ" will
return the OCI Python object for internal plugin usage
raise_exceptions (bool): If set to true exceptions are raised
Returns:
Based on return_type
"""
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
return_type = kwargs.get(
"return_type", # In interactive mode, default to formatted str return
core.RETURN_STR if interactive else core.RETURN_DICT)
raise_exceptions = kwargs.get(
"raise_exceptions", # On internal call (RETURN_OBJ), raise exceptions
True if return_type == core.RETURN_OBJ else not interactive)
try:
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.exceptions
try:
            # Initialize the LoadBalancer client
load_balancer_cl = core.get_oci_load_balancer_client(config=config)
# List the load balancers
load_balancers = load_balancer_cl.list_load_balancers(
compartment_id=compartment_id).data
# Filter out all deleted items
load_balancers = [
                lb for lb in load_balancers if lb.lifecycle_state != "DELETED"]
return core.oci_object(
oci_object=load_balancers,
return_type=return_type,
format_function=format_load_balancer_listing)
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise
print(f'ERROR: {e}') | 3,746 | 0 | 125 |
94f255977493d198ff2cd8edf28b1c3eaffc4cd9 | 5,614 | py | Python | chess/lib/gui.py | SamMatzko/My-PyChess | 4b1b30b03b85679a2480e86b649614917a156ab0 | [
"MIT"
] | 64 | 2019-10-27T06:49:57.000Z | 2022-03-29T11:07:07.000Z | chess/lib/gui.py | SamMatzko/My-PyChess | 4b1b30b03b85679a2480e86b649614917a156ab0 | [
"MIT"
] | 11 | 2020-01-28T08:16:25.000Z | 2021-12-13T18:44:12.000Z | chess/lib/gui.py | SamMatzko/My-PyChess | 4b1b30b03b85679a2480e86b649614917a156ab0 | [
"MIT"
] | 30 | 2020-01-09T10:05:01.000Z | 2022-03-18T18:16:30.000Z | """
This file is a part of My-PyChess application.
In this file, we define some basic gui-related functions
For a better understanding of the variables used here, checkout docs.txt
"""
import pygame
from tools.loader import CHESS, BACK, putNum, putLargeNum
from tools import sound
# Apply 'convert_alpha()' on all pieces to optimise images for speed.
# This function displays the choice menu when called, taking user input.
# Returns the piece chosen by the user
# This function draws the board
# This function draws all pieces onto the board
# This function displays the prompt screen when a user tries to quit
# User must choose Yes or No, this function returns True or False respectively
# This function shows a small animation when the game starts, while also
# optimising images for display - call only once per game
| 37.178808 | 81 | 0.528144 | """
This file is a part of My-PyChess application.
In this file, we define some basic gui-related functions
For a better understanding of the variables used here, checkout docs.txt
"""
import pygame
from tools.loader import CHESS, BACK, putNum, putLargeNum
from tools import sound
# Apply 'convert_alpha()' on all pieces to optimise images for speed.
def convertPieces(win):
for i in range(2):
for key, val in CHESS.PIECES[i].items():
CHESS.PIECES[i][key] = val.convert_alpha(win)
# This function displays the choice menu when called, taking user input.
# Returns the piece chosen by the user
def getChoice(win, side):
win.blit(CHESS.CHOOSE, (130, 10))
win.blit(CHESS.PIECES[side]["q"], (250, 0))
win.blit(CHESS.PIECES[side]["b"], (300, 0))
win.blit(CHESS.PIECES[side]["r"], (350, 0))
win.blit(CHESS.PIECES[side]["n"], (400, 0))
pygame.display.update((0, 0, 500, 50))
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if 0 < event.pos[1] < 50:
if 250 < event.pos[0] < 300:
return "q"
elif 300 < event.pos[0] < 350:
return "b"
elif 350 < event.pos[0] < 400:
return "r"
elif 400 < event.pos[0] < 450:
return "n"
def showTimeOver(win, side):
pygame.draw.rect(win, (0, 0, 0), (100, 190, 300, 120))
pygame.draw.rect(win, (255, 255, 255), (100, 190, 300, 120), 4)
win.blit(CHESS.TIMEUP[0], (220, 200))
win.blit(CHESS.TIMEUP[1], (105, 220))
win.blit(CHESS.TIMEUP[2], (115, 240))
win.blit(CHESS.OK, (230, 270))
pygame.draw.rect(win, (255, 255, 255), (225, 270, 50, 30), 2)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if 225 < event.pos[0] < 275 and 270 < event.pos[1] < 300:
return
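# This function draws both players' clocks onto the window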
def putClock(win, timer):
if timer is None:
return
m1, s1 = divmod(timer[0] // 1000, 60)
m2, s2 = divmod(timer[1] // 1000, 60)
putLargeNum(win, format(m1, "02"), (100, 460), False)
win.blit(CHESS.COL, (130, 460))
putLargeNum(win, format(s1, "02"), (140, 460), False)
putLargeNum(win, format(m2, "02"), (210, 460), False)
win.blit(CHESS.COL, (240, 460))
putLargeNum(win, format(s2, "02"), (250, 460), False)
win.blit(CHESS.PIECES[0]["k"], (50, 450))
win.blit(CHESS.PIECES[1]["k"], (278, 450))
pygame.display.update()
# This function draws the board
def drawBoard(win):
win.fill((100, 200, 200))
pygame.draw.rect(win, (180, 100, 30), (50, 50, 400, 400))
for y in range(1, 9):
for x in range(1, 9):
if (x + y) % 2 == 0:
pygame.draw.rect(win, (220, 240, 240), (50 * x, 50 * y, 50, 50))
# This function draws all pieces onto the board
def drawPieces(win, board, flip):
for side in range(2):
for x, y, ptype in board[side]:
if flip:
x, y = 9 - x, 9 - y
win.blit(CHESS.PIECES[side][ptype], (x * 50, y * 50))
# This function displays the prompt screen when a user tries to quit
# User must choose Yes or No, this function returns True or False respectively
def prompt(win, msg=None):
pygame.draw.rect(win, (0, 0, 0), (110, 160, 280, 130))
pygame.draw.rect(win, (255, 255, 255), (110, 160, 280, 130), 4)
pygame.draw.rect(win, (255, 255, 255), (120, 160, 260, 60), 2)
win.blit(CHESS.YES, (145, 240))
win.blit(CHESS.NO, (305, 240))
pygame.draw.rect(win, (255, 255, 255), (140, 240, 60, 28), 2)
pygame.draw.rect(win, (255, 255, 255), (300, 240, 50, 28), 2)
if msg is None:
win.blit(CHESS.MESSAGE[0], (130, 160))
win.blit(CHESS.MESSAGE[1], (190, 190))
elif msg == -1:
win.blit(CHESS.MESSAGE[0], (130, 160))
win.blit(CHESS.MESSAGE[1], (190, 190))
win.blit(CHESS.SAVE_ERR, (115, 270))
else:
win.blit(CHESS.MESSAGE2[0], (123, 160))
win.blit(CHESS.MESSAGE2[1], (145, 190))
win.blit(CHESS.MSG, (135, 270))
putNum(win, msg, (345, 270))
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if 240 < event.pos[1] < 270:
if 140 < event.pos[0] < 200:
return True
elif 300 < event.pos[0] < 350:
return False
# This function shows a small animation when the game starts, while also
# optimising images for display - call only once per game
def start(win, load):
convertPieces(win)
sound.play_start(load)
clk = pygame.time.Clock()
for i in range(101):
clk.tick_busy_loop(140)
drawBoard(win)
for j in range(8):
win.blit(CHESS.PIECES[0]["p"], (0.5 * i * (j + 1), 225 + 1.25 * i))
win.blit(CHESS.PIECES[1]["p"], (0.5 * i * (j + 1), 225 - 1.25 * i))
for j, pc in enumerate(["r", "n", "b", "q", "k", "b", "n", "r"]):
win.blit(CHESS.PIECES[0][pc], (0.5 * i * (j + 1), 225 + 1.75 * i))
win.blit(CHESS.PIECES[1][pc], (0.5 * i * (j + 1), 225 - 1.75 * i))
pygame.display.update() | 4,560 | 0 | 187 |
e58d18a4188f5278cc3645bd10b1ba575317585f | 1,011 | py | Python | froide/team/urls.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | froide/team/urls.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | froide/team/urls.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | from django.urls import path
from .views import (
TeamListView, TeamDetailView, CreateTeamView, InviteTeamMemberView,
JoinTeamView, ChangeTeamMemberRoleView, DeleteTeamMemberRoleView,
JoinTeamUserView, DeleteTeamView
)
urlpatterns = [
path('', TeamListView.as_view(), name='team-list'),
path('create/', CreateTeamView.as_view(), name='team-create'),
path('<int:pk>/', TeamDetailView.as_view(), name='team-detail'),
path('<int:pk>/invite/', InviteTeamMemberView.as_view(),
name='team-invite'),
path('<int:pk>/delete/', DeleteTeamView.as_view(),
name='team-delete'),
path('<int:pk>/change-member/', ChangeTeamMemberRoleView.as_view(),
name='team-change_member'),
path('<int:pk>/delete-member/', DeleteTeamMemberRoleView.as_view(),
name='team-delete_member'),
path('join/<int:pk>/', JoinTeamUserView.as_view(),
name='team-join_user'),
path('join/<int:pk>/<str:secret>/', JoinTeamView.as_view(),
name='team-join'),
]
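# Note (illustrative): the angle-bracket segments are Django path
# converters. '<int:pk>' matches an integer and is passed to the view as
# the keyword argument 'pk'; '<str:secret>' matches any non-empty string
# without a '/'. For example, '/join/7/abc/' resolves to JoinTeamView
# with pk=7 and secret='abc'.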
| 38.884615 | 71 | 0.67458 | from django.urls import path
from .views import (
TeamListView, TeamDetailView, CreateTeamView, InviteTeamMemberView,
JoinTeamView, ChangeTeamMemberRoleView, DeleteTeamMemberRoleView,
JoinTeamUserView, DeleteTeamView
)
urlpatterns = [
path('', TeamListView.as_view(), name='team-list'),
path('create/', CreateTeamView.as_view(), name='team-create'),
path('<int:pk>/', TeamDetailView.as_view(), name='team-detail'),
path('<int:pk>/invite/', InviteTeamMemberView.as_view(),
name='team-invite'),
path('<int:pk>/delete/', DeleteTeamView.as_view(),
name='team-delete'),
path('<int:pk>/change-member/', ChangeTeamMemberRoleView.as_view(),
name='team-change_member'),
path('<int:pk>/delete-member/', DeleteTeamMemberRoleView.as_view(),
name='team-delete_member'),
path('join/<int:pk>/', JoinTeamUserView.as_view(),
name='team-join_user'),
path('join/<int:pk>/<str:secret>/', JoinTeamView.as_view(),
name='team-join'),
]
| 0 | 0 | 0 |
8648427b141a81717f91928d45d78d1381e8f771 | 500 | py | Python | hardware/servo/__init__.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | hardware/servo/__init__.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | hardware/servo/__init__.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | import time
import board
import pwmio
from adafruit_motor import servo
| 23.809524 | 57 | 0.722 | import time
import board
import pwmio
from adafruit_motor import servo
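# Briefly pulse the right continuous-rotation servo (PWM on pin D17)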
def right_rotate():
rightServoPin = pwmio.PWMOut(board.D17, frequency=50)
rightServo = servo.ContinuousServo(rightServoPin)
rightServo.throttle = 0.08
time.sleep(0.1)
rightServo.throttle = 0.0
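# Briefly pulse the left continuous-rotation servo (PWM on pin D27)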
def left_rotate():
leftServoPin = pwmio.PWMOut(board.D27, frequency=50)
leftServo = servo.ContinuousServo(leftServoPin)
leftServo.throttle = 0.08
time.sleep(0.1)
leftServo.throttle = 0.0 | 376 | 0 | 51 |
886d142c514e19d696465e7f00befa959fe90b28 | 10,444 | py | Python | training/ticketing_system/views.py | aberon10/training | 7f418d563280b9d1ab939935206b023e4206cb54 | [
"MIT"
] | null | null | null | training/ticketing_system/views.py | aberon10/training | 7f418d563280b9d1ab939935206b023e4206cb54 | [
"MIT"
] | null | null | null | training/ticketing_system/views.py | aberon10/training | 7f418d563280b9d1ab939935206b023e4206cb54 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotFound
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from django.views.generic.edit import FormView
from django.views.generic import TemplateView
from django.views.generic import RedirectView
from .forms import SignInForm
from .forms import LoginForm
from .forms import TicketCreateForm
from .models import User
from .models import Ticket
class LoginView(FormView):
""" Login View. """
form_class = LoginForm
template_name = 'ticketing_system/login.html'
success_url = '/dashboard'
class LogoutView(RedirectView):
""" Logout View. """
url = '/login'
class RegisterView(TemplateView):
""" Register View. """
template_name = 'ticketing_system/register.html'
class DashboardView(TemplateView):
""" Dashboard View. """
template_name = 'ticketing_system/dashboard.html'
class TicketView(FormView):
""" Ticket View. """
form_class = TicketCreateForm
template_name = 'ticketing_system/ticket_form.html'
success_url = '/ticket'
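# Note (illustrative): with Django's FormView the attributes above drive
# the flow -- 'form_class' is rendered into 'template_name' on GET, and a
# valid POST ends with a redirect to 'success_url' via get_success_url().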
| 33.367412 | 79 | 0.493776 | # -*- coding: utf-8 -*-
import time
from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotFound
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from django.views.generic.edit import FormView
from django.views.generic import TemplateView
from django.views.generic import RedirectView
from .forms import SignInForm
from .forms import LoginForm
from .forms import TicketCreateForm
from .models import User
from .models import Ticket
class LoginView(FormView):
""" Login View. """
form_class = LoginForm
template_name = 'ticketing_system/login.html'
success_url = '/dashboard'
def get(self, request, *args, **kwargs):
if request.session.get('user'):
return HttpResponseRedirect(self.get_success_url())
else:
return render(
request,
template_name=self.template_name,
context={'form': self.form_class}
)
def form_valid(self, form):
context = {
'form': form,
'error_login': 'The user and/or password do not match'
}
email = form.cleaned_data['email']
password = form.cleaned_data['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
pass
else:
if check_password(password, user.password):
# create the new user session
self.request.session['user'] = user.email
self.request.session['name'] = user.name
return HttpResponseRedirect(self.get_success_url())
return render(
self.request,
template_name=self.template_name,
context=context
)
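# Hedged illustration of the password hashing helpers imported above
# (standard Django API): make_password() salts and hashes a plaintext
# password, check_password() verifies a candidate against the stored
# hash. Calling these still requires configured Django settings.
def _demo_password_roundtrip():
    hashed = make_password('s3cret-example')
    return check_password('s3cret-example', hashed)  # True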
class LogoutView(RedirectView):
""" Logout View. """
url = '/login'
def get(self, request, *args, **kwargs):
try:
# delete the user session
del request.session['user']
del request.session['name']
except KeyError:
pass
return super(LogoutView, self).get(request, *args, **kwargs)
class RegisterView(TemplateView):
""" Register View. """
template_name = 'ticketing_system/register.html'
def get(self, request, *args, **kwargs):
form = SignInForm()
return render(
request,
template_name=self.template_name,
context={'register_form': form}
)
def post(self, request, *args, **kwargs):
form = SignInForm(request.POST)
response = {
'register_form': form,
'message': '',
'success': False
}
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
confirm_password = form.cleaned_data['confirm_password']
name = form.cleaned_data['name']
if password != confirm_password:
response['register_form']['confirm_password'].error_messages =\
                    'Passwords do not match.'
else:
try:
User.objects.get(email=email)
except User.DoesNotExist:
user = User(
email=email,
name=name,
password=make_password(password)
)
user.save()
response['register_form'] = SignInForm()
response['success'] = True
response['message'] = 'You have successfully \
registered!'
else:
response['register_form']['email'].error_messages = \
'User already exists'
return render(
request,
template_name=self.template_name,
context=response,
)
class DashboardView(TemplateView):
""" Dashboard View. """
template_name = 'ticketing_system/dashboard.html'
def get(self, request, *args, **kwargs):
if request.session.get('user'):
user = User.objects.get(email=request.session['user'])
tickets = Ticket.objects.filter(
Q(status='O'),
Q(author=user) | Q(assignee=user)
).distinct()
return render(
request,
template_name=self.template_name,
context={
'current_path': request.path.split('/')[1],
'tickets': tickets
}
)
else:
return HttpResponseRedirect('/login')
def post(self, request, *args, **kwargs):
        if request.session.get('user'):
title = request.POST.get('title')
status = request.POST.get('status')
user = User.objects.get(email=request.session['user'])
tickets = Ticket.objects.all()
if title != '':
tickets = tickets.filter(title__icontains=title)
tickets = tickets.filter(
Q(status=status),
Q(author=user) | Q(assignee=user)
).distinct()
return render(
request,
template_name=self.template_name,
context={
'current_path': request.path.split('/')[1],
'tickets': tickets
}
)
else:
return HttpResponseRedirect('/login')
class TicketView(FormView):
""" Ticket View. """
form_class = TicketCreateForm
template_name = 'ticketing_system/ticket_form.html'
success_url = '/ticket'
def get(self, request, *args, **kwargs):
if not request.session.get('user'):
return HttpResponseRedirect('/login')
else:
user = User.objects.get(email=request.session['user'])
try:
if kwargs['id_ticket']:
try:
ticket = Ticket.objects.filter(
Q(pk=int(kwargs['id_ticket'])),
Q(author=user) | Q(assignee=user)
)[0]
except Ticket.DoesNotExist:
return HttpResponseNotFound('<h1>Page not found</h1>')
else:
form = self.form_class(initial={
'title': ticket.title,
'body': ticket.body,
'author': ticket.author,
'created': ticket.created,
'status': ticket.status,
'assignee': ticket.assignee.all()
})
except KeyError:
form = self.form_class(initial={
'author': request.session['user'],
'created': time.strftime('%Y-%m-%d'),
'status': 'O',
'assignee': user.id
})
return render(
request,
template_name=self.template_name,
context={'form': form}
)
def post(self, request, *args, **kwargs):
if not request.session.get('user'):
return HttpResponseRedirect('/login')
else:
error_message = ''
ticket = Ticket()
assignees_users = request.POST.getlist('assignee')
form = TicketCreateForm({
'title': request.POST.get('title'),
'body': request.POST.get('body'),
'status': request.POST.get('status'),
'created': request.POST.get('created')
})
if form.is_valid():
title = form.cleaned_data['title']
body = form.cleaned_data['body']
email = self.request.session['user']
created = form.cleaned_data['created']
status = form.cleaned_data['status']
author = User.objects.get(email=email)
try:
if kwargs['id_ticket']:
ticket = Ticket.objects.get(
pk=int(kwargs['id_ticket'])
)
for item in ticket.assignee.all():
user = User.objects.get(pk=int(item.id))
ticket.assignee.remove(user)
except KeyError:
pass
try:
users = []
for user in assignees_users:
users.append(User.objects.get(pk=int(user)))
except User.DoesNotExist:
error_message = 'Error creating ticket'
else:
ticket.title = title
ticket.body = body
ticket.author = author
ticket.created = created
ticket.status = status
ticket.save()
if not users:
users.append(author)
ticket.assignee.set(users)
return HttpResponseRedirect('/dashboard')
return render(
request,
template_name=self.template_name,
context={
'form': TicketCreateForm(request.POST),
'error_message': error_message
}
)
class TicketDeleteView(TemplateView):
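    """ Ticket Delete View. """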
def get(self, request, *args, **kwargs):
if not request.session.get('user'):
return HttpResponseRedirect('/login')
else:
try:
if kwargs['id_ticket']:
user = User.objects.get(email=request.session['user'])
ticket = Ticket.objects.filter(
Q(pk=int(kwargs['id_ticket'])),
Q(author=user) | Q(assignee=user)
).distinct()
ticket.delete()
except KeyError:
pass
except Ticket.DoesNotExist:
pass
return HttpResponseRedirect('/dashboard')
| 8,892 | 16 | 293 |
2cd4b352e3ce6d32d5fe1fe1431c15e7c5cbd407 | 897 | py | Python | linear_algebra/Gauss_elim_example.py | mattzett/EP501_python | 6b6cc87e435a372f84aa1f68c13cc8c73d477998 | [
"MIT"
] | null | null | null | linear_algebra/Gauss_elim_example.py | mattzett/EP501_python | 6b6cc87e435a372f84aa1f68c13cc8c73d477998 | [
"MIT"
] | 1 | 2020-10-06T13:29:01.000Z | 2020-10-06T13:29:01.000Z | linear_algebra/Gauss_elim_example.py | mattzett/EP501_python | 6b6cc87e435a372f84aa1f68c13cc8c73d477998 | [
"MIT"
] | 6 | 2020-09-01T10:35:59.000Z | 2020-09-18T10:12:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 15:40:51 2020
@author: zettergm
known issues:
1) Need to control number of decimal places in output printing to improve readability
"""
import numpy as np
from elimtools import Gauss_elim,backsub
nrow=10
ncol=10
A=np.random.randn(nrow,ncol)
b=np.random.randn(nrow,1)
# Simple test problem for debugging
#A=np.array([[1.0, 4.0, 2.0], [3.0, 2.0, 1.0], [2.0, 1.0, 3.0]]) # system to be solved
#b=np.array([[15.0], [10.0], [13.0]]) # RHS of system
# Solve with elimtools
[Awork,order]=Gauss_elim(A,b,True)
x=backsub(Awork[order,:],True)
print("Value of x computed via Gaussian elimination and backsubstitution: ")
print(x)
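# Residual check (illustrative addition): for a correct solve the residual
# A@x - b should be at machine-precision level.
print("Max residual |A@x-b|:", np.max(np.abs(A @ np.reshape(x, (-1, 1)) - b)))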
# Use built-in linear algebra routines to solve and compare
xpyth=np.linalg.solve(A,b)
print("Solution vector computed via built-in numpy routine")
print(xpyth) | 27.181818 | 89 | 0.683389 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 15:40:51 2020
@author: zettergm
known issues:
1) Need to control number of decimal places in output printing to improve readability
"""
import numpy as np
from elimtools import Gauss_elim,backsub
nrow=10
ncol=10
A=np.random.randn(nrow,ncol)
b=np.random.randn(nrow,1)
# Simple test problem for debugging
#A=np.array([[1.0, 4.0, 2.0], [3.0, 2.0, 1.0], [2.0, 1.0, 3.0]]) # system to be solved
#b=np.array([[15.0], [10.0], [13.0]]) # RHS of system
# Solve with elimtools
[Awork,order]=Gauss_elim(A,b,True)
x=backsub(Awork[order,:],True)
print("Value of x computed via Gaussian elimination and backsubstitution: ")
print(x)
# Use built-in linear algebra routines to solve and compare
xpyth=np.linalg.solve(A,b)
print("Solution vector computed via built-in numpy routine")
print(xpyth) | 0 | 0 | 0 |
bbed0d926f4b21ac78cf6111053350a3ef352712 | 5,534 | py | Python | Hiking_project_trailscraper.py | edeneault/pyfe_capstone_hikingtrails | 849328a47095933f2e8ce8a8a5a0d56d04dd3807 | [
"MIT"
] | 2 | 2020-08-10T13:18:31.000Z | 2021-10-02T18:05:55.000Z | Hiking_project_trailscraper.py | edeneault/pyfe_capstone_hikingtrails | 849328a47095933f2e8ce8a8a5a0d56d04dd3807 | [
"MIT"
] | null | null | null | Hiking_project_trailscraper.py | edeneault/pyfe_capstone_hikingtrails | 849328a47095933f2e8ce8a8a5a0d56d04dd3807 | [
"MIT"
] | null | null | null | ################################################################################
# ####              Hiking_project_trailscraper.py                 ##### #
# #### written by: Etienne Deneault ##### #
################################################################################
# Environment-import
import json
import requests
import sqlite3
import random
from shapely.geometry import Point, Polygon
# initialize coordinates at the center of mainland US
# Enter Hiking Project API key here. Further development needed to load it from an environment variable instead of hard-coding it.
api_key = 'your_api_key'
# Base URL for the get-trails API method
url = 'https://www.hikingproject.com/data/get-trails?'
# Bounding Box Coordinates for the Mainland US obtained on
coords = [(-82.4423472248,25.062761981), (-85.6719092689,29.570088284), (-89.362854179,29.650311372), (-97.9443083368,26.5579317639), (-117.2294984422,32.2612294633), (-125.072755035,41.8816215194), (-124.2423002086,48.2581198196), (-121.566356834,49.3519856698), (-96.4679400287,48.808034488), (-92.0388082824,48.5643678729), (-84.6569184623,46.8886898289), (-81.7964176498,42.8361752569), (-76.3522736392,43.7095457837), (-70.2622322402,46.3181308653), (-68.1399397216,47.014662257), (-66.4790086112,44.7016972202), (-69.7085706554,43.3080101182), (-76.1677269302,38.7876636101), (-75.7986335121,35.6284188995), (-80.7814268432,30.8459373794), (-79.5818678699,26.4753651459), (-82.4423472248,25.062761981)]
poly = Polygon(coords)
point_in_poly = get_all_point_in_polygon(poly)
print("Number of coordinate points to check:", len(point_in_poly))
# print(point_in_poly)
# Randomize list of coordinates
random.shuffle(point_in_poly)
##### Main Loop #####
if __name__ == "__main__":
while True:
try:
maxDistance = input('Enter your search area (0-200 miles): ')
maxDistance = int(maxDistance)
except ValueError:
print("I did not understand the input, please try again using a distance numeric value in miles.")
continue
try:
            maxResults = input('Enter your search area - Maximum number of trail results (0-500): ')
maxResults = int(maxResults)
except ValueError:
print("I did not understand the input, please try again using a numeric value.")
continue
try:
userReq_count = input('Enter the number of requests to the API (0-199): ')
userReq_count = int(userReq_count)
except ValueError:
print("I did not understand the input, please try again using a numeric value.")
continue
req_count = 0
        # Note: Reverse coordinates to input (lat,lon) == (y,x)
for (x,y) in point_in_poly:
req_count = req_count + 1
print('Query Count', req_count)
            # use the user-entered maxDistance and maxResults from above
parameters = {"lat": y, "lon": x , "maxDistance": maxDistance, "maxResults": maxResults, "key": api_key}
# Make a get request with the parameters.
response = requests.get(url, params=parameters)
print(response.url)
print(response.headers)
print(response.status_code)
# print(response.text)
# print(response.headers)
# print(response.json())
db = sqlite3.connect("SQL_data/trails.sqlite")
cur = db.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS Trailsdb (id INTEGER NOT NULL PRIMARY KEY UNIQUE, name TEXT,
type TEXT, summary TEXT UNIQUE, difficulty TEXT, stars INTEGER, starVotes INTEGER,
location TEXT, url TEXT UNIQUE, length INTEGER, ascent INTEGER, descent INTEGER, high INTEGER,
low INTEGER, longitude INTEGER, latitude INTEGER, conditionStatus TEXT, conditionDetails TEXT,
conditionDate TEXT)''')
str_data = response.text
json_data = json.loads(str_data)
# for entry in json_data:
# print(entry)
for trail in json_data['trails']:
#print(child)
cur.execute("Insert or replace into trailsdb values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(trail['id'], trail['name'], trail['type'], trail['summary'], trail['difficulty'],
trail['stars'], trail['starVotes'], trail['location'], trail['url'], trail['length'],
trail['ascent'], trail['descent'], trail['high'], trail['low'],trail['longitude'],
trail['latitude'], trail['conditionStatus'], trail['conditionDetails'],
trail['conditionDate']))
db.commit()
if req_count == userReq_count:
break
db.commit()
print("all done")
break
| 44.272 | 709 | 0.564149 | ################################################################################
# ####              Hiking_project_trailscraper.py                 ##### #
# #### written by: Etienne Deneault ##### #
################################################################################
# Environment-import
import json
import requests
import sqlite3
import random
from shapely.geometry import Point, Polygon
# Enumerate the integer (lon, lat) grid points that fall inside the polygon
def get_all_point_in_polygon(poly):
(minx, miny, maxx, maxy) = poly.bounds
minx = int(minx)
miny = int(miny)
maxx = int(maxx)
maxy = int(maxy)
# print("poly.bounds:", poly.bounds)
#define list of tuples
a = []
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = Point(x, y)
if poly.contains(p):
a.append([x, y])
return a
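# Minimal self-contained check of the Point-in-Polygon test used above
# (illustrative only): the unit square contains (0.5, 0.5) but not (2, 2).
_unit_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
assert _unit_square.contains(Point(0.5, 0.5))
assert not _unit_square.contains(Point(2, 2))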
# Enter Hiking Project API key here. Further development needed to load it from an environment variable instead of hard-coding it.
api_key = 'your_api_key'
# Base URL for the get-trails API method
url = 'https://www.hikingproject.com/data/get-trails?'
# Bounding Box Coordinates for the Mainland US obtained on
coords = [(-82.4423472248,25.062761981), (-85.6719092689,29.570088284), (-89.362854179,29.650311372), (-97.9443083368,26.5579317639), (-117.2294984422,32.2612294633), (-125.072755035,41.8816215194), (-124.2423002086,48.2581198196), (-121.566356834,49.3519856698), (-96.4679400287,48.808034488), (-92.0388082824,48.5643678729), (-84.6569184623,46.8886898289), (-81.7964176498,42.8361752569), (-76.3522736392,43.7095457837), (-70.2622322402,46.3181308653), (-68.1399397216,47.014662257), (-66.4790086112,44.7016972202), (-69.7085706554,43.3080101182), (-76.1677269302,38.7876636101), (-75.7986335121,35.6284188995), (-80.7814268432,30.8459373794), (-79.5818678699,26.4753651459), (-82.4423472248,25.062761981)]
poly = Polygon(coords)
point_in_poly = get_all_point_in_polygon(poly)
print("Number of coordinate points to check:", len(point_in_poly))
# print(point_in_poly)
# Randomize list of coordinates
random.shuffle(point_in_poly)
##### Main Loop #####
if __name__ == "__main__":
while True:
try:
maxDistance = input('Enter your search area (0-200 miles): ')
maxDistance = int(maxDistance)
except ValueError:
print("I did not understand the input, please try again using a distance numeric value in miles.")
continue
try:
            maxResults = input('Enter your search area - Maximum number of trail results (0-500): ')
maxResults = int(maxResults)
except ValueError:
print("I did not understand the input, please try again using a numeric value.")
continue
try:
userReq_count = input('Enter the number of requests to the API (0-199): ')
userReq_count = int(userReq_count)
except ValueError:
print("I did not understand the input, please try again using a numeric value.")
continue
req_count = 0
        # Note: Reverse coordinates to input (lat,lon) == (y,x)
for (x,y) in point_in_poly:
req_count = req_count + 1
print('Query Count', req_count)
            # use the user-entered maxDistance and maxResults from above
parameters = {"lat": y, "lon": x , "maxDistance": maxDistance, "maxResults": maxResults, "key": api_key}
# Make a get request with the parameters.
response = requests.get(url, params=parameters)
print(response.url)
print(response.headers)
print(response.status_code)
# print(response.text)
# print(response.headers)
# print(response.json())
db = sqlite3.connect("SQL_data/trails.sqlite")
cur = db.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS Trailsdb (id INTEGER NOT NULL PRIMARY KEY UNIQUE, name TEXT,
type TEXT, summary TEXT UNIQUE, difficulty TEXT, stars INTEGER, starVotes INTEGER,
location TEXT, url TEXT UNIQUE, length INTEGER, ascent INTEGER, descent INTEGER, high INTEGER,
low INTEGER, longitude INTEGER, latitude INTEGER, conditionStatus TEXT, conditionDetails TEXT,
conditionDate TEXT)''')
str_data = response.text
json_data = json.loads(str_data)
# for entry in json_data:
# print(entry)
for trail in json_data['trails']:
#print(child)
cur.execute("Insert or replace into trailsdb values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(trail['id'], trail['name'], trail['type'], trail['summary'], trail['difficulty'],
trail['stars'], trail['starVotes'], trail['location'], trail['url'], trail['length'],
trail['ascent'], trail['descent'], trail['high'], trail['low'],trail['longitude'],
trail['latitude'], trail['conditionStatus'], trail['conditionDetails'],
trail['conditionDate']))
db.commit()
if req_count == userReq_count:
break
db.commit()
print("all done")
break
| 421 | 0 | 23 |
860e087d2cc5e2026e5ed61697c96f45cf4a41b9 | 42,444 | py | Python | zvmsdk/database.py | iaasci-ibm/python-zvm-sdk | cd73087ca1b6d48897fd39c512aae1f7e5f5371e | [
"Apache-2.0"
] | null | null | null | zvmsdk/database.py | iaasci-ibm/python-zvm-sdk | cd73087ca1b6d48897fd39c512aae1f7e5f5371e | [
"Apache-2.0"
] | 6 | 2020-02-25T03:27:47.000Z | 2021-04-08T12:52:49.000Z | zvmsdk/database.py | iaasci-ibm/python-zvm-sdk | cd73087ca1b6d48897fd39c512aae1f7e5f5371e | [
"Apache-2.0"
] | 2 | 2020-07-14T09:27:54.000Z | 2021-04-13T09:06:00.000Z | # Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import random
import os
import six
import sqlite3
import threading
import uuid
import json
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
CONF = config.CONF
LOG = log.LOG
_DIR_MODE = 0o755
_VOLUME_CONN = None
_NETWORK_CONN = None
_IMAGE_CONN = None
_GUEST_CONN = None
_FCP_CONN = None
_DBLOCK_VOLUME = threading.RLock()
_DBLOCK_NETWORK = threading.RLock()
_DBLOCK_IMAGE = threading.RLock()
_DBLOCK_GUEST = threading.RLock()
_DBLOCK_FCP = threading.RLock()
@contextlib.contextmanager
@contextlib.contextmanager
@contextlib.contextmanager
@contextlib.contextmanager
| 38.975207 | 79 | 0.527589 | # Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import random
import os
import six
import sqlite3
import threading
import uuid
import json
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
CONF = config.CONF
LOG = log.LOG
_DIR_MODE = 0o755
_VOLUME_CONN = None
_NETWORK_CONN = None
_IMAGE_CONN = None
_GUEST_CONN = None
_FCP_CONN = None
_DBLOCK_VOLUME = threading.RLock()
_DBLOCK_NETWORK = threading.RLock()
_DBLOCK_IMAGE = threading.RLock()
_DBLOCK_GUEST = threading.RLock()
_DBLOCK_FCP = threading.RLock()
@contextlib.contextmanager
def get_network_conn():
global _NETWORK_CONN, _DBLOCK_NETWORK
if not _NETWORK_CONN:
_NETWORK_CONN = _init_db_conn(const.DATABASE_NETWORK)
_DBLOCK_NETWORK.acquire()
try:
yield _NETWORK_CONN
except Exception as err:
msg = "Execute SQL statements error: %s" % six.text_type(err)
LOG.error(msg)
raise exception.SDKNetworkOperationError(rs=1, msg=msg)
finally:
_DBLOCK_NETWORK.release()
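# Illustrative aside: the get_*_conn() helpers all follow the same
# acquire/yield/release shape. A generic, self-contained sketch of the
# pattern without the error translation:
@contextlib.contextmanager
def _demo_locked(lock, resource):
    lock.acquire()
    try:
        yield resource
    finally:
        lock.release()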
@contextlib.contextmanager
def get_image_conn():
global _IMAGE_CONN, _DBLOCK_IMAGE
if not _IMAGE_CONN:
_IMAGE_CONN = _init_db_conn(const.DATABASE_IMAGE)
_DBLOCK_IMAGE.acquire()
try:
yield _IMAGE_CONN
except Exception as err:
LOG.error("Execute SQL statements error: %s", six.text_type(err))
raise exception.SDKDatabaseException(msg=err)
finally:
_DBLOCK_IMAGE.release()
@contextlib.contextmanager
def get_guest_conn():
global _GUEST_CONN, _DBLOCK_GUEST
if not _GUEST_CONN:
_GUEST_CONN = _init_db_conn(const.DATABASE_GUEST)
_DBLOCK_GUEST.acquire()
try:
yield _GUEST_CONN
except Exception as err:
msg = "Execute SQL statements error: %s" % six.text_type(err)
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=1, msg=msg)
finally:
_DBLOCK_GUEST.release()
@contextlib.contextmanager
def get_fcp_conn():
global _FCP_CONN, _DBLOCK_FCP
if not _FCP_CONN:
_FCP_CONN = _init_db_conn(const.DATABASE_FCP)
_DBLOCK_FCP.acquire()
try:
yield _FCP_CONN
except exception.SDKBaseException as err:
msg = "Got SDK exception in FCP DB operation: %s" % six.text_type(err)
LOG.error(msg)
raise
except Exception as err:
msg = "Execute SQL statements error: %s" % six.text_type(err)
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=1, msg=msg)
finally:
_DBLOCK_FCP.release()
def _init_db_conn(db_file):
db_dir = CONF.database.dir
if not os.path.exists(db_dir):
os.makedirs(db_dir, _DIR_MODE)
database = os.path.join(db_dir, db_file)
return sqlite3.connect(database,
check_same_thread=False,
isolation_level=None)
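# Note (illustrative): check_same_thread=False allows the connection to be
# shared across threads -- safe here because every access goes through an
# RLock -- and isolation_level=None puts sqlite3 into autocommit mode.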
class NetworkDbOperator(object):
def __init__(self):
self._module_id = 'network'
self._create_switch_table()
def _create_switch_table(self):
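        """Create the switch table if it does not exist."""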
create_table_sql = ' '.join((
'create table if not exists switch (',
'userid varchar(8) COLLATE NOCASE,',
'interface varchar(4) COLLATE NOCASE,',
'switch varchar(8) COLLATE NOCASE,',
'port varchar(128) COLLATE NOCASE,',
'comments varchar(128),',
'primary key (userid, interface));'))
with get_network_conn() as conn:
conn.execute(create_table_sql)
def _get_switch_by_user_interface(self, userid, interface):
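        """Query a switch record by userid and interface."""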
with get_network_conn() as conn:
res = conn.execute("SELECT * FROM switch "
"WHERE userid=? and interface=?",
(userid, interface))
switch_record = res.fetchall()
if len(switch_record) == 1:
return switch_record[0]
elif len(switch_record) == 0:
return None
def switch_delete_record_for_userid(self, userid):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=?",
(userid,))
LOG.debug("Switch record for user %s is removed from "
"switch table" % userid)
def switch_delete_record_for_nic(self, userid, interface):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=? and interface=?",
(userid, interface))
LOG.debug("Switch record for user %s with nic %s is removed from "
"switch table" % (userid, interface))
def switch_add_record(self, userid, interface, port=None,
switch=None, comments=None):
"""Add userid and nic name address into switch table."""
with get_network_conn() as conn:
conn.execute("INSERT INTO switch VALUES (?, ?, ?, ?, ?)",
(userid, interface, switch, port, comments))
LOG.debug("New record in the switch table: user %s, "
"nic %s, port %s" %
(userid, interface, port))
def switch_add_record_migrated(self, userid, interface, switch,
port=None, comments=None):
"""Add userid and interfaces and switch into switch table."""
with get_network_conn() as conn:
conn.execute("INSERT INTO switch VALUES (?, ?, ?, ?, ?)",
(userid, interface, switch, port, comments))
LOG.debug("New record in the switch table: user %s, "
"nic %s, switch %s" %
(userid, interface, switch))
def switch_update_record_with_switch(self, userid, interface,
switch=None):
"""Update information in switch table."""
if not self._get_switch_by_user_interface(userid, interface):
msg = "User %s with nic %s does not exist in DB" % (userid,
interface)
LOG.error(msg)
obj_desc = ('User %s with nic %s' % (userid, interface))
raise exception.SDKObjectNotExistError(obj_desc,
modID=self._module_id)
if switch is not None:
with get_network_conn() as conn:
conn.execute("UPDATE switch SET switch=? "
"WHERE userid=? and interface=?",
(switch, userid, interface))
LOG.debug("Set switch to %s for user %s with nic %s "
"in switch table" %
(switch, userid, interface))
else:
with get_network_conn() as conn:
conn.execute("UPDATE switch SET switch=NULL "
"WHERE userid=? and interface=?",
(userid, interface))
LOG.debug("Set switch to None for user %s with nic %s "
"in switch table" %
(userid, interface))
def _parse_switch_record(self, switch_list):
# Map each switch record to be a dict, with the key is the field name
# in switch DB
switch_keys_list = ['userid', 'interface', 'switch',
'port', 'comments']
switch_result = []
for item in switch_list:
switch_item = dict(zip(switch_keys_list, item))
switch_result.append(switch_item)
return switch_result
def switch_select_table(self):
with get_network_conn() as conn:
result = conn.execute("SELECT * FROM switch")
nic_settings = result.fetchall()
return self._parse_switch_record(nic_settings)
def switch_select_record_for_userid(self, userid):
with get_network_conn() as conn:
result = conn.execute("SELECT * FROM switch "
"WHERE userid=?", (userid,))
switch_info = result.fetchall()
return self._parse_switch_record(switch_info)
def switch_select_record(self, userid=None, nic_id=None, vswitch=None):
if ((userid is None) and
(nic_id is None) and
(vswitch is None)):
return self.switch_select_table()
sql_cmd = "SELECT * FROM switch WHERE"
sql_var = []
if userid is not None:
sql_cmd += " userid=? and"
sql_var.append(userid)
if nic_id is not None:
sql_cmd += " port=? and"
sql_var.append(nic_id)
if vswitch is not None:
sql_cmd += " switch=?"
sql_var.append(vswitch)
# remove the tailing ' and'
sql_cmd = sql_cmd.strip(' and')
with get_network_conn() as conn:
result = conn.execute(sql_cmd, sql_var)
switch_list = result.fetchall()
return self._parse_switch_record(switch_list)
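    # Illustrative example (values assumed, not from the original source):
    # switch_select_record(userid='VM1', vswitch='VSW1') assembles
    #     "SELECT * FROM switch WHERE userid=? and switch=?"
    # with sql_var == ['VM1', 'VSW1']; the trailing " and" is only
    # stripped when the vswitch filter is omitted.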
class FCPDbOperator(object):
def __init__(self):
self._module_id = 'volume'
self._initialize_table()
def _initialize_table(self):
sql = ' '.join((
'CREATE TABLE IF NOT EXISTS fcp(',
'fcp_id char(4) PRIMARY KEY COLLATE NOCASE,',
'assigner_id varchar(8) COLLATE NOCASE,', # foreign key of a VM
'connections integer,', # 0 means no assigner
'reserved integer,', # 0 for not reserved
            'path integer,', # 0 for path0, 1 for path1
'comment varchar(128))'))
with get_fcp_conn() as conn:
conn.execute(sql)
def _update_reserve(self, fcp, reserved):
with get_fcp_conn() as conn:
conn.execute("UPDATE fcp SET reserved=? "
"WHERE fcp_id=?",
(reserved, fcp))
def unreserve(self, fcp):
self._update_reserve(fcp, 0)
def reserve(self, fcp):
self._update_reserve(fcp, 1)
def is_reserved(self, fcp):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE "
"fcp_id=?", (fcp,))
fcp_list = result.fetchall()
reserved = fcp_list[0][3]
return reserved == 1
def negation(self, fcp):
""" now we have a problem, we need to lock FCP devices when attaching
or detaching is running. But detach has different process order with
attach.
When attach, Cinder will call get_volume_connector first and then
call attah_volume.
When detach, Cinder will call detach first and then call
get_volume_connector.
During this process, if we want to lock our FCP, we need a negation
or reverse operation to let us can lock FCP in multiprocess env.
"""
if self.is_reserved(fcp):
# if reserved == 1, reverse it to 0
self.unreserve(fcp)
else:
# if reserved == 0, reverse it to 1
self.reserve(fcp)
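        # Usage sketch (illustrative, not from the original source):
        #     db = FCPDbOperator()
        #     db.reserve('1a01')      # reserved: 0 -> 1
        #     db.negation('1a01')     # toggles back to 0
        #     db.negation('1a01')     # and to 1 again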
def find_and_reserve(self):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp where connections=0 "
"and reserved=0")
fcp_list = result.fetchall()
if len(fcp_list) == 0:
LOG.info("no more fcp to be allocated")
return None
# allocate first fcp found
fcp = fcp_list[0][0]
self._update_reserve(fcp, 1)
return fcp
def new(self, fcp, path):
with get_fcp_conn() as conn:
conn.execute("INSERT INTO fcp (fcp_id, assigner_id, "
"connections, reserved, path, comment) VALUES "
"(?, ?, ?, ?, ?, ?)",
(fcp, '', 0, 0, path, ''))
def assign(self, fcp, assigner_id, update_connections=True):
with get_fcp_conn() as conn:
if update_connections:
conn.execute("UPDATE fcp SET assigner_id=?, connections=? "
"WHERE fcp_id=?",
(assigner_id, 1, fcp))
else:
conn.execute("UPDATE fcp SET assigner_id=? "
"WHERE fcp_id=?",
(assigner_id, fcp))
def delete(self, fcp):
with get_fcp_conn() as conn:
conn.execute("DELETE FROM fcp "
"WHERE fcp_id=?", (fcp,))
def get_all_fcps_of_assigner(self, assigner_id=None):
"""Get dict of all fcp records of specified assigner.
If assigner is None, will get all fcp records.
Format of return is like :
[
(fcp_id, userid, connections, reserved, path, comment),
(u'283c', u'user1', 2, 1, 0, {'state': 'active', 'owner': 'user1'}),
(u'483c', u'user2', 0, 0, 1, {'state': 'free'})
]
"""
with get_fcp_conn() as conn:
if assigner_id:
result = conn.execute("SELECT fcp_id, assigner_id, "
"connections, reserved, path, comment FROM "
"fcp WHERE assigner_id=?", (assigner_id,))
else:
result = conn.execute("SELECT fcp_id, assigner_id, "
"connections, reserved, path, comment FROM fcp")
results = result.fetchall()
if not results:
if assigner_id:
                msg = 'No FCPs found belonging to userid %s.' % assigner_id
obj_desc = "FCP belongs to userid: %s" % assigner_id
else:
msg = 'No FCPs found in database.'
obj_desc = "FCP records in database"
LOG.error(msg)
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
else:
# transfer comment str to dict format
fcp_info = []
for item in results:
item = list(item)
if item[5]:
item[5] = eval(item[5])
fcp_info.append(tuple(item))
return fcp_info
def get_usage_of_fcp(self, fcp):
connections = 0
reserved = 0
with get_fcp_conn() as conn:
result = conn.execute("SELECT assigner_id, reserved, connections "
"FROM fcp WHERE fcp_id=?", (fcp,))
fcp_info = result.fetchall()
if not fcp_info:
msg = 'FCP with id: %s does not exist in DB.' % fcp
LOG.error(msg)
obj_desc = "FCP with id: %s" % fcp
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
assigner_id = fcp_info[0][0]
reserved = fcp_info[0][1]
connections = fcp_info[0][2]
return assigner_id, reserved, connections
def update_usage_of_fcp(self, fcp, assigner_id, reserved, connections):
with get_fcp_conn() as conn:
conn.execute("UPDATE fcp SET assigner_id=?, reserved=?, "
"connections=? WHERE fcp_id=?", (assigner_id,
reserved,
connections,
fcp))
def get_comment_of_fcp(self, fcp):
"""Get the comment content, transfer into dict and return.
"""
with get_fcp_conn() as conn:
result = conn.execute("SELECT comment "
"FROM fcp WHERE fcp_id=?", (fcp,))
current_comment = result.fetchall()
if not current_comment or current_comment[0][0] == '':
current_comment = {}
else:
# transfer from str to dict
current_comment = eval(current_comment[0][0])
return current_comment
def update_comment_of_fcp(self, fcp, comment_dict):
"""Update the cotent of comment.
:param fcp: (str) the FCP ID string
:param comment_dict: (dict) the dict to describe the FCP status
this api will transfer this into string and store into db
The comment in database should be a string like:
"{'state': 'active', 'owner': 'iaas0001'}"
"""
# the input parameter comment_dict must be a dict
if not isinstance(comment_dict, dict):
msg = ("Failed to update comment of FCP %s because input "
"comment %s is not a dict type." % (fcp, comment_dict))
raise exception.SDKInternalError(msg=msg, modID=self._module_id)
new_comment = str(comment_dict)
        # store the new comment into the database
with get_fcp_conn() as conn:
conn.execute("UPDATE fcp SET comment=? "
"WHERE fcp_id=?", (new_comment, fcp))
def update_path_of_fcp(self, fcp, path):
with get_fcp_conn() as conn:
conn.execute("UPDATE fcp SET path=? WHERE "
"fcp_id=?", (path, fcp))
def increase_usage(self, fcp):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE "
"fcp_id=?", (fcp,))
fcp_list = result.fetchall()
if not fcp_list:
msg = 'FCP with id: %s does not exist in DB.' % fcp
LOG.error(msg)
obj_desc = "FCP with id: %s" % fcp
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
connections = fcp_list[0][2]
connections += 1
conn.execute("UPDATE fcp SET connections=? "
"WHERE fcp_id=?", (connections, fcp))
return connections
def increase_usage_by_assigner(self, fcp, assigner_id):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE "
"fcp_id=?", (fcp,))
fcp_list = result.fetchall()
if not fcp_list:
msg = 'FCP with id: %s does not exist in DB.' % fcp
LOG.error(msg)
obj_desc = "FCP with id: %s" % fcp
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
connections = fcp_list[0][2]
connections += 1
conn.execute("UPDATE fcp SET assigner_id=?, connections=? "
"WHERE fcp_id=?", (assigner_id, connections, fcp))
return connections
def decrease_usage(self, fcp):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE "
"fcp_id=?", (fcp,))
fcp_list = result.fetchall()
if not fcp_list:
msg = 'FCP with id: %s does not exist in DB.' % fcp
LOG.error(msg)
obj_desc = "FCP with id: %s" % fcp
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
connections = fcp_list[0][2]
if connections == 0:
                msg = 'FCP with id: %s has no connections in DB.' % fcp
LOG.error(msg)
obj_desc = "FCP with id: %s" % fcp
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
else:
connections -= 1
if connections < 0:
connections = 0
LOG.warning("Warning: connections of fcp is negative",
fcp)
conn.execute("UPDATE fcp SET connections=? "
"WHERE fcp_id=?",
(connections, fcp))
return connections
def get_connections_from_assigner(self, assigner_id):
connections = 0
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE "
"assigner_id=?", (assigner_id,))
fcp_list = result.fetchall()
if not fcp_list:
connections = 0
else:
for fcp in fcp_list:
connections = connections + fcp[2]
return connections
def get_connections_from_fcp(self, fcp):
connections = 0
with get_fcp_conn() as conn:
result = conn.execute("SELECT connections FROM fcp WHERE "
"fcp_id=?", (fcp,))
fcp_info = result.fetchall()
if not fcp_info:
msg = 'FCP with id: %s does not exist in DB.' % fcp
LOG.error(msg)
obj_desc = "FCP with id: %s" % fcp
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
connections = fcp_info[0][0]
return connections
def get_allocated_fcps_from_assigner(self, assigner_id):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE assigner_id=? "
"AND (connections<>0 OR reserved<>0) "
"ORDER BY fcp_id ASC", (assigner_id,))
fcp_list = result.fetchall()
return fcp_list
def get_reserved_fcps_from_assigner(self, assigner_id):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp WHERE assigner_id=? "
"AND reserved <> 0 "
"ORDER BY fcp_id ASC", (assigner_id,))
fcp_list = result.fetchall()
return fcp_list
def get_all(self):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp")
fcp_list = result.fetchall()
return fcp_list
def get_from_fcp(self, fcp):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp where fcp_id=?", (fcp,))
fcp_list = result.fetchall()
return fcp_list
def get_path_count(self):
with get_fcp_conn() as conn:
# Get distinct path list in DB
result = conn.execute("SELECT DISTINCT path FROM fcp")
path_list = result.fetchall()
return len(path_list)
def get_fcp_pair_with_same_index(self):
fcp_list = []
fcp_pair_map = {}
with get_fcp_conn() as conn:
result = conn.execute("SELECT COUNT(path) FROM fcp "
"WHERE reserved = 0 AND connections = 0 "
"GROUP BY path")
free_count_per_path = result.fetchall()
            '''
            count_per_path examples:
            in normal cases, all paths have the same count, e.g.
            4 paths: [7, 7, 7, 7]
            2 paths: [7, 7]
            we can also handle rare abnormal cases
            where path counts differ, e.g.
            4 paths: [7, 4, 5, 6]
            2 paths: [7, 6]
            '''
result = conn.execute("SELECT COUNT(path) FROM fcp "
"GROUP BY path "
"ORDER BY path ASC")
count_per_path = [a[0] for a in result.fetchall()]
# return [] if no free fcp found from at least one path
if len(free_count_per_path) < len(count_per_path):
# For get_fcp_pair with same index, we will not check the
# CONF.volume.min_fcp_paths_count, the returned fcp count
# should always equal to the total paths count
LOG.error("Available paths count: %s, total paths count: "
"%s." %
(len(free_count_per_path), len(count_per_path)))
return fcp_list
'''
fcps 2 paths example:
fcp conn reserved
------------------
[('1a00', 1, 1),
('1a01', 0, 0),
('1a02', 0, 0),
('1a03', 0, 0),
('1a04', 0, 1),
...
('1b00', 1, 0),
('1b01', 2, 1),
('1b02', 0, 0),
('1b03', 0, 0),
('1b04', 0, 0),
... ]
'''
result = conn.execute("SELECT fcp_id, connections, reserved "
"FROM fcp "
"ORDER BY path, fcp_id")
fcps = result.fetchall()
'''
get all free fcps from 1st path
fcp_pair_map example:
idx fcp_pair
----------------
{ 1 : ['1a01'],
2 : ['1a02'],
3 : ['1a03']}
'''
for i in range(count_per_path[0]):
if fcps[i][1] == fcps[i][2] == 0:
fcp_pair_map[i] = [fcps[i][0]]
'''
select out pairs if member count == path count
fcp_pair_map example:
idx fcp_pair
----------------------
{ 2 : ['1a02', '1b02'],
3 : ['1a03', '1b03']}
'''
for idx in fcp_pair_map.copy():
s = 0
for i, c in enumerate(count_per_path[:-1]):
s += c
# avoid index out of range for per path in fcps[]
if idx < count_per_path[i + 1] and \
fcps[s + idx][1] == fcps[s + idx][2] == 0:
fcp_pair_map[idx].append(fcps[s + idx][0])
else:
fcp_pair_map.pop(idx)
break
'''
saves one pair randomly chosen from fcp_pair_map.values()
fcp_list example:
['1a03', '1b03']
'''
if fcp_pair_map:
fcp_list = random.choice(sorted(fcp_pair_map.values()))
else:
LOG.error("Not enough FCPs in fcp pool")
return fcp_list
def get_fcp_pair(self):
fcp_list = []
with get_fcp_conn() as conn:
# Get distinct path list in DB
result = conn.execute("SELECT DISTINCT path FROM fcp")
path_list = result.fetchall()
# Get fcp_list of every path
for no in path_list:
result = conn.execute("SELECT * FROM fcp where connections=0 "
"and reserved=0 and path=%s order by "
"fcp_id" % no)
fcps = result.fetchall()
if not fcps:
                    # continue to check whether other paths have available FCPs
continue
index = random.randint(0, len(fcps) - 1)
fcp_list.append(fcps[index][0])
# Start to check whether the available count >= min_fcp_paths_count
allocated_paths = len(fcp_list)
total_paths = len(path_list)
if allocated_paths < total_paths:
LOG.info("Not all paths have available FCP devices. "
"The count of paths having available FCP: %d is less "
"than total paths: %d. "
"The configured minimum FCP paths count is: %d." %
(allocated_paths, total_paths,
CONF.volume.min_fcp_paths_count))
if allocated_paths >= CONF.volume.min_fcp_paths_count:
LOG.warning("Return the FCPs from the available paths to "
"continue.")
return fcp_list
else:
LOG.error("Not enough FCPs available, return empty list.")
return []
else:
return fcp_list
def get_all_free_unreserved(self):
with get_fcp_conn() as conn:
result = conn.execute("SELECT * FROM fcp where connections=0 "
"and reserved=0")
fcp_list = result.fetchall()
return fcp_list
class ImageDbOperator(object):
def __init__(self):
self._create_image_table()
self._module_id = 'image'
def _create_image_table(self):
create_image_table_sql = ' '.join((
'CREATE TABLE IF NOT EXISTS image (',
'imagename varchar(128) PRIMARY KEY COLLATE NOCASE,',
'imageosdistro varchar(16),',
'md5sum varchar(512),',
'disk_size_units varchar(512),',
'image_size_in_bytes varchar(512),',
'type varchar(16),',
'comments varchar(128))'))
with get_image_conn() as conn:
conn.execute(create_image_table_sql)
def image_add_record(self, imagename, imageosdistro, md5sum,
disk_size_units, image_size_in_bytes,
type, comments=None):
if comments is not None:
with get_image_conn() as conn:
conn.execute("INSERT INTO image (imagename, imageosdistro,"
"md5sum, disk_size_units, image_size_in_bytes,"
" type, comments) VALUES (?, ?, ?, ?, ?, ?, ?)",
(imagename, imageosdistro, md5sum,
disk_size_units, image_size_in_bytes, type,
comments))
else:
with get_image_conn() as conn:
conn.execute("INSERT INTO image (imagename, imageosdistro,"
"md5sum, disk_size_units, image_size_in_bytes,"
" type) VALUES (?, ?, ?, ?, ?, ?)",
(imagename, imageosdistro, md5sum,
disk_size_units, image_size_in_bytes, type))
def image_query_record(self, imagename=None):
"""Query the image record from database, if imagename is None, all
of the image records will be returned, otherwise only the specified
image record will be returned."""
if imagename:
with get_image_conn() as conn:
result = conn.execute("SELECT * FROM image WHERE "
"imagename=?", (imagename,))
image_list = result.fetchall()
if not image_list:
obj_desc = "Image with name: %s" % imagename
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
else:
with get_image_conn() as conn:
result = conn.execute("SELECT * FROM image")
image_list = result.fetchall()
# Map each image record to be a dict, with the key is the field name in
# image DB
image_keys_list = ['imagename', 'imageosdistro', 'md5sum',
'disk_size_units', 'image_size_in_bytes', 'type',
'comments']
image_result = []
for item in image_list:
image_item = dict(zip(image_keys_list, item))
image_result.append(image_item)
return image_result
def image_delete_record(self, imagename):
"""Delete the record of specified imagename from image table"""
with get_image_conn() as conn:
conn.execute("DELETE FROM image WHERE imagename=?", (imagename,))
class GuestDbOperator(object):
def __init__(self):
self._create_guests_table()
self._module_id = 'guest'
def _create_guests_table(self):
"""
net_set: it is used to describe network interface status, the initial
value is 0, no network interface. It will be updated to be
1 after the network interface is configured
"""
sql = ' '.join((
'CREATE TABLE IF NOT EXISTS guests(',
'id char(36) PRIMARY KEY COLLATE NOCASE,',
'userid varchar(8) NOT NULL UNIQUE COLLATE NOCASE,',
'metadata varchar(255),',
'net_set smallint DEFAULT 0,',
'comments text)'))
with get_guest_conn() as conn:
conn.execute(sql)
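        # A freshly added guest row looks like (illustrative values):
        #     ('<uuid4>', 'VM0001', '', 0, '')
        # net_set flips to 1 once the network interface is configured.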
def _check_existence_by_id(self, guest_id, ignore=False):
guest = self.get_guest_by_id(guest_id)
if guest is None:
msg = 'Guest with id: %s does not exist in DB.' % guest_id
if ignore:
# Just print a warning message
LOG.info(msg)
else:
LOG.error(msg)
obj_desc = "Guest with id: %s" % guest_id
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
return guest
def _check_existence_by_userid(self, userid, ignore=False):
guest = self.get_guest_by_userid(userid)
if guest is None:
msg = 'Guest with userid: %s does not exist in DB.' % userid
if ignore:
# Just print a warning message
LOG.info(msg)
else:
LOG.error(msg)
obj_desc = "Guest with userid: %s" % userid
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
return guest
def add_guest_registered(self, userid, meta, net_set,
comments=None):
# Add guest which is migrated from other host or onboarded.
guest_id = str(uuid.uuid4())
with get_guest_conn() as conn:
conn.execute(
"INSERT INTO guests VALUES (?, ?, ?, ?, ?)",
(guest_id, userid, meta, net_set, comments))
def add_guest(self, userid, meta='', comments=''):
# Generate uuid automatically
guest_id = str(uuid.uuid4())
net_set = '0'
with get_guest_conn() as conn:
conn.execute(
"INSERT INTO guests VALUES (?, ?, ?, ?, ?)",
(guest_id, userid, meta, net_set, comments))
def delete_guest_by_id(self, guest_id):
# First check whether the guest exist in db table
guest = self._check_existence_by_id(guest_id, ignore=True)
if guest is None:
return
# Update guest if exist
with get_guest_conn() as conn:
conn.execute(
"DELETE FROM guests WHERE id=?", (guest_id,))
def delete_guest_by_userid(self, userid):
# First check whether the guest exist in db table
guest = self._check_existence_by_userid(userid, ignore=True)
if guest is None:
return
with get_guest_conn() as conn:
conn.execute(
"DELETE FROM guests WHERE userid=?", (userid,))
def get_guest_metadata_with_userid(self, userid):
with get_guest_conn() as conn:
res = conn.execute("SELECT metadata FROM guests "
"WHERE userid=?", (userid,))
guests = res.fetchall()
return guests
def update_guest_by_id(self, uuid, userid=None, meta=None, net_set=None,
comments=None):
if ((userid is None) and (meta is None) and
(net_set is None) and (comments is None)):
msg = ("Update guest with id: %s failed, no field "
"specified to be updated." % uuid)
LOG.error(msg)
raise exception.SDKInternalError(msg=msg, modID=self._module_id)
# First check whether the guest exist in db table
self._check_existence_by_id(uuid)
# Start update
sql_cmd = "UPDATE guests SET"
sql_var = []
if userid is not None:
sql_cmd += " userid=?,"
sql_var.append(userid)
if meta is not None:
sql_cmd += " metadata=?,"
sql_var.append(meta)
if net_set is not None:
sql_cmd += " net_set=?,"
sql_var.append(net_set)
if comments is not None:
sql_cmd += " comments=?,"
sql_var.append(comments)
# remove the tailing comma
sql_cmd = sql_cmd.strip(',')
# Add the id filter
sql_cmd += " WHERE id=?"
sql_var.append(uuid)
with get_guest_conn() as conn:
conn.execute(sql_cmd, sql_var)
def update_guest_by_userid(self, userid, meta=None, net_set=None,
comments=None):
if (meta is None) and (net_set is None) and (comments is None):
msg = ("Update guest with userid: %s failed, no field "
"specified to be updated." % userid)
LOG.error(msg)
raise exception.SDKInternalError(msg=msg, modID=self._module_id)
# First check whether the guest exist in db table
self._check_existence_by_userid(userid)
# Start update
sql_cmd = "UPDATE guests SET"
sql_var = []
if meta is not None:
sql_cmd += " metadata=?,"
sql_var.append(meta)
if net_set is not None:
sql_cmd += " net_set=?,"
sql_var.append(net_set)
if comments is not None:
new_comments = json.dumps(comments)
sql_cmd += " comments=?,"
sql_var.append(new_comments)
# remove the tailing comma
sql_cmd = sql_cmd.strip(',')
# Add the id filter
sql_cmd += " WHERE userid=?"
sql_var.append(userid)
with get_guest_conn() as conn:
conn.execute(sql_cmd, sql_var)
def get_guest_list(self):
with get_guest_conn() as conn:
res = conn.execute("SELECT * FROM guests")
guests = res.fetchall()
return guests
def get_migrated_guest_list(self):
with get_guest_conn() as conn:
res = conn.execute("SELECT userid FROM guests "
"WHERE comments LIKE '%\"migrated\": 1%'")
guests = res.fetchall()
return guests
def get_migrated_guest_info_list(self):
with get_guest_conn() as conn:
res = conn.execute("SELECT * FROM guests "
"WHERE comments LIKE '%\"migrated\": 1%'")
guests = res.fetchall()
return guests
def get_comments_by_userid(self, userid):
""" Get comments record.
output should be like: {'k1': 'v1', 'k2': 'v2'}'
"""
userid = userid
with get_guest_conn() as conn:
res = conn.execute("SELECT comments FROM guests "
"WHERE userid=?", (userid,))
result = res.fetchall()
comments = {}
if result[0][0]:
comments = json.loads(result[0][0])
return comments
def get_metadata_by_userid(self, userid):
"""get metadata record.
output should be like: "a=1,b=2,c=3"
"""
with get_guest_conn() as conn:
res = conn.execute("SELECT * FROM guests "
"WHERE userid=?", (userid,))
guest = res.fetchall()
if len(guest) == 1:
return guest[0][2]
elif len(guest) == 0:
LOG.debug("Guest with userid: %s not found from DB!" % userid)
return ''
else:
msg = "Guest with userid: %s have multiple records!" % userid
LOG.error(msg)
raise exception.SDKInternalError(msg=msg, modID=self._module_id)
def transfer_metadata_to_dict(self, meta):
"""transfer str to dict.
output should be like: {'a':1, 'b':2, 'c':3}
"""
dic = {}
arr = meta.strip(' ,').split(',')
for i in arr:
temp = i.split('=')
key = temp[0].strip()
value = temp[1].strip()
dic[key] = value
return dic
def get_guest_by_id(self, guest_id):
with get_guest_conn() as conn:
res = conn.execute("SELECT * FROM guests "
"WHERE id=?", (guest_id,))
guest = res.fetchall()
# As id is the primary key, the filtered entry number should be 0 or 1
if len(guest) == 1:
return guest[0]
elif len(guest) == 0:
LOG.debug("Guest with id: %s not found from DB!" % guest_id)
return None
# Code shouldn't come here, just in case
return None
def get_guest_by_userid(self, userid):
with get_guest_conn() as conn:
res = conn.execute("SELECT * FROM guests "
"WHERE userid=?", (userid,))
guest = res.fetchall()
        # As userid is declared UNIQUE, the filtered entry number should be 0 or 1
if len(guest) == 1:
return guest[0]
elif len(guest) == 0:
LOG.debug("Guest with userid: %s not found from DB!" % userid)
return None
# Code shouldn't come here, just in case
return None
| 28,260 | 12,704 | 203 |
ed35d1eff08218ca131c5f14d91b3125155013d6 | 2,076 | py | Python | tests/test_api/test_databases.py | quantori/async-couch | c46ce8da6f058024c95db67151a4498042280206 | [
"MIT"
] | 1 | 2020-05-01T01:53:23.000Z | 2020-05-01T01:53:23.000Z | tests/test_api/test_databases.py | quantori/async-couch | c46ce8da6f058024c95db67151a4498042280206 | [
"MIT"
] | 2 | 2020-05-09T19:02:18.000Z | 2021-12-27T10:46:14.000Z | tests/test_api/test_databases.py | quantori/async-couch | c46ce8da6f058024c95db67151a4498042280206 | [
"MIT"
] | 2 | 2021-12-24T09:14:39.000Z | 2022-01-26T08:18:03.000Z |
| 28.833333 | 72 | 0.74422 | from typing import Callable
from async_couch import CouchClient
db_name = 'test_db_01'
invalid_db_name = 'invalid_%^^&_name'
non_existing_db = 'non_existing_database'
doc_id = None
def test_create(async_run: Callable, client: CouchClient):
response = async_run(client.db_create(invalid_db_name))
assert response.status_code == 400
response = async_run(client.db_create(db_name))
assert response.status_code == 201
assert response.json().get('ok') is True
response = async_run(client.db_create(db_name))
assert response.status_code == 412
def test_existing(async_run: Callable, client: CouchClient):
response = async_run(client.db_exists(non_existing_db))
assert response.status_code == 404
response = async_run(client.db_exists(db_name))
assert response.status_code == 200
def test_create_doc(async_run: Callable, client: CouchClient):
global doc_id
doc = dict(test=True)
response = async_run(client.db_create_doc(db_name, doc))
assert response.status_code == 201
doc_id = response.model.id
response = async_run(client.db_create_doc(db_name, doc, batch='ok'))
assert response.status_code == 202
response = async_run(client.db_create_doc(non_existing_db, doc))
assert response.status_code == 404
def test_all_docs(async_run: Callable, client: CouchClient):
response = async_run(client.db_all_docs(db_name, keys=[doc_id]))
assert response.status_code == 200
assert len(response.model.rows) == 1
def test_design_docs(async_run: Callable, client: CouchClient):
response = async_run(client.db_design_docs(db_name, keys=[doc_id]))
assert response.status_code == 200
assert len(response.model.rows) == 1
def test_delete(async_run: Callable, client: CouchClient):
response = async_run(client.db_delete(db_name))
assert response.status_code == 200
response = async_run(client.db_delete(invalid_db_name))
assert response.status_code == 404
response = async_run(client.db_delete(non_existing_db))
assert response.status_code == 404
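# Note: the async_run and client fixtures used above are assumed to be
# provided by the test suite's conftest (not shown in this file).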
| 1,745 | 0 | 138 |
f77aadb1a8300f3980338c3873142149680e9b5d | 3,510 | py | Python | lib/extras/cocoa_definitions.py | cduhard/vimconfig | caf4eb276aafb92ebd6bec7f36f4fc04fba70605 | [
"MIT"
] | 1 | 2016-05-09T09:06:18.000Z | 2016-05-09T09:06:18.000Z | lib/extras/cocoa_definitions.py | cduhard/vimconfig | caf4eb276aafb92ebd6bec7f36f4fc04fba70605 | [
"MIT"
] | null | null | null | lib/extras/cocoa_definitions.py | cduhard/vimconfig | caf4eb276aafb92ebd6bec7f36f4fc04fba70605 | [
"MIT"
] | null | null | null |
| 37.741935 | 80 | 0.601994 | #!/usr/bin/python
'''Creates a folder containing text files of Cocoa keywords.'''
import os, commands, re
from sys import argv
def find(searchpath, ext):
'''Mimics the "find searchpath -name *.ext" unix command.'''
results = []
for path, dirs, files in os.walk(searchpath):
for filename in files:
if filename.endswith(ext):
results.append(os.path.join(path, filename))
return results
def find_headers(frameworks):
'''Returns list of the header files for the given frameworks.'''
headers = []
for framework in frameworks:
headers.extend(find('/System/Library/Frameworks/%s.framework'
% framework, '.h'))
return headers
def default_headers():
'''Headers for common Cocoa frameworks.'''
frameworks = ('Foundation', 'AppKit', 'AddressBook', 'CoreData',
'PreferencePanes', 'QTKit', 'ScreenSaver', 'SyncServices',
'WebKit')
return find_headers(frameworks)
def match_output(command, regex, group_num):
'''
Returns an ordered list of all matches of the supplied regex for the
output of the given command.
'''
results = []
for line in commands.getoutput(command).split("\n"):
match = re.search(regex, line)
if match and not match.group(group_num) in results:
results.append(match.group(group_num))
results.sort()
return results
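# Illustrative call (command and values assumed, not from the original
# source):
#   match_output("echo 'typedef struct NSPoint'", r'(NS[A-Za-z]+)', 1)
# returns ['NSPoint']: every output line is scanned with the regex and
# the requested capture group is collected once, then sorted.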
def get_functions(header_files):
'''Returns list of Cocoa Functions.'''
lines = match_output(r"grep -h '^[A-Z][A-Z_]* [^;]* \**NS\w\+ *(' "
+ header_files, r'NS\w+\s*\(.*?\)', 0)
for i in range(len(lines)):
lines[i] = lines[i].replace('NSInteger', 'int')
lines[i] = lines[i].replace('NSUInteger', 'unsigned int')
lines[i] = lines[i].replace('CGFloat', 'float')
return lines
def get_types(header_files):
'''Returns a list of Cocoa Types.'''
return match_output(r"grep -h 'typedef .* _*NS[A-Za-z]*' "
+ header_files, r'(NS[A-Za-z]+)\s*(;|{)', 1)
def get_constants(header_files):
'''Returns a list of Cocoa Constants.'''
return match_output(r"awk '/^(typedef )?enum .*\{/ {pr = 1;} /\}/ {pr = 0;}"
r"{ if(pr) print $0; }' " + header_files,
r'^\s*(NS[A-Z][A-Za-z0-9_]*)', 1)
def get_notifications(header_files):
'''Returns a list of Cocoa Notifications.'''
return match_output(r"grep -h '\*NS.*Notification' "
+ header_files, r'NS\w*Notification', 0)
def write_file(filename, lines):
'''Attempts to write list to file or exits with error if it can't.'''
try:
f = open(filename, 'w')
except IOError, error:
raise SystemExit(argv[0] + ': %s' % error)
f.write("\n".join(lines))
f.close()
def extract_files_to(dirname=None):
'''Extracts .txt files to given directory or ./cocoa_indexes by default.'''
if dirname is None:
dirname = './cocoa_indexes'
if not os.path.isdir(dirname):
os.mkdir(dirname)
headers = ' '.join(default_headers())
write_file(dirname + '/functions.txt', get_functions (headers))
write_file(dirname + '/types.txt', get_types (headers))
write_file(dirname + '/constants.txt', get_constants (headers))
write_file(dirname + '/notifications.txt', get_notifications(headers))
if __name__ == '__main__':
extract_files_to(argv[1] if len(argv) > 1 else None)
| 0 | 0 | 0 |
344d442d85a139cfdea7e2f40c9bf045c0e2dee0 | 808 | py | Python | benchmarks/intersperse/intersperse.py | hirnimeshrampuresoftware/iteration_utilities | a77a3d880a52cb663538d67415520d74e418a906 | [
"Apache-2.0"
] | 72 | 2016-09-12T03:01:02.000Z | 2022-03-05T16:54:45.000Z | benchmarks/intersperse/intersperse.py | hirnimeshrampuresoftware/iteration_utilities | a77a3d880a52cb663538d67415520d74e418a906 | [
"Apache-2.0"
] | 127 | 2016-09-14T02:07:33.000Z | 2022-03-19T13:17:32.000Z | benchmarks/intersperse/intersperse.py | hirnimeshrampuresoftware/iteration_utilities | a77a3d880a52cb663538d67415520d74e418a906 | [
"Apache-2.0"
] | 11 | 2017-02-22T20:40:37.000Z | 2022-03-05T16:55:40.000Z |
| 31.076923 | 80 | 0.77599 | import iteration_utilities
import more_itertools
import toolz
import cytoolz
import pydash
def bench_iu_intersperse(iterable, func=iteration_utilities.intersperse):
iteration_utilities.consume(func(iterable, 2), None)
def bench_more_itertools_intersperse(iterable, func=more_itertools.intersperse):
iteration_utilities.consume(func(2, iterable), None)
def bench_toolz_interpose(iterable, func=toolz.interpose):
iteration_utilities.consume(func(2, iterable), None)
def bench_cytoolz_interpose(iterable, func=cytoolz.interpose):
iteration_utilities.consume(func(2, iterable), None)
def bench_pd(iterable, func=pydash.intersperse):
func(iterable, 2)
def args_list_length():
for exponent in range(2, 18):
size = 2**exponent
yield size, [i % 10 for i in range(size)]
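# args_list_length yields (size, list) pairs for sizes 4, 8, ..., 131072
# (2**2 through 2**17); e.g. the first pair is (4, [0, 1, 2, 3]). A
# benchmark runner (assumed, not shown in this file) feeds these lists
# to the bench_* functions above.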
| 579 | 0 | 138 |
fba73375fa965c956afa537dc1de06b0ce34438e | 1,931 | py | Python | threads.py | techborn/pIRC | cb866e307e082947ef209bca88d01706311552c3 | [
"MIT"
] | null | null | null | threads.py | techborn/pIRC | cb866e307e082947ef209bca88d01706311552c3 | [
"MIT"
] | null | null | null | threads.py | techborn/pIRC | cb866e307e082947ef209bca88d01706311552c3 | [
"MIT"
] | null | null | null |
| 30.650794 | 90 | 0.489384 | import threading
import sys
from traceback import print_tb,print_exc
from random import randint
from time import ctime as now
class JobThread(threading.Thread):
"""
Thread that executes a job every N milliseconds
"""
def __init__(self, func, ref):
threading.Thread.__init__(self)
self._finished = threading.Event()
self._func = func
self._ref = ref
self._error = False
def copy(self):
return self.__class__(self._func, self._ref)
def shutdown(self):
"""
Stop this thread
"""
self._finished.set()
def is_shutdown(self):
"""
Boolean check on the thread's state
"""
return bool(self._finished.isSet())
def run(self):
"""
Keep running this thread until it's shutdown
"""
self._finished.wait(10)
while not self._finished.isSet():
try:
self._func(self._ref)
self._error = False
except:
if not self._error:
print(" ")
print(">>>Exception occured in thread: {0}".format(sys.exc_info()[1]))
print_tb(sys.exc_info()[2])
print(" ")
f = open('{0} - ThreadLog.txt'.format(self._ref.config['name']),'a')
f.write("\r\n")
f.write(now())
f.write("\r\nConnection: {0}\r\n".format(self._ref.config['host']))
print_exc(None,f)
f.write("\r\n")
f.close()
self._error = True
finally:
if self._func._max:
self._finished.wait(randint(self._func._min,self._func._max)*0.001)
else:
self._finished.wait(self._func._min*0.001)
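# Usage sketch (illustrative; assumes pIRC attaches _min/_max interval
# attributes, in milliseconds, to the job function):
#     def job(bot): bot.ping()
#     job._min, job._max = 1000, 2000
#     t = JobThread(job, bot_instance)
#     t.start()       # calls job(bot_instance) every 1-2 seconds
#     t.shutdown()    # ends the loop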
| 233 | 0 | 62 |
fb7197d2b1374d676009db827b83327d4a1efba1 | 3,725 | py | Python | applications/trilinos_application/test_examples/cantilever2d.gid/cantilever2dstatic.py | jiaqiwang969/Kratos-test | ed082abc163e7b627f110a1ae1da465f52f48348 | [
"BSD-4-Clause"
] | null | null | null | applications/trilinos_application/test_examples/cantilever2d.gid/cantilever2dstatic.py | jiaqiwang969/Kratos-test | ed082abc163e7b627f110a1ae1da465f52f48348 | [
"BSD-4-Clause"
] | null | null | null | applications/trilinos_application/test_examples/cantilever2d.gid/cantilever2dstatic.py | jiaqiwang969/Kratos-test | ed082abc163e7b627f110a1ae1da465f52f48348 | [
"BSD-4-Clause"
] | null | null | null |
| 31.567797 | 94 | 0.721611 | import mpi #needed to use mpi
##################################################################
##################################################################
#setting the domain size for the problem to be solved
domain_size = 2
##################################################################
##################################################################
## ATTENTION: here the order is important
#including kratos path
kratos_libs_path = '../../../../libs' ##kratos_root/libs
kratos_applications_path = '../../../../applications' ##kratos_root/applications
kratos_python_scripts_path = '../../../../applications/structural_application/python_scripts'
import sys
sys.path.append(kratos_libs_path)
sys.path.append(kratos_applications_path)
sys.path.append(kratos_python_scripts_path)
#importing Kratos main library
from Kratos import *
kernel = Kernel() #defining kernel
#importing applications
import applications_interface
applications_interface.Import_StructuralApplication = True
applications_interface.Import_KratosTrilinosApplication = True
applications_interface.Import_KratosMetisApplication = True
applications_interface.ImportApplications(kernel, kratos_applications_path)
from KratosStructuralApplication import *
from KratosTrilinosApplication import *
from KratosMetisApplication import *
## from now on the order is no longer crucial
##################################################################
##################################################################
#defining a model part
model_part = ModelPart("FluidPart");
#adding of Variables to Model Part should be here
import trilinos_structural_solver_static
trilinos_structural_solver_static.AddVariables(model_part)
model_part.AddNodalSolutionStepVariable(PARTITION_INDEX)
#reading a model
gid_mode = GiDPostMode.GiD_PostBinary
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteElementsOnly
gid_io = GidIO("cantilever2d",gid_mode,multifile,deformed_mesh_flag, write_conditions)
number_of_partitions = mpi.size #we set it equal to the number of processors
print "number_of_partitions", number_of_partitions
partitioner = MetisPartitioningProcess(model_part, gid_io, number_of_partitions, domain_size);
partitioner.Execute()
mesh_name = mpi.rank
gid_io.InitializeMesh( mesh_name );
gid_io.WriteMesh((model_part).GetMesh());
gid_io.FinalizeMesh()
print "pippo"
print model_part
#print model_part.Properties
#writing the mesh
#gid_io.WriteMesh(model_part.GetMesh(),domain_size,GiDPostMode.GiD_PostBinary);
#the buffer size should be set up here after the mesh is read for the first time
model_part.SetBufferSize(2)
#importing the solver files
trilinos_structural_solver_static.AddDofs(model_part)
#creating a fluid solver object
solver = trilinos_structural_solver_static.StaticStructuralSolver(model_part,domain_size)
##pILUPrecond = ILU0Preconditioner()
##solver.structure_linear_solver = BICGSTABSolver(1e-9, 5000,pILUPrecond)
model_part.Properties[1].SetValue(CONSTITUTIVE_LAW, Isotropic2D() )
print "Linear elastic model selected"
solver.Initialize()
(solver).SetEchoLevel(2);
Dt = 0.001
nsteps = 5
print("initializing results")
gid_io.InitializeResults(mesh_name,(model_part).GetMesh())
for step in range(0,nsteps):
time = Dt*step
model_part.CloneTimeStep(time)
print time
#print model_part.ProcessInfo()[TIME]
#solving the fluid problem
solver.Solve()
#print the results
print "a"
gid_io.WriteNodalResults(DISPLACEMENT,model_part.Nodes,time,0)
gid_io.WriteNodalResults(REACTION,model_part.Nodes,time,0)
gid_io.FinalizeResults()
print "finito"
| 0 | 0 | 0 |
2f83da54d897e766d0d3af8c0eb9288935bf6e10 | 17,675 | py | Python | uranai.py | heeeedgehog/chat2021 | a7e0b06ec7dc6cf66092afda18bbc770c58fd5b6 | [
"CC0-1.0"
] | null | null | null | uranai.py | heeeedgehog/chat2021 | a7e0b06ec7dc6cf66092afda18bbc770c58fd5b6 | [
"CC0-1.0"
] | null | null | null | uranai.py | heeeedgehog/chat2021 | a7e0b06ec7dc6cf66092afda18bbc770c58fd5b6 | [
"CC0-1.0"
] | null | null | null |
| 36.518595 | 169 | 0.606789 | import re
import time
import random
import IPython
from google.colab import output
n = 0
def chat(text, **kw): # chat function (rewrite this to change the bot's replies)
global n
n += 1
return 'ほ' * n
# icon settings (avatar images)
BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png'
YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png'
def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw):
def display_bot(bot_text):
with output.redirect_to_element('#output'):
bot_name = kw.get('bot_name', 'Master')
bot_icon = kw.get('bot_icon', BOT_ICON)
display(IPython.display.HTML(f'''
<div class="sb-box">
<div class="icon-img icon-img-left">
<img src="{bot_icon}" width="60px">
</div><!-- /.icon-img icon-img-left -->
<div class="icon-name icon-name-left">{bot_name}</div>
<div class="sb-side sb-side-left">
<div class="sb-txt sb-txt-left">
{bot_text}
</div><!-- /.sb-txt sb-txt-left -->
</div><!-- /.sb-side sb-side-left -->
</div><!-- /.sb-box -->
'''))
def display_you(your_text):
with output.redirect_to_element('#output'):
your_name = kw.get('your_name', 'あなた')
your_icon = kw.get('your_icon', YOUR_ICON)
display(IPython.display.HTML(f'''
<div class="sb-box">
<div class="icon-img icon-img-right">
<img src="{your_icon}" width="60px">
</div><!-- /.icon-img icon-img-right -->
<div class="icon-name icon-name-right">{your_name}</div>
<div class="sb-side sb-side-right">
<div class="sb-txt sb-txt-right">
{your_text}
</div><!-- /.sb-txt sb-txt-right -->
</div><!-- /.sb-side sb-side-right -->
</div><!-- /.sb-box -->
'''))
display(IPython.display.HTML('''
<style>
/* 全体 */
.sb-box {
position: relative;
overflow: hidden;
}
/* アイコン画像 */
.icon-img {
position: absolute;
overflow: hidden;
top: 0;
width: 80px;
height: 80px;
}
/* アイコン画像(左) */
.icon-img-left {
left: 0;
}
/* アイコン画像(右) */
.icon-img-right {
right: 0;
}
/* アイコン画像 */
.icon-img img {
border-radius: 50%;
border: 2px solid #eee;
}
/* アイコンネーム */
.icon-name {
position: absolute;
width: 80px;
text-align: center;
top: 83px;
color: #fff;
font-size: 10px;
}
/* アイコンネーム(左) */
.icon-name-left {
left: 0;
}
/* アイコンネーム(右) */
.icon-name-right {
right: 0;
}
/* 吹き出し */
.sb-side {
position: relative;
float: left;
margin: 0 105px 40px 105px;
}
.sb-side-right {
float: right;
}
/* 吹き出し内のテキスト */
.sb-txt {
position: relative;
border: 2px solid #eee;
border-radius: 6px;
background: #eee;
color: #333;
font-size: 15px;
line-height: 1.7;
padding: 18px;
}
.sb-txt>p:last-of-type {
padding-bottom: 0;
margin-bottom: 0;
}
/* 吹き出しの三角 */
.sb-txt:before {
content: "";
position: absolute;
border-style: solid;
top: 16px;
z-index: 3;
}
.sb-txt:after {
content: "";
position: absolute;
border-style: solid;
top: 15px;
z-index: 2;
}
/* 吹き出しの三角(左) */
.sb-txt-left:before {
left: -7px;
border-width: 7px 10px 7px 0;
border-color: transparent #eee transparent transparent;
}
.sb-txt-left:after {
left: -10px;
border-width: 8px 10px 8px 0;
border-color: transparent #eee transparent transparent;
}
/* 吹き出しの三角(右) */
.sb-txt-right:before {
right: -7px;
border-width: 7px 0 7px 10px;
border-color: transparent transparent transparent #eee;
}
.sb-txt-right:after {
right: -10px;
border-width: 8px 0 8px 10px;
border-color: transparent transparent transparent #eee;
}
/* 767px(iPad)以下 */
@media (max-width: 767px) {
.icon-img {
width: 60px;
height: 60px;
}
/* アイコンネーム */
.icon-name {
width: 60px;
top: 62px;
font-size: 9px;
}
/* 吹き出し(左) */
.sb-side-left {
margin: 0 0 30px 78px;
/* 吹き出し(左)の上下左右の余白を狭く */
}
/* 吹き出し(右) */
.sb-side-right {
margin: 0 78px 30px 0;
/* 吹き出し(右)の上下左右の余白を狭く */
}
/* 吹き出し内のテキスト */
.sb-txt {
padding: 12px;
/* 吹き出し内の上下左右の余白を-6px */
}
}
</style>
<script>
var inputPane = document.getElementById('input');
inputPane.addEventListener('keydown', (e) => {
if(e.keyCode == 13) {
google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {});
inputPane.value=''
}
});
</script>
<div id='output' style='background: #66d;'></div>
<div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div>
'''))
def convert(your_text):
display_you(your_text)
bot_text = chat(your_text, **kw)
time.sleep(random.randint(0,4))
display_bot(bot_text)
output.register_callback('notebook.Convert', convert)
if start is not None:
display_bot(start)
# frame: a dictionary that holds the dialogue state
# 'name', 'birthday', 'asking'
frame = {}
TYPE = []
def number(x):
number = list(x)
number = [''.join( x for x in number if x not in '\n')]
number = sum([[*word] for word in number], [])
m = re.compile('^[0-9]+$')
result = [s for s in number if m.match(s)]
number = list(map(int, result))
#sn = sum(int(c) for c in number)
return len(number)
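# e.g. number('\n2000') == 4 and number('20a0') == 3: only digit
# characters are counted, which is how uranai() validates input lengths.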
def match(x):
Match = list(x)
Match = ''.join( x for x in Match if x not in '\n')
pattern = r'\d\d'
result = re.match(pattern, Match)
if result == None:
return 'None'
def soulnumber(X):
number = [''.join( x for x in X if x not in '\n')]
number = sum([[*word] for word in number], [])
m = re.compile('^[0-9]+$')
result = [s for s in number if m.match(s)]
number = list(map(int, result))
sn = sum(int(c) for c in number)
    if sn % 11 == 0: # repdigit (master number) case
        return sn
    if sn > 9: # still two digits:
        return soulnumber(str(sn)) # reduce recursively
return sn
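# Worked examples (dates are illustrative): '2000'+'01'+'01' gives
# 2+0+0+0+0+1+0+1 = 4, so the soul number is 4; '1985'+'07'+'29' sums to
# 41 and reduces recursively to 4+1 = 5; '2002'+'08'+'28' sums to 22, a
# multiple of 11, so the master number 22 is returned unreduced.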
def uranai(input_text):
    global frame # refer to the module-level state
    if 'asking' in frame: # store the answer under the pending slot
frame[frame['asking']] = input_text
del frame['asking']
if 'name' not in frame:
        frame['asking'] = 'name' # ask for the name
return 'あなたの名前は?'
if frame['name'] == '\n':
del frame['name']
frame['asking'] = 'name'
return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。'
if 'name' in frame and 'year' not in frame:
        frame['asking'] = 'year' # ask for the birth year
return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。'
if 'name' in frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'):
del frame['year']
        frame['asking'] = 'year' # ask for the birth year again
return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。'
if 'name' in frame and 'year' in frame and 'month' not in frame:
        frame['asking'] = 'month' # ask for the birth month
return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'):
del frame['month']
        frame['asking'] = 'month' # ask for the birth month again
return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' not in frame:
        frame['asking'] = 'day' # ask for the birth day
return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and 'month' in frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'):
del frame['day']
        frame['asking'] = 'day' # ask for the birth day again
return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。'
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' not in frame: # time to pick a reading
frame['asking'] = 'type'
return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。'
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and frame['type'] != '\nA' and frame['type'] != '\nB' and frame['type'] != '\nC': # validate the chosen reading
del frame['type']
frame['asking'] = 'type'
return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。'
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and 'manzoku' not in frame:
        # Compute the soul number from the digits of the birth date, then return the
        # reading for the chosen type (A: personality, B: celebrities, C: lucky color).
        soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))
        feedback = 'Enter Y if you are satisfied with this result, or N if you are not.'
        personality = {
            1: 'you have wonderful drive, a quick mind, and the people around you rely on you!',
            2: 'you have a frank big-brother/big-sister personality and a knack for keeping things in balance!',
            3: 'you are a peace-loving person with keen insight, looked up to by those around you!',
            4: 'you are an outgoing, proactive leader who influences the people around you!',
            5: 'you are earnest, hate dishonesty, and push forward on your own path!',
            6: 'you are sociable, compassionate, and quick-witted!',
            7: 'you are kind, family-oriented, and strongly inquisitive!',
            8: 'you have a calm and pure-hearted personality!',
            9: 'you get lonely easily, have a mischievous side, and are impossible to dislike!',
            11: 'you have sharp intuition!',
            22: 'you have strong judgment and never give up!',
            33: 'you are a natural genius!',
            44: 'you are a great problem solver, a born leader, and a sharp thinker!',
        }
        celebrities = {
            1: 'the comedian Toshiaki Kasuga and the actor Ryo Narita',
            2: 'the singer Akiko Wada and the actor Kento Yamazaki',
            3: 'the actor Katsuhisa Namase and the actress Yuki Amami',
            4: 'the comedian Naomi Watanabe and the actress Ryoko Yonekura',
            5: 'the prep-school teacher Osamu Hayashi and the TV personality Taichi Kokubun',
            6: 'the actress Kyoko Fukada and the singer Kenshi Yonezu',
            7: 'the actresses Yui Aragaki and Masami Nagasawa',
            8: 'the figure skater Mao Asada and the baseball player Shohei Ohtani',
            9: 'the actress Keiko Kitagawa and the comedian Hitoshi Matsumoto',
            11: 'the comedian Shinya Ueda and the actress Hana Sugisaki',
            22: 'the comedian Daikichi Hakata and the actress Eiko Koike',
            33: 'the actor Masaharu Fukuyama and the singer Aimyon',
            44: 'the idol Yuta Kishi and the actress Shizuka Nakamura',
        }
        lucky_colors = {
            1: 'Red', 2: 'White, Orange and Blue', 3: 'Yellow', 4: 'Green, Brown and Blue',
            5: 'Green and Pink', 6: 'Pink', 7: 'Navy', 8: 'Orange', 9: 'Purple and White',
            11: 'Silver', 22: 'Gold, Silver and Green', 33: 'Rainbow', 44: 'Brown',
        }
        key = soul if soul in personality else 44  # any unlisted value falls back to 44
        frame['asking'] = 'manzoku'
        if frame['type'] == '\nA':
            TYPE.append('A')
            return 'With a soul number of ' + str(key) + ', ' + personality[key] + ' ' + feedback
        if frame['type'] == '\nB':
            TYPE.append('B')
            return 'Famous people who share your soul number ' + str(key) + ' include ' + celebrities[key] + ', among many others! ' + feedback
        if frame['type'] == '\nC':
            TYPE.append('C')
            return 'With a soul number of ' + str(key) + ', your lucky color is ' + lucky_colors[key] + '! Keep it in mind! ' + feedback
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] != '\nY' and frame['manzoku'] != '\nN':
        del frame['manzoku']
        frame['asking'] = 'manzoku'
        return 'That does not seem to be entered correctly. Please enter one letter again: Y or N.'
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\nY':
        return 'Wonderful! Please come back for another reading!'
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\nN' and len(TYPE) < 3:
        del frame['type']
        del frame['manzoku']
        frame['asking'] = 'type'
        return 'Then let us try once more: choose A (personality and type), B (famous people with the same number) or C (lucky color) and enter one letter: A, B or C. The next reading will make you happy, I promise!'
    if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\nN' and len(TYPE) >= 3:
        return 'I am sorry that none of the A, B and C readings could make you happy. But you surely have charms that no reading could uncover!! Cheer up!!!'
    return output_text
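
# Note on the soul-number arithmetic used in the dialogue logic above: the
# soulnumber() helper (called above, presumably defined earlier in this file) is
# expected to sum the digits of the birth date and keep reducing until a single
# digit or one of the master numbers 11/22/33/44 remains, e.g. a birth date of
# 2000-01-23 gives 2+0+0+0+0+1+2+3 = 8.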
def start():
run_chat(chat=uranai)
| 25,871 | 0 | 160 |
7de1aa8a34f62c7e9615ba4c4b38a1f8141a275a | 21,397 | py | Python | test/python/T0_t/WMBS_t/JobSplitting_t/Repack_t.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | ["Apache-2.0"] | 6 | 2016-03-09T14:36:19.000Z | 2021-07-27T01:28:00.000Z | test/python/T0_t/WMBS_t/JobSplitting_t/Repack_t.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | ["Apache-2.0"] | 193 | 2015-01-07T21:03:43.000Z | 2022-03-31T12:22:18.000Z | test/python/T0_t/WMBS_t/JobSplitting_t/Repack_t.py | silviodonato/T0 | a093729d08b31175ed35cd20e889bd7094ce152a | ["Apache-2.0"] | 36 | 2015-01-28T19:01:54.000Z | 2021-12-15T17:18:20.000Z |
#!/usr/bin/env python
"""
_Repack_t_
Repack job splitting test
"""
import unittest
import threading
import logging
import time
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.DataStructs.Run import Run
from WMCore.DAOFactory import DAOFactory
from WMCore.JobSplitting.SplitterFactory import SplitterFactory
from WMCore.Services.UUIDLib import makeUUID
from WMQuality.TestInit import TestInit
class RepackTest(unittest.TestCase):
"""
_RepackTest_
Test for Repack job splitter
"""
def setUp(self):
"""
_setUp_
"""
import WMQuality.TestInit
WMQuality.TestInit.deleteDatabaseAfterEveryTest("I'm Serious")
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"])
self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")
myThread = threading.currentThread()
daoFactory = DAOFactory(package = "T0.WMBS",
logger = logging,
dbinterface = myThread.dbi)
myThread.dbi.processData("""INSERT INTO wmbs_location
(id, site_name, state, state_time)
VALUES (1, 'SomeSite', 1, 1)
""", transaction = False)
myThread.dbi.processData("""INSERT INTO wmbs_pnns
(id, pnn)
VALUES (2, 'SomePNN')
""", transaction = False)
myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
(location, pnn)
VALUES (1, 2)
""", transaction = False)
insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
insertRunDAO.execute(binds = { 'RUN' : 1,
'HLTKEY' : "someHLTKey" },
transaction = False)
insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
for lumi in [1, 2, 3, 4]:
insertLumiDAO.execute(binds = { 'RUN' : 1,
'LUMI' : lumi },
transaction = False)
insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
insertStreamDAO.execute(binds = { 'STREAM' : "A" },
transaction = False)
insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
insertStreamFilesetDAO.execute(1, "A", "TestFileset1")
self.fileset1 = Fileset(name = "TestFileset1")
self.fileset1.load()
workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
workflow1.create()
self.subscription1 = Subscription(fileset = self.fileset1,
workflow = workflow1,
split_algo = "Repack",
type = "Repack")
self.subscription1.create()
# keep for later
self.insertClosedLumiDAO = daoFactory(classname = "RunLumiCloseout.InsertClosedLumi")
self.currentTime = int(time.time())
# default split parameters
self.splitArgs = {}
self.splitArgs['maxSizeSingleLumi'] = 20*1024*1024*1024
self.splitArgs['maxSizeMultiLumi'] = 10*1024*1024*1024
self.splitArgs['maxInputEvents'] = 500000
self.splitArgs['maxInputFiles'] = 1000
self.splitArgs['maxLatency'] = 50000
return
def tearDown(self):
"""
_tearDown_
"""
self.testInit.clearDatabase()
return
def getNumActiveSplitLumis(self):
"""
_getNumActiveSplitLumis_
helper function that counts the number of active split lumis
"""
myThread = threading.currentThread()
results = myThread.dbi.processData("""SELECT COUNT(*)
FROM lumi_section_split_active
""", transaction = False)[0].fetchall()
return results[0][0]
def test00(self):
"""
_test00_
Test that the job name prefix feature works
Test multi lumi size threshold
Multi lumi input
"""
mySplitArgs = self.splitArgs.copy()
for lumi in [1, 2, 3, 4]:
filecount = 2
for i in range(filecount):
newFile = File(makeUUID(), size = 1000, events = 100)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
mySplitArgs['maxSizeMultiLumi'] = self.splitArgs['maxSizeMultiLumi']
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
mySplitArgs['maxSizeMultiLumi'] = 5000
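        # 4 lumis x 2 files x 1000 bytes: with a 5000-byte multi-lumi limit, lumis 1+2
        # (4 files, 4000 bytes) fit in one job, while adding lumi 3 would exceed the limit.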
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create a single job")
job = jobGroups[0].jobs[0]
self.assertTrue(job['name'].startswith("Repack-"),
"ERROR: Job has wrong name")
self.assertEqual(len(job.getFiles()), 4,
"ERROR: Job does not process 4 files")
self.fileset1.markOpen(False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create a single job")
job = jobGroups[0].jobs[0]
self.assertTrue(job['name'].startswith("Repack-"),
"ERROR: Job has wrong name")
self.assertEqual(len(job.getFiles()), 4,
"ERROR: Job does not process 4 files")
self.assertEqual(self.getNumActiveSplitLumis(), 0,
"ERROR: Split lumis were created")
return
def test01(self):
"""
_test01_
Test multi lumi event threshold
Multi lumi input
"""
mySplitArgs = self.splitArgs.copy()
insertClosedLumiBinds = []
for lumi in [1, 2, 3, 4]:
filecount = 2
for i in range(filecount):
newFile = File(makeUUID(), size = 1000, events = 100)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
insertClosedLumiBinds.append( { 'RUN' : 1,
'LUMI' : lumi,
'STREAM' : "A",
'FILECOUNT' : filecount,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime } )
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
transaction = False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
mySplitArgs['maxInputEvents'] = 500
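        # 4 lumis x 2 files x 100 events: with a 500-event limit, lumis 1+2 (4 files,
        # 400 events) fit in one job, while adding lumi 3 would exceed the limit.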
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create a single job")
job = jobGroups[0].jobs[0]
self.assertEqual(len(job.getFiles()), 4,
"ERROR: Job does not process 4 files")
self.fileset1.markOpen(False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create a single job")
job = jobGroups[0].jobs[0]
self.assertEqual(len(job.getFiles()), 4,
"ERROR: Job does not process 4 files")
self.assertEqual(self.getNumActiveSplitLumis(), 0,
"ERROR: Split lumis were created")
return
def test02(self):
"""
_test02_
Test single lumi size threshold
Single lumi input
"""
mySplitArgs = self.splitArgs.copy()
insertClosedLumiBinds = []
for lumi in [1]:
filecount = 8
for i in range(filecount):
newFile = File(makeUUID(), size = 1000, events = 100)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
insertClosedLumiBinds.append( { 'RUN' : 1,
'LUMI' : lumi,
'STREAM' : "A",
'FILECOUNT' : filecount,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime } )
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
transaction = False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
mySplitArgs['maxSizeSingleLumi'] = 6500
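        # A single lumi of 8 files x 1000 bytes (8000 bytes) exceeds the 6500-byte
        # single-lumi limit, so the lumi must be split: 6 files (6000 bytes) + 2 files.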
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 2,
"ERROR: JobFactory didn't create two jobs")
job = jobGroups[0].jobs[0]
self.assertEqual(len(job.getFiles()), 6,
"ERROR: Job does not process 6 files")
job = jobGroups[0].jobs[1]
self.assertEqual(len(job.getFiles()), 2,
"ERROR: Job does not process 2 files")
self.assertEqual(self.getNumActiveSplitLumis(), 1,
"ERROR: Split lumis were not created")
return
def test03(self):
"""
_test03_
Test single lumi event threshold
Single lumi input
"""
mySplitArgs = self.splitArgs.copy()
insertClosedLumiBinds = []
for lumi in [1]:
filecount = 8
for i in range(filecount):
newFile = File(makeUUID(), size = 1000, events = 100)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
insertClosedLumiBinds.append( { 'RUN' : 1,
'LUMI' : lumi,
'STREAM' : "A",
'FILECOUNT' : filecount,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime } )
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
transaction = False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
mySplitArgs['maxInputEvents'] = 650
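        # A single lumi of 8 files x 100 events (800 events) exceeds the 650-event
        # single-lumi limit, so the lumi must be split: 6 files (600 events) + 2 files.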
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 2,
"ERROR: JobFactory didn't create two jobs")
job = jobGroups[0].jobs[0]
self.assertEqual(len(job.getFiles()), 6,
"ERROR: Job does not process 6 files")
job = jobGroups[0].jobs[1]
self.assertEqual(len(job.getFiles()), 2,
"ERROR: Job does not process 2 files")
self.assertEqual(self.getNumActiveSplitLumis(), 1,
"ERROR: Split lumis were not created")
return
def test04(self):
"""
_test04_
Test streamer count threshold (only multi lumi)
Multi lumi input
"""
mySplitArgs = self.splitArgs.copy()
insertClosedLumiBinds = []
for lumi in [1, 2, 3, 4]:
filecount = 2
for i in range(filecount):
newFile = File(makeUUID(), size = 1000, events = 100)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
insertClosedLumiBinds.append( { 'RUN' : 1,
'LUMI' : lumi,
'STREAM' : "A",
'FILECOUNT' : filecount,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime } )
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
transaction = False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
mySplitArgs['maxInputFiles'] = 5
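        # 4 lumis x 2 files: with a 5-file limit, lumis 1+2 (4 files) fit in one job,
        # while adding lumi 3 would exceed the limit.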
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create a single job")
job = jobGroups[0].jobs[0]
self.assertEqual(len(job.getFiles()), 4,
"ERROR: Job does not process 4 files")
self.fileset1.markOpen(False)
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create a single job")
job = jobGroups[0].jobs[0]
self.assertEqual(len(job.getFiles()), 4,
"ERROR: Job does not process 4 files")
self.assertEqual(self.getNumActiveSplitLumis(), 0,
"ERROR: Split lumis were created")
return
def test05(self):
"""
_test05_
Test repacking of multiple lumis with holes in the lumi sequence
Multi lumi input
"""
mySplitArgs = self.splitArgs.copy()
insertClosedLumiBinds = []
for lumi in [1, 2, 4]:
filecount = 2
for i in range(filecount):
newFile = File(makeUUID(), size = 1000, events = 100)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
insertClosedLumiBinds.append( { 'RUN' : 1,
'LUMI' : lumi,
'STREAM' : "A",
'FILECOUNT' : filecount,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime } )
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
transaction = False)
mySplitArgs['maxInputFiles'] = 5
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 0,
"ERROR: JobFactory should have returned no JobGroup")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 1,
'LUMI' : 3,
'STREAM' : "A",
'FILECOUNT' : 0,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime },
transaction = False)
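        # Closing the missing lumi 3 with FILECOUNT 0 fills the hole in the lumi
        # sequence, so the splitter can now release lumis 1+2 (4 files) as one job.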
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 1,
"ERROR: JobFactory didn't create one job")
self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
"ERROR: first job does not process 4 files")
return
def test06(self):
"""
_test06_
Test repacking of 3 lumis
2 small lumis (single job), followed by a big one (multiple jobs)
files for lumi 1 and 2 are below multi-lumi thresholds
files for lumi 3 are above single-lumi threshold
"""
mySplitArgs = self.splitArgs.copy()
insertClosedLumiBinds = []
for lumi in [1, 2, 3]:
filecount = 2
for i in range(filecount):
if lumi == 3:
nevents = 500
else:
nevents = 100
newFile = File(makeUUID(), size = 1000, events = nevents)
newFile.addRun(Run(1, *[lumi]))
newFile.setLocation("SomePNN", immediateSave = False)
newFile.create()
self.fileset1.addFile(newFile)
insertClosedLumiBinds.append( { 'RUN' : 1,
'LUMI' : lumi,
'STREAM' : "A",
'FILECOUNT' : filecount,
'INSERT_TIME' : self.currentTime,
'CLOSE_TIME' : self.currentTime } )
self.fileset1.commit()
jobFactory = self.splitterFactory(package = "WMCore.WMBS",
subscription = self.subscription1)
self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
transaction = False)
mySplitArgs['maxInputEvents'] = 900
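        # Lumis 1+2 (4 files, 400 events) stay below the 900-event limit and form one
        # multi-lumi job; lumi 3 (2 files, 1000 events) exceeds it and is split into
        # two single-file jobs.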
jobGroups = jobFactory(**mySplitArgs)
self.assertEqual(len(jobGroups), 1,
"ERROR: JobFactory didn't return one JobGroup")
self.assertEqual(len(jobGroups[0].jobs), 3,
"ERROR: JobFactory didn't create three jobs")
self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
"ERROR: first job does not process 4 files")
self.assertEqual(len(jobGroups[0].jobs[1].getFiles()), 1,
"ERROR: second job does not process 1 file")
self.assertEqual(len(jobGroups[0].jobs[2].getFiles()), 1,
"ERROR: third job does not process 1 file")
return
if __name__ == '__main__':
unittest.main()
| 36.576068 | 104 | 0.504463 | 0 | 0 | 0
71b47648bb110a0a8317411106c6b0ab97f3e2a5 | 54,933 | py | Python | src/ext_libs/edflibpy/edfwriter.py | greydongilmore/merPrep | d84fd5617667180ae88805a7b73d5865b79026bd | ["MIT"] | null | null | null | src/ext_libs/edflibpy/edfwriter.py | greydongilmore/merPrep | d84fd5617667180ae88805a7b73d5865b79026bd | ["MIT"] | null | null | null | src/ext_libs/edflibpy/edfwriter.py | greydongilmore/merPrep | d84fd5617667180ae88805a7b73d5865b79026bd | ["MIT"] | null | null | null |
#############################################################################
#
# Copyright (c) 2020 Teunis van Beelen
# All rights reserved.
#
# Email: teuniz@protonmail.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#############################################################################
import sys
import io
import os
import string
import array
from collections import namedtuple
import numpy as np
from datetime import datetime
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("Must be using Python version >= 3.5.0")
sys.exit()
if np.__version__ < "1.17.0":
print("Must be using NumPy version >= 1.17.0")
sys.exit()
################################################################################
# START class EDFwriter
################################################################################
class EDFwriter:
"""A writer for EDF+ and BDF+ files.
EDF header:
offset (hex, dec) length
---------------------------------------------------------------------
0x00 0 8 ascii : version of this data format (0)
0x08 8 80 ascii : local patient identification
0x58 88 80 ascii : local recording identification
0xA8 168 8 ascii : startdate of recording (dd.mm.yy)
0xB0 176 8 ascii : starttime of recording (hh.mm.ss)
0xB8 184 8 ascii : number of bytes in header record
0xC0 192 44 ascii : reserved
0xEC 236 8 ascii : number of data records (-1 if unknown)
0xF4 244 8 ascii : duration of a data record, in seconds
0xFC 252 4 ascii : number of signals
0x00 0 ns * 16 ascii : ns * label (e.g. EEG Fpz-Cz or Body temp)
ns * 0x10 ns * 16 ns * 80 ascii : ns * transducer type (e.g. AgAgCl electrode)
ns * 0x60 ns * 96 ns * 8 ascii : ns * physical dimension (e.g. uV or degreeC)
ns * 0x68 ns * 104 ns * 8 ascii : ns * physical minimum (e.g. -500 or 34)
ns * 0x70 ns * 112 ns * 8 ascii : ns * physical maximum (e.g. 500 or 40)
ns * 0x78 ns * 120 ns * 8 ascii : ns * digital minimum (e.g. -2048)
ns * 0x80 ns * 128 ns * 8 ascii : ns * digital maximum (e.g. 2047)
ns * 0x88 ns * 136 ns * 80 ascii : ns * prefiltering (e.g. HP:0.1Hz LP:75Hz N:60)
ns * 0xD8 ns * 216 ns * 8 ascii : ns * nr of samples in each data record
ns * 0xE0 ns * 224 ns * 32 ascii : ns * reserved
ns: number of signals
All fields are left aligned and filled up with spaces, no NULL's.
Only printable ASCII characters are allowed.
Decimal separator (if any) must be a dot. No grouping characters in numbers.
For more info about the EDF and EDF+ format, visit: https://edfplus.info/specs/
For more info about the BDF and BDF+ format, visit: https://www.teuniz.net/edfbrowser/bdfplus%20format%20description.html
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
For a better explanation about the relation between digital data and physical data,
read the document "Coding Schemes Used with Data Converters" (PDF):
https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sbaa042
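    As a worked example with illustrative values: physical max = 3000, physical min = -3000,
    digital max = 32767 and digital min = -32768 give units per bit = 6000 / 65535 (about 0.0916)
    and offset = (3000 / (6000 / 65535)) - 32767 = 32767.5 - 32767 = 0.5.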
note: An EDF file usually contains multiple so-called datarecords. One datarecord usually has a duration of one second (this is the default but it is not mandatory!).
    In that case a file with a duration of five minutes contains 300 datarecords. The duration of a datarecord can be freely chosen but, if possible, use values from
    0.1 to 1 second for easier handling. Just make sure that the total size of one datarecord, expressed in bytes, does not exceed 10MByte (15MByte for BDF(+)).
    The RECOMMENDATION of a maximum datarecord size of 61440 bytes in the EDF and EDF+ specification was useful at the time people were still using DOS as their main operating system.
    Using DOS and fast (near) pointers (16-bit pointers), the maximum allocatable block of memory was 64KByte.
    This is not a concern anymore so the maximum datarecord size now is limited to 10MByte for EDF(+) and 15MByte for BDF(+). This helps to accommodate the higher sampling rates
used by modern Analog to Digital Converters.
EDF header character encoding: The EDF specification says that only (printable) ASCII characters are allowed.
When writing the header info, EDFlib will assume you are using Latin1 encoding and it will automatically convert
characters with accents, umlauts, tilde, etc. to their "normal" equivalent without the accent/umlaut/tilde/etc.
in order to create a valid EDF file.
The description/name of an EDF+ annotation on the other hand, is encoded in UTF-8.
author: Teunis van Beelen
"""
EDFLIB_TIME_DIMENSION = 10000000
EDFLIB_MAXSIGNALS = 640
EDFLIB_MAX_ANNOTATION_LEN = 512
EDFSEEK_SET = 0
EDFSEEK_CUR = 1
EDFSEEK_END = 2
EDFLIB_FILETYPE_EDF = 0
EDFLIB_FILETYPE_EDFPLUS = 1
EDFLIB_FILETYPE_BDF = 2
EDFLIB_FILETYPE_BDFPLUS = 3
EDFLIB_MALLOC_ERROR = -1
EDFLIB_NO_SUCH_FILE_OR_DIRECTORY = -2
EDFLIB_FILE_CONTAINS_FORMAT_ERRORS = -3
EDFLIB_MAXFILES_REACHED = -4
EDFLIB_FILE_READ_ERROR = -5
EDFLIB_FILE_ALREADY_OPENED = -6
EDFLIB_FILETYPE_ERROR = -7
EDFLIB_FILE_WRITE_ERROR = -8
EDFLIB_NUMBER_OF_SIGNALS_INVALID = -9
EDFLIB_FILE_IS_DISCONTINUOUS = -10
EDFLIB_INVALID_READ_ANNOTS_VALUE = -11
EDFLIB_INVALID_ARGUMENT = -12
EDFLIB_FILE_CLOSED = -13
EDFLIB_DO_NOT_READ_ANNOTATIONS = 0
EDFLIB_READ_ANNOTATIONS = 1
EDFLIB_READ_ALL_ANNOTATIONS = 2
EDFLIB_NO_SIGNALS = -20
EDFLIB_TOO_MANY_SIGNALS = -21
EDFLIB_NO_SAMPLES_IN_RECORD = -22
EDFLIB_DIGMIN_IS_DIGMAX = -23
EDFLIB_DIGMAX_LOWER_THAN_DIGMIN = -24
EDFLIB_PHYSMIN_IS_PHYSMAX = -25
EDFLIB_DATARECORD_SIZE_TOO_BIG = -26
EDFLIB_VERSION = 100
# max size of annotationtext
__EDFLIB_WRITE_MAX_ANNOTATION_LEN = 40
# bytes in datarecord for EDF annotations, must be an integer multiple of three and two
__EDFLIB_ANNOTATION_BYTES = 114
# for writing only
__EDFLIB_MAX_ANNOTATION_CHANNELS = 64
__EDFLIB_ANNOT_MEMBLOCKSZ = 1000
__EDFAnnotationStruct = namedtuple("annotation", ["onset", "duration", "description"])
def close(self) -> int:
"""Finalizes and closes the file.
This function is required after writing. Failing to do so will cause a corrupted and incomplete file.
Returns 0 on success, otherwise -1.
"""
if self.__status_ok:
if self.__datarecords < 100000000:
self.__file_out.seek(236, io.SEEK_SET)
if self.__fprint_int_number_nonlocalized(self.__file_out, self.__datarecords, 0, 0) < 2:
self.__file_out.write(bytes(" ", encoding="ascii"))
self.__write_annotations()
self.__file_out.close()
self.__status_ok = 0
return 0
else:
return -1
def version(self) -> int:
"""If version is 1.00 then it will return 100."""
return self.EDFLIB_VERSION
def setSampleFrequency(self, s: int, sf: int) -> int:
"""Sets the samplefrequency of signal s.
        (In reality, it sets the number of samples in a datarecord.)
The samplefrequency of a signal is determined as: sf = number of samples in a datarecord / datarecord duration.
The samplefrequency equals the number of samples in a datarecord only when the datarecord duration is set to the default of one second.
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
sf is the samplefrequency.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0) or (sf < 1):
return -1
self.__param_smp_per_record[s] = sf
return 0
def setPhysicalMaximum(self, s: int, phys_max: float) -> int:
"""Sets the maximum physical value of signal s.
This is the value of the input of the ADC when the output equals the value of "digital maximum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
phys_max is the maximum input value.
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_phys_max[s] = phys_max
return 0
def setPhysicalMinimum(self, s: int, phys_min: float) -> int:
"""Sets the minimum physical value of signal s.
This is the value of the input of the ADC when the output equals the value of "digital minimum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
phys_min is the minimum input value.
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_phys_min[s] = phys_min
return 0
def setDigitalMaximum(self, s: int, dig_max: int) -> int:
"""Sets the maximum digital value of signal s.
This is the value of the output of the ADC when the input equals the value of "physical maximum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
dig_max is the maximum output value (<= 32767 for EDF and <= 8388607 for BDF).
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
if self.__edf != 0:
if dig_max > 32767:
return -1
else:
if dig_max > 8388607:
return -1
self.__param_dig_max[s] = dig_max
return 0
def setDigitalMinimum(self, s: int, dig_min: int) -> int:
"""Sets the minimum digital value of signal s.
This is the value of the output of the ADC when the input equals the value of "physical minimum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
dig_min is the minimum output value (>= -32768 for EDF and >= -8388608 for BDF).
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
if self.__edf != 0:
if dig_min < -32768:
return -1
else:
if dig_min < -8388608:
return -1
self.__param_dig_min[s] = dig_min
return 0
def setSignalLabel(self, s: int, label: str) -> int:
"""Sets the label (name) of signal s.
(e.g. "FP1", "SaO2", etc.) String must contain printable ASCII only.
This function is recommended for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
label is the signallabel.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_label[s] = label
return 0
def setPreFilter(self, s: int, prefilter: str) -> int:
"""Sets the prefilter description of signal s.
(e.g. "HP:0.05Hz", "LP:250Hz", "N:60Hz", etc.) String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
s is the signal number (zero-based).
prefilter is the prefilter description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_prefilter[s] = prefilter
return 0
def setTransducer(self, s: int, transducer: str) -> int:
"""Sets the transducer description of signal s.
("AgAgCl cup electrodes", etc.) String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
s is the signal number (zero-based).
transducer is the transducer description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_transducer[s] = transducer
return 0
def setPhysicalDimension(self, s: int, physical_dimension: str) -> int:
"""Sets the physical_dimension (unit) of signal s.
("uV", "BPM", "mA", "Degr.", etc.) String must contain printable ASCII only.
This function recommended for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
physical_dimension is the physical dimension description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_physdimension[s] = physical_dimension
return 0
def setStartDateTime(self, year: int, month: int, day: int, hour: int, minute: int, second: int, subsecond: int) -> int:
"""
Sets the startdate and starttime.
If not called, the system date and time at runtime will be used.
This function is optional and can be called only before the first sample write action.
If subsecond precision is not needed or not applicable, leave it at zero.
year: 1970 - 3000
month: 1 - 12
day: 1 - 31
hour: 0 - 23
minute: 0 - 59
second: 0 - 59
subsecond: 0 - 9999 expressed in units of 100 microSeconds
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (year < 1970) or (year > 3000) or \
(month < 1) or (month > 12) or \
(day < 1) or (day > 31) or \
(hour < 0) or (hour > 23) or \
(minute < 0) or (minute > 59) or \
(second < 0) or (second > 59) or \
(subsecond < 0) or (subsecond > 9999):
return -1
self.__startdate_year = year
self.__startdate_month = month
self.__startdate_day = day
self.__starttime_hour = hour
self.__starttime_minute = minute
self.__starttime_second = second
self.__starttime_offset = subsecond * 1000
return 0
def setPatientName(self, name: str) -> int:
"""Sets the patientname.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patient_name = name
return 0
def setPatientCode(self, code: str) -> int:
"""Sets the patientcode.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patientcode = code
return 0
def setPatientGender(self, gender: int) -> int:
"""Sets the patient's gender.
gender: 0 = female, 1 = male, 2 = unknown or not applicable (default)
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (gender < 0) or (gender > 2):
return -1
self.__plus_gender = gender
return 0
def setPatientBirthDate(self, year: int, month: int, day: int) -> int:
"""Sets the patients' birthdate.
This function is optional and can be called only before the first sample write action.
year: 1800 - 3000
month: 1 - 12
day: 1 - 31
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (year < 1800) or (year > 3000) or \
(month < 1) or (month > 12) or \
(day < 1) or (day > 31):
return -1
self.__plus_birthdate_year = year
self.__plus_birthdate_month = month
self.__plus_birthdate_day = day
return 0
def setAdditionalPatientInfo(self, additional: str) -> int:
"""Sets the additional information related to the patient.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patient_additional = additional
return 0
def setAdministrationCode(self, admin_code: str) -> int:
"""Sets the administration code.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_admincode = admin_code
return 0
def setTechnician(self, technician: str) -> int:
"""Sets the name or id of the technician who performed the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_technician = technician
return 0
def setEquipment(self, equipment: str) -> int:
"""Sets the description of the equipment used for the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_equipment = equipment
return 0
def setAdditionalRecordingInfo(self, additional: str) -> int:
"""Sets the additional info related to the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_recording_additional = additional
return 0
def writeSamples(self, buf: np.array) -> int:
"""Write samples.
Writes sf samples into the file.
Buf must be a one-dimensional numpy array containing samples of one signal of datatype int32, float_ or float64.
        For EDF, datatype int16 can also be used.
If buf is of type integer, the samples are written into the file without any conversion.
If buf is of type float, the physical samples will be converted to digital samples using the
values of physical maximum, physical minimum, digital maximum and digital minimum.
The number of samples written is equal to the samplefrequency of the signal.
(actually, it's the value that is set with setSampleFrequency()).
Size of buf should be equal to or bigger than the samplefrequency.
Call this function for every signal in the file. The order is important!
When there are 4 signals in the file, the order of calling this function
must be: signal 0, signal 1, signal 2, signal 3, signal 0, signal 1, signal 2, etc.
The end of a recording must always be at the end of a complete cycle.
        buf is a one-dimensional numpy array of datatype int32, float_ or float64. For EDF, datatype int16 can also be used.
Returns 0 on success, otherwise -1.
"""
if self.__status_ok == 0:
return -1
if buf.ndim != 1:
return -1
if (buf.dtype != np.int16) and (buf.dtype != np.int32) and (buf.dtype != np.float_) and (buf.dtype != np.float64):
return -1
if (buf.dtype == np.int16) and (self.__bdf != 0):
return -1
edfsignal = self.__signal_write_sequence_pos
if self.__datarecords == 0:
if edfsignal == 0:
error = self.__write_edf_header()
if error != 0:
return error
sf = self.__param_smp_per_record[edfsignal]
digmax = self.__param_dig_max[edfsignal]
digmin = self.__param_dig_min[edfsignal]
if sf > buf.size:
return -1
if self.__edf != 0:
if (buf.dtype == np.int16) or (buf.dtype == np.int32):
for i in range(0, sf):
if buf[i] > digmax:
buf[i] = digmax
if buf[i] < digmin:
buf[i] = digmin
self.__file_out.write(buf[i].astype("int16").tobytes(order="C"))
else:
for i in range(0, sf):
value = int((buf[i] / self.__param_bitvalue[edfsignal]) - self.__param_offset[edfsignal])
if value > digmax:
value = digmax
if value < digmin:
value = digmin
self.__file_out.write(value.to_bytes(2, byteorder="little", signed=True))
else:
if buf.dtype == np.int32:
for i in range(0, sf):
value = int(buf[i])
if value > digmax:
value = digmax
if value < digmin:
value = digmin
self.__file_out.write(value.to_bytes(3, byteorder="little", signed=True))
else:
for i in range(0, sf):
value = int((buf[i] / self.__param_bitvalue[edfsignal]) - self.__param_offset[edfsignal])
if value > digmax:
value = digmax
if value < digmin:
value = digmin
self.__file_out.write(value.to_bytes(3, byteorder="little", signed=True))
self.__signal_write_sequence_pos += 1
if self.__signal_write_sequence_pos == self.__edfsignals:
self.__signal_write_sequence_pos = 0
if self.__write_tal(self.__file_out) != 0:
return -1
self.__datarecords += 1
return 0
def setDataRecordDuration(self, duration: int) -> int:
"""Sets the datarecord duration.
        This function is optional; normally you don't need to change the default value of one second.
        If used, it must be called only before the first sample write action.
This function can be used when you want to use a non-integer samplerate.
For example, if you want to use a samplerate of 0.5 Hz, set the samplefrequency to 5 Hz and
the datarecord duration to 10 seconds, or alternatively, set the samplefrequency to 1 Hz and
the datarecord duration to 2 seconds.
This function can also be used when you want to use a very high samplerate.
For example, if you want to use a samplerate of 5 GHz,
set the samplefrequency to 5000 Hz and the datarecord duration to 1 microSecond.
Do not use this function if not necessary.
duration is expressed in microSeconds, range: 1 - 60000000 (1uSec. - 60 sec.)
Returns 0 on success, otherwise -1.
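        Example (hypothetical values): for an effective samplerate of 0.5 Hz,
            hdl.setSampleFrequency(0, 5)
            hdl.setDataRecordDuration(10000000)   # 10 seconds, expressed in microSeconds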
"""
if (duration < 1) or (duration > 60000000) or (self.__datarecords != 0):
return -1
self.__long_data_record_duration = duration * 10
return 0
def setNumberOfAnnotationSignals(self, annot_signals: int) -> int:
"""Sets the number of annotation signals.
The default value is 1.
This function is optional and, if used, must be called before the first sample write action.
        Normally you don't need to change the default value. Use this function to increase
        the storage space for annotations only when the number of annotations you want to
        write is higher than the number of datarecords in the recording.
        Returns 0 on success, otherwise -1.
"""
if (annot_signals < 1) or (annot_signals >= self.__EDFLIB_MAX_ANNOTATION_CHANNELS) or (self.__datarecords != 0):
return -1
self.__nr_annot_chns = annot_signals
return 0
def writeAnnotation(self, onset: int, duration: int, description: str) -> int:
"""Writes an annotation/event to the file.
onset is relative to the starttime of the recording and must be >= 0.
onset and duration are in units of 100 microSeconds. Resolution is 0.0001 second.
E.g. 34.071 seconds must be written as 340710.
If duration is unknown or not applicable: set a negative number (-1).
Description is a string containing the text that describes the event.
        This function is optional.
        Returns 0 on success, otherwise -1.
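        Example (hypothetical): an event of unknown duration at 34.071 seconds:
            hdl.writeAnnotation(340710, -1, "lights off")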
"""
if (self.__status_ok == 0) or (onset < 0):
return -1
self.__annotationslist.append(self.__EDFAnnotationStruct(onset = onset, duration = duration, description = description))
self.__annots_in_file += 1
return 0
################################################################################
# from here only internal utils
################################################################################
# writes the EDF header
# writes a TAL
# writes the annotations to the file
    # minimum is the minimum digits that will be printed (minus sign not included), leading zeros will be added if necessary
    # if sign is zero, only negative numbers will have the sign '-' character
    # if sign is one, the sign '+' or '-' character will always be printed
    # returns the number of characters printed
    # minimum is the minimum digits that will be printed (minus sign not included), leading zeros will be added if necessary
    # if sign is zero, only negative numbers will have the sign '-' character
    # if sign is one, the sign '+' or '-' character will always be printed
    # returns the number of characters printed
    # minimum is the minimum digits that will be printed (minus sign not included), leading zeros will be added if necessary
    # if sign is zero, only negative numbers will have the sign '-' character
    # if sign is one, the sign '+' or '-' character will always be printed
    # returns the number of characters printed
# get string length
# copy a string
# converts Latin-1 to ASCII
################################################################################
# END class EDFwriter
################################################################################
################################################################################
# START class EDFexception
################################################################################
################################################################################
# END class EDFexception
################################################################################
| 33.232305 | 182 | 0.615623 | #############################################################################
#
# Copyright (c) 2020 Teunis van Beelen
# All rights reserved.
#
# Email: teuniz@protonmail.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES
# LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#############################################################################
import sys
import io
import os
import string
import array
from collections import namedtuple
import numpy as np
from datetime import datetime
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("Must be using Python version >= 3.5.0")
sys.exit()
if tuple(int(v) for v in np.__version__.split(".")[:2]) < (1, 17):  # numeric compare; a plain string compare misorders versions like "1.100.0"
print("Must be using NumPy version >= 1.17.0")
sys.exit()
################################################################################
# START class EDFwriter
################################################################################
class EDFwriter:
"""A writer for EDF+ and BDF+ files.
EDF header:
offset (hex, dec) length
---------------------------------------------------------------------
0x00 0 8 ascii : version of this data format (0)
0x08 8 80 ascii : local patient identification
0x58 88 80 ascii : local recording identification
0xA8 168 8 ascii : startdate of recording (dd.mm.yy)
0xB0 176 8 ascii : starttime of recording (hh.mm.ss)
0xB8 184 8 ascii : number of bytes in header record
0xC0 192 44 ascii : reserved
0xEC 236 8 ascii : number of data records (-1 if unknown)
0xF4 244 8 ascii : duration of a data record, in seconds
0xFC 252 4 ascii : number of signals
0x00 0 ns * 16 ascii : ns * label (e.g. EEG Fpz-Cz or Body temp)
ns * 0x10 ns * 16 ns * 80 ascii : ns * transducer type (e.g. AgAgCl electrode)
ns * 0x60 ns * 96 ns * 8 ascii : ns * physical dimension (e.g. uV or degreeC)
ns * 0x68 ns * 104 ns * 8 ascii : ns * physical minimum (e.g. -500 or 34)
ns * 0x70 ns * 112 ns * 8 ascii : ns * physical maximum (e.g. 500 or 40)
ns * 0x78 ns * 120 ns * 8 ascii : ns * digital minimum (e.g. -2048)
ns * 0x80 ns * 128 ns * 8 ascii : ns * digital maximum (e.g. 2047)
ns * 0x88 ns * 136 ns * 80 ascii : ns * prefiltering (e.g. HP:0.1Hz LP:75Hz N:60)
ns * 0xD8 ns * 216 ns * 8 ascii : ns * nr of samples in each data record
ns * 0xE0 ns * 224 ns * 32 ascii : ns * reserved
ns: number of signals
    All fields are left aligned and filled up with spaces, no NULLs.
Only printable ASCII characters are allowed.
Decimal separator (if any) must be a dot. No grouping characters in numbers.
For more info about the EDF and EDF+ format, visit: https://edfplus.info/specs/
For more info about the BDF and BDF+ format, visit: https://www.teuniz.net/edfbrowser/bdfplus%20format%20description.html
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
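    Worked example (illustrative numbers, not taken from the specification): with a
    physical range of -500 to 500 uV and a digital range of -2048 to 2047,
    units per bit = (500 - -500) / (2047 - -2048) = 1000 / 4095 ~= 0.2442 uV/bit,
    and offset = (500 / 0.2442) - 2047 ~= 0.5.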
For a better explanation about the relation between digital data and physical data,
read the document "Coding Schemes Used with Data Converters" (PDF):
https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sbaa042
note: An EDF file usually contains multiple so-called datarecords. One datarecord usually has a duration of one second (this is the default but it is not mandatory!).
    In that case a file with a duration of five minutes contains 300 datarecords. The duration of a datarecord can be freely chosen but, if possible, use values from
0.1 to 1 second for easier handling. Just make sure that the total size of one datarecord, expressed in bytes, does not exceed 10MByte (15MBytes for BDF(+)).
    The RECOMMENDATION of a maximum datarecordsize of 61440 bytes in the EDF and EDF+ specification was useful in the days when people were still using DOS as their main operating system.
Using DOS and fast (near) pointers (16-bit pointers), the maximum allocatable block of memory was 64KByte.
    This is not a concern anymore so the maximum datarecord size now is limited to 10MByte for EDF(+) and 15MByte for BDF(+). This helps to accommodate the higher sampling rates
used by modern Analog to Digital Converters.
EDF header character encoding: The EDF specification says that only (printable) ASCII characters are allowed.
When writing the header info, EDFlib will assume you are using Latin1 encoding and it will automatically convert
characters with accents, umlauts, tilde, etc. to their "normal" equivalent without the accent/umlaut/tilde/etc.
in order to create a valid EDF file.
The description/name of an EDF+ annotation on the other hand, is encoded in UTF-8.
author: Teunis van Beelen
"""
EDFLIB_TIME_DIMENSION = 10000000
EDFLIB_MAXSIGNALS = 640
EDFLIB_MAX_ANNOTATION_LEN = 512
EDFSEEK_SET = 0
EDFSEEK_CUR = 1
EDFSEEK_END = 2
EDFLIB_FILETYPE_EDF = 0
EDFLIB_FILETYPE_EDFPLUS = 1
EDFLIB_FILETYPE_BDF = 2
EDFLIB_FILETYPE_BDFPLUS = 3
EDFLIB_MALLOC_ERROR = -1
EDFLIB_NO_SUCH_FILE_OR_DIRECTORY = -2
EDFLIB_FILE_CONTAINS_FORMAT_ERRORS = -3
EDFLIB_MAXFILES_REACHED = -4
EDFLIB_FILE_READ_ERROR = -5
EDFLIB_FILE_ALREADY_OPENED = -6
EDFLIB_FILETYPE_ERROR = -7
EDFLIB_FILE_WRITE_ERROR = -8
EDFLIB_NUMBER_OF_SIGNALS_INVALID = -9
EDFLIB_FILE_IS_DISCONTINUOUS = -10
EDFLIB_INVALID_READ_ANNOTS_VALUE = -11
EDFLIB_INVALID_ARGUMENT = -12
EDFLIB_FILE_CLOSED = -13
EDFLIB_DO_NOT_READ_ANNOTATIONS = 0
EDFLIB_READ_ANNOTATIONS = 1
EDFLIB_READ_ALL_ANNOTATIONS = 2
EDFLIB_NO_SIGNALS = -20
EDFLIB_TOO_MANY_SIGNALS = -21
EDFLIB_NO_SAMPLES_IN_RECORD = -22
EDFLIB_DIGMIN_IS_DIGMAX = -23
EDFLIB_DIGMAX_LOWER_THAN_DIGMIN = -24
EDFLIB_PHYSMIN_IS_PHYSMAX = -25
EDFLIB_DATARECORD_SIZE_TOO_BIG = -26
EDFLIB_VERSION = 100
# max size of annotationtext
__EDFLIB_WRITE_MAX_ANNOTATION_LEN = 40
# bytes in datarecord for EDF annotations, must be an integer multiple of three and two
__EDFLIB_ANNOTATION_BYTES = 114
# for writing only
__EDFLIB_MAX_ANNOTATION_CHANNELS = 64
__EDFLIB_ANNOT_MEMBLOCKSZ = 1000
__EDFAnnotationStruct = namedtuple("annotation", ["onset", "duration", "description"])
def __init__(self, p_path: str, f_file_type: int, number_of_signals: int):
self.__path = p_path
self.__filetype = f_file_type
self.__edfsignals = number_of_signals
self.__status_ok = 0
self.__edf = 0
self.__bdf = 0
self.__plus_patientcode = ""
self.__plus_birthdate = ""
self.__plus_patient_name = ""
self.__plus_patient_additional = ""
self.__plus_startdate = ""
self.__plus_admincode = ""
self.__plus_technician = ""
self.__plus_equipment = ""
self.__plus_recording_additional = ""
self.__annotationslist = []
self.__nr_annot_chns = 1
self.__long_data_record_duration = self.EDFLIB_TIME_DIMENSION
self.__annotlist_sz = 0
self.__annots_in_file = 0
self.__plus_gender = 2
self.__datarecords = 0
self.__recordsize = 0
self.__signal_write_sequence_pos = 0
self.__total_annot_bytes = 0
self.__startdate_year = 0
self.__startdate_month = 0
self.__startdate_day = 0
self.__starttime_hour = 0
self.__starttime_minute = 0
self.__starttime_second = 0
self.__starttime_offset = 0
self.__plus_birthdate_year = 0
self.__plus_birthdate_month = 0
self.__plus_birthdate_day = 0
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
raise EDFexception("Must be using Python version >= 3.5.0")
if (self.__edfsignals < 1) or (self.__edfsignals > self.EDFLIB_MAXSIGNALS):
raise EDFexception("Invalid number of signals.")
if (self.__filetype != self.EDFLIB_FILETYPE_EDFPLUS) and (self.__filetype != self.EDFLIB_FILETYPE_BDFPLUS):
raise EDFexception("Invalid filetype.")
try:
self.__file_out = open(self.__path, "wb")
except OSError as e:
raise EDFexception("Can not open file for writing: %s" %(e.strerror))
if self.__filetype == self.EDFLIB_FILETYPE_EDFPLUS:
self.__edf = 1
else:
self.__bdf = 1
self.__param_label = [""] * self.__edfsignals
self.__param_transducer = [""] * self.__edfsignals
self.__param_physdimension = [""] * self.__edfsignals
self.__param_phys_min = [0.0] * self.__edfsignals
self.__param_phys_max = [0.0] * self.__edfsignals
self.__param_dig_min = [0] * self.__edfsignals
self.__param_dig_max = [0] * self.__edfsignals
self.__param_prefilter = [""] * self.__edfsignals
self.__param_smp_per_record = [0] * self.__edfsignals
self.__param_offset = [0.0] * self.__edfsignals
self.__param_buf_offset = [0] * self.__edfsignals
self.__param_bitvalue = [0.0] * self.__edfsignals
self.__status_ok = 1
def close(self) -> int:
"""Finalizes and closes the file.
This function is required after writing. Failing to do so will cause a corrupted and incomplete file.
Returns 0 on success, otherwise -1.
"""
if self.__status_ok:
if self.__datarecords < 100000000:
self.__file_out.seek(236, io.SEEK_SET)
if self.__fprint_int_number_nonlocalized(self.__file_out, self.__datarecords, 0, 0) < 2:
self.__file_out.write(bytes(" ", encoding="ascii"))
self.__write_annotations()
self.__file_out.close()
self.__status_ok = 0
return 0
else:
return -1
def version(self) -> int:
"""If version is 1.00 then it will return 100."""
return self.EDFLIB_VERSION
def setSampleFrequency(self, s: int, sf: int) -> int:
"""Sets the samplefrequency of signal s.
        (In reality, it sets the number of samples in a datarecord.)
The samplefrequency of a signal is determined as: sf = number of samples in a datarecord / datarecord duration.
The samplefrequency equals the number of samples in a datarecord only when the datarecord duration is set to the default of one second.
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
sf is the samplefrequency.
Returns 0 on success, otherwise -1.
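        Example (hypothetical): with the default datarecord duration of one second,
            hdl.setSampleFrequency(0, 256)   # signal 0 is sampled at 256 Hz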
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0) or (sf < 1):
return -1
self.__param_smp_per_record[s] = sf
return 0
def setPhysicalMaximum(self, s: int, phys_max: float) -> int:
"""Sets the maximum physical value of signal s.
This is the value of the input of the ADC when the output equals the value of "digital maximum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
phys_max is the maximum input value.
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_phys_max[s] = phys_max
return 0
def setPhysicalMinimum(self, s: int, phys_min: float) -> int:
"""Sets the minimum physical value of signal s.
This is the value of the input of the ADC when the output equals the value of "digital minimum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
phys_min is the minimum input value.
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_phys_min[s] = phys_min
return 0
def setDigitalMaximum(self, s: int, dig_max: int) -> int:
"""Sets the maximum digital value of signal s.
This is the value of the output of the ADC when the input equals the value of "physical maximum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
dig_max is the maximum output value (<= 32767 for EDF and <= 8388607 for BDF).
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
if self.__edf != 0:
if dig_max > 32767:
return -1
else:
if dig_max > 8388607:
return -1
self.__param_dig_max[s] = dig_max
return 0
def setDigitalMinimum(self, s: int, dig_min: int) -> int:
"""Sets the minimum digital value of signal s.
This is the value of the output of the ADC when the input equals the value of "physical minimum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
dig_min is the minimum output value (>= -32768 for EDF and >= -8388608 for BDF).
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
if self.__edf != 0:
if dig_min < -32768:
return -1
else:
if dig_min < -8388608:
return -1
self.__param_dig_min[s] = dig_min
return 0
def setSignalLabel(self, s: int, label: str) -> int:
"""Sets the label (name) of signal s.
(e.g. "FP1", "SaO2", etc.) String must contain printable ASCII only.
This function is recommended for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
label is the signallabel.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_label[s] = label
return 0
def setPreFilter(self, s: int, prefilter: str) -> int:
"""Sets the prefilter description of signal s.
(e.g. "HP:0.05Hz", "LP:250Hz", "N:60Hz", etc.) String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
s is the signal number (zero-based).
prefilter is the prefilter description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_prefilter[s] = prefilter
return 0
def setTransducer(self, s: int, transducer: str) -> int:
"""Sets the transducer description of signal s.
("AgAgCl cup electrodes", etc.) String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
s is the signal number (zero-based).
transducer is the transducer description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_transducer[s] = transducer
return 0
def setPhysicalDimension(self, s: int, physical_dimension: str) -> int:
"""Sets the physical_dimension (unit) of signal s.
("uV", "BPM", "mA", "Degr.", etc.) String must contain printable ASCII only.
        This function is recommended for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
physical_dimension is the physical dimension description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_physdimension[s] = physical_dimension
return 0
def setStartDateTime(self, year: int, month: int, day: int, hour: int, minute: int, second: int, subsecond: int) -> int:
"""
Sets the startdate and starttime.
If not called, the system date and time at runtime will be used.
This function is optional and can be called only before the first sample write action.
If subsecond precision is not needed or not applicable, leave it at zero.
year: 1970 - 3000
month: 1 - 12
day: 1 - 31
hour: 0 - 23
minute: 0 - 59
second: 0 - 59
subsecond: 0 - 9999 expressed in units of 100 microSeconds
Returns 0 on success, otherwise -1.
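        Example (hypothetical): a recording started on 2021-03-05 at 14:30:00.5:
            hdl.setStartDateTime(2021, 3, 5, 14, 30, 0, 5000)   # 5000 * 100 uSec = 0.5 sec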
"""
if self.__datarecords != 0:
return -1
if (year < 1970) or (year > 3000) or \
(month < 1) or (month > 12) or \
(day < 1) or (day > 31) or \
(hour < 0) or (hour > 23) or \
(minute < 0) or (minute > 59) or \
(second < 0) or (second > 59) or \
(subsecond < 0) or (subsecond > 9999):
return -1
self.__startdate_year = year
self.__startdate_month = month
self.__startdate_day = day
self.__starttime_hour = hour
self.__starttime_minute = minute
self.__starttime_second = second
self.__starttime_offset = subsecond * 1000
return 0
def setPatientName(self, name: str) -> int:
"""Sets the patientname.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patient_name = name
return 0
def setPatientCode(self, code: str) -> int:
"""Sets the patientcode.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patientcode = code
return 0
def setPatientGender(self, gender: int) -> int:
"""Sets the patient's gender.
gender: 0 = female, 1 = male, 2 = unknown or not applicable (default)
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (gender < 0) or (gender > 2):
return -1
self.__plus_gender = gender
return 0
def setPatientBirthDate(self, year: int, month: int, day: int) -> int:
"""Sets the patients' birthdate.
This function is optional and can be called only before the first sample write action.
year: 1800 - 3000
month: 1 - 12
day: 1 - 31
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (year < 1800) or (year > 3000) or \
(month < 1) or (month > 12) or \
(day < 1) or (day > 31):
return -1
self.__plus_birthdate_year = year
self.__plus_birthdate_month = month
self.__plus_birthdate_day = day
return 0
def setAdditionalPatientInfo(self, additional: str) -> int:
"""Sets the additional information related to the patient.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patient_additional = additional
return 0
def setAdministrationCode(self, admin_code: str) -> int:
"""Sets the administration code.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_admincode = admin_code
return 0
def setTechnician(self, technician: str) -> int:
"""Sets the name or id of the technician who performed the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_technician = technician
return 0
def setEquipment(self, equipment: str) -> int:
"""Sets the description of the equipment used for the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_equipment = equipment
return 0
def setAdditionalRecordingInfo(self, additional: str) -> int:
"""Sets the additional info related to the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_recording_additional = additional
return 0
def writeSamples(self, buf: np.array) -> int:
"""Write samples.
Writes sf samples into the file.
Buf must be a one-dimensional numpy array containing samples of one signal of datatype int32, float_ or float64.
        For EDF, datatype int16 can also be used.
If buf is of type integer, the samples are written into the file without any conversion.
If buf is of type float, the physical samples will be converted to digital samples using the
values of physical maximum, physical minimum, digital maximum and digital minimum.
The number of samples written is equal to the samplefrequency of the signal.
(actually, it's the value that is set with setSampleFrequency()).
Size of buf should be equal to or bigger than the samplefrequency.
Call this function for every signal in the file. The order is important!
When there are 4 signals in the file, the order of calling this function
must be: signal 0, signal 1, signal 2, signal 3, signal 0, signal 1, signal 2, etc.
The end of a recording must always be at the end of a complete cycle.
        buf is a one-dimensional numpy array of datatype int32, float_ or float64. For EDF, datatype int16 can also be used.
Returns 0 on success, otherwise -1.
"""
if self.__status_ok == 0:
return -1
if buf.ndim != 1:
return -1
if (buf.dtype != np.int16) and (buf.dtype != np.int32) and (buf.dtype != np.float_) and (buf.dtype != np.float64):
return -1
if (buf.dtype == np.int16) and (self.__bdf != 0):
return -1
edfsignal = self.__signal_write_sequence_pos
if self.__datarecords == 0:
if edfsignal == 0:
error = self.__write_edf_header()
if error != 0:
return error
sf = self.__param_smp_per_record[edfsignal]
digmax = self.__param_dig_max[edfsignal]
digmin = self.__param_dig_min[edfsignal]
if sf > buf.size:
return -1
if self.__edf != 0:
if (buf.dtype == np.int16) or (buf.dtype == np.int32):
for i in range(0, sf):
if buf[i] > digmax:
buf[i] = digmax
if buf[i] < digmin:
buf[i] = digmin
self.__file_out.write(buf[i].astype("int16").tobytes(order="C"))
else:
for i in range(0, sf):
value = int((buf[i] / self.__param_bitvalue[edfsignal]) - self.__param_offset[edfsignal])
if value > digmax:
value = digmax
if value < digmin:
value = digmin
self.__file_out.write(value.to_bytes(2, byteorder="little", signed=True))
else:
if buf.dtype == np.int32:
for i in range(0, sf):
value = int(buf[i])
if value > digmax:
value = digmax
if value < digmin:
value = digmin
self.__file_out.write(value.to_bytes(3, byteorder="little", signed=True))
else:
for i in range(0, sf):
value = int((buf[i] / self.__param_bitvalue[edfsignal]) - self.__param_offset[edfsignal])
if value > digmax:
value = digmax
if value < digmin:
value = digmin
self.__file_out.write(value.to_bytes(3, byteorder="little", signed=True))
self.__signal_write_sequence_pos += 1
if self.__signal_write_sequence_pos == self.__edfsignals:
self.__signal_write_sequence_pos = 0
if self.__write_tal(self.__file_out) != 0:
return -1
self.__datarecords += 1
return 0
def setDataRecordDuration(self, duration: int) -> int:
"""Sets the datarecord duration.
        This function is optional; normally you don't need to change the default value of one second.
        If used, it must be called only before the first sample write action.
This function can be used when you want to use a non-integer samplerate.
For example, if you want to use a samplerate of 0.5 Hz, set the samplefrequency to 5 Hz and
the datarecord duration to 10 seconds, or alternatively, set the samplefrequency to 1 Hz and
the datarecord duration to 2 seconds.
This function can also be used when you want to use a very high samplerate.
For example, if you want to use a samplerate of 5 GHz,
set the samplefrequency to 5000 Hz and the datarecord duration to 1 microSecond.
Do not use this function if not necessary.
duration is expressed in microSeconds, range: 1 - 60000000 (1uSec. - 60 sec.)
Returns 0 on success, otherwise -1.
"""
if (duration < 1) or (duration > 60000000) or (self.__datarecords != 0):
return -1
self.__long_data_record_duration = duration * 10
return 0
def setNumberOfAnnotationSignals(self, annot_signals: int) -> int:
"""Sets the number of annotation signals.
The default value is 1.
This function is optional and, if used, must be called before the first sample write action.
        Normally you don't need to change the default value. Use this function to increase
        the storage space for annotations only when the number of annotations you want to
        write is higher than the number of datarecords in the recording.
        Returns 0 on success, otherwise -1.
"""
if (annot_signals < 1) or (annot_signals >= self.__EDFLIB_MAX_ANNOTATION_CHANNELS) or (self.__datarecords != 0):
return -1
self.__nr_annot_chns = annot_signals
return 0
def writeAnnotation(self, onset: int, duration: int, description: str) -> int:
"""Writes an annotation/event to the file.
onset is relative to the starttime of the recording and must be >= 0.
onset and duration are in units of 100 microSeconds. Resolution is 0.0001 second.
E.g. 34.071 seconds must be written as 340710.
If duration is unknown or not applicable: set a negative number (-1).
Description is a string containing the text that describes the event.
        This function is optional.
        Returns 0 on success, otherwise -1.
"""
if (self.__status_ok == 0) or (onset < 0):
return -1
self.__annotationslist.append(self.__EDFAnnotationStruct(onset = onset, duration = duration, description = description))
self.__annots_in_file += 1
return 0
################################################################################
# from here only internal utils
################################################################################
# writes the EDF header
def __write_edf_header(self):
if self.__status_ok == 0:
return -1
eq_sf = 1
self.__recordsize = 0
str_ = bytearray(256)
self.__total_annot_bytes = self.__EDFLIB_ANNOTATION_BYTES * self.__nr_annot_chns
for i in range(0, self.__edfsignals):
if self.__param_smp_per_record[i] < 1:
return self.EDFLIB_NO_SAMPLES_IN_RECORD
if self.__param_dig_max[i] == self.__param_dig_min[i]:
return self.EDFLIB_DIGMIN_IS_DIGMAX
if self.__param_dig_max[i] < self.__param_dig_min[i]:
return self.EDFLIB_DIGMAX_LOWER_THAN_DIGMIN
if self.__param_phys_max[i] == self.__param_phys_min[i]:
return self.EDFLIB_PHYSMIN_IS_PHYSMAX
self.__recordsize += self.__param_smp_per_record[i]
if i > 0:
if self.__param_smp_per_record[i] != self.__param_smp_per_record[i-1]:
eq_sf = 0
if self.__edf != 0:
self.__recordsize *= 2
self.__recordsize += self.__total_annot_bytes
if self.__recordsize > (10 * 1024 * 1024): # datarecord size should not exceed 10MB for EDF
return self.EDFLIB_DATARECORD_SIZE_TOO_BIG
# if your application gets hit by this limitation, lower the value for the datarecord duration
            # using the function setDataRecordDuration()
else:
self.__recordsize *= 3
self.__recordsize += self.__total_annot_bytes
            if self.__recordsize > (15 * 1024 * 1024):  # datarecord size should not exceed 15MB for BDF(+)
return self.EDFLIB_DATARECORD_SIZE_TOO_BIG
# if your application gets hit by this limitation, lower the value for the datarecord duration
            # using the function setDataRecordDuration()
for i in range(0, self.__edfsignals):
self.__param_bitvalue[i] = (self.__param_phys_max[i] - self.__param_phys_min[i]) / (self.__param_dig_max[i] - self.__param_dig_min[i])
self.__param_offset[i] = self.__param_phys_max[i] / self.__param_bitvalue[i] - self.__param_dig_max[i]
total_signals = self.__edfsignals + self.__nr_annot_chns
hdr_sz = (total_signals + 1) * 256
header = bytearray(hdr_sz)
for i in range(0, hdr_sz):
header[i] = 32
self.__file_out.seek(0, io.SEEK_SET)
if self.__edf != 0:
header[0 : 8] = bytes("0 ", encoding="ascii")
else:
header[0 : 8] = bytes("\xffBIOSEMI", encoding="latin_1")
p = 0
if self.__plus_birthdate_year == 0:
rest = 73
else:
rest = 63
        # lstrip()/rstrip() return new strings, so assign the result for the stripping to take effect
        self.__plus_patientcode = self.__plus_patientcode.strip(" ")
l = len(self.__plus_patientcode)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_patientcode, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
rest = 0
else:
rest -= l
self.__latin1_to_ascii(str_, l)
for i in range(0 , l):
if str_[i] == 32:
str_[i] = 95
header[8 + p : 8 + p + l] = str_[0 : l]
p += l
header[8 + p] = 32
p += 1
else:
header[8 + p : 8 + p + 2] = bytes("X ", encoding="ascii")
p += 2
rest -= 1
if self.__plus_gender == 1:
header[8 + p : 8 + p + 2] = bytes("M ", encoding="ascii")
else:
if self.__plus_gender == 0:
header[8 + p : 8 + p + 2] = bytes("F ", encoding="ascii")
else:
header[8 + p : 8 + p + 2] = bytes("X ", encoding="ascii")
p += 2
if self.__plus_birthdate_year == 0:
header[8 + p : 8 + p + 2] = bytes("X ", encoding="ascii")
p += 2
else:
month_str = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
header[8 + p : 8 + p + 12] = bytes("%02d-%s-%04d " \
%(self.__plus_birthdate_day, month_str[self.__plus_birthdate_month - 1], self.__plus_birthdate_year), encoding="ascii")
p += 12
        self.__plus_patient_name = self.__plus_patient_name.strip(" ")
l = len(self.__plus_patient_name)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_patient_name, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
rest = 0
else:
rest -= l
self.__latin1_to_ascii(str_, l)
for i in range(0 , l):
if str_[i] == 32:
str_[i] = 95
header[8 + p : 8 + p + l] = str_[0 : l]
p += l
header[8 + p] = 32
p += 1
else:
header[8 + p : 8 + p + 2] = bytes("X ", encoding="ascii")
p += 2
rest -= 1
        self.__plus_patient_additional = self.__plus_patient_additional.strip(" ")
l = len(self.__plus_patient_additional)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_patient_additional, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
self.__latin1_to_ascii(str_, l)
header[8 + p : 8 + p + l] = str_[0 : l]
p += l
for j in range(p, 80):
header[8 + j] = 32
if self.__startdate_year == 0:
dt = datetime.now()
self.__startdate_year = dt.year
self.__startdate_month = dt.month
self.__startdate_day = dt.day
self.__starttime_hour = dt.hour
self.__starttime_minute = dt.minute
self.__starttime_second = dt.second
month_str = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
header[88 : 110] = bytes("Startdate %02d-%s-%04d " \
%(self.__startdate_day, month_str[self.__startdate_month - 1], self.__startdate_year), encoding="ascii")
p = 22
rest = 50
        self.__plus_admincode = self.__plus_admincode.strip(" ")
l = len(self.__plus_admincode)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_admincode, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
rest = 0
else:
rest -= l
self.__latin1_to_ascii(str_, l)
for i in range(0 , l):
if str_[i] == 32:
str_[i] = 95
header[88 + p : 88 + p + l] = str_[0 : l]
p += l
header[88 + p] = 32
p += 1
else:
header[88 + p : 88 + p + 2] = bytes("X ", encoding="ascii")
p += 2
rest -= 1
        self.__plus_technician = self.__plus_technician.strip(" ")
l = len(self.__plus_technician)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_technician, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
rest = 0
else:
rest -= l
self.__latin1_to_ascii(str_, l)
for i in range(0 , l):
if str_[i] == 32:
str_[i] = 95
header[88 + p : 88 + p + l] = str_[0 : l]
p += l
header[88 + p] = 32
p += 1
else:
header[88 + p : 88 + p + 2] = bytes("X ", encoding="ascii")
p += 2
rest -= 1
        self.__plus_equipment = self.__plus_equipment.strip(" ")
l = len(self.__plus_equipment)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_equipment, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
rest = 0
else:
rest -= l
self.__latin1_to_ascii(str_, l)
for i in range(0 , l):
if str_[i] == 32:
str_[i] = 95
header[88 + p : 88 + p + l] = str_[0 : l]
p += l
header[88 + p] = 32
p += 1
else:
header[88 + p : 88 + p + 2] = bytes("X ", encoding="ascii")
p += 2
rest -= 1
        self.__plus_recording_additional = self.__plus_recording_additional.strip(" ")
l = len(self.__plus_recording_additional)
if (l != 0) and (rest > 0):
str_ = bytearray(self.__plus_recording_additional, encoding="latin_1")
l = len(str_)
if l > rest:
l = rest
self.__latin1_to_ascii(str_, l)
header[88 + p : 88 + p + l] = str_[0 : l]
p += l
header[168 : 168 + 16] = bytes("%02d.%02d.%02d%02d.%02d.%02d" \
%(self.__startdate_day, self.__startdate_month, self.__startdate_year % 100, self.__starttime_hour, self.__starttime_minute, self.__starttime_second), encoding="ascii")
str_ = bytearray(256)
l = self.__sprint_int_number_nonlocalized(str_, (total_signals + 1) * 256, 0, 0)
if l > 8:
l = 8
header[184 : 184 + l] = str_[0 : l]
if self.__edf != 0:
header[192 : 192 + 5] = bytes("EDF+C", encoding="ascii")
else:
header[192 : 192 + 5] = bytes("BDF+C", encoding="ascii")
header[236 : 236 + 8] = bytes("-1 ", encoding="ascii")
if self.__long_data_record_duration == self.EDFLIB_TIME_DIMENSION:
header[244 : 244 + 8] = bytes("1 ", encoding="ascii")
else:
l = self.__sprint_number_nonlocalized(str_, self.__long_data_record_duration / self.EDFLIB_TIME_DIMENSION)
if l > 8:
l = 8
header[244 : 244 + l] = str_[0 : l]
l = self.__sprint_int_number_nonlocalized(str_, total_signals, 0, 0)
if l > 4:
l = 4
header[252 : 252 + l] = str_[0 : l]
for i in range(0, self.__edfsignals):
l = len(self.__param_label[i])
if l != 0:
str_ = bytearray(self.__param_label[i], encoding="latin_1")[0 : 16]
l = len(str_)
if l > 16:
l = 16
self.__latin1_to_ascii(str_, l)
header[256 + (i * 16) : 256 + (i * 16) + l] = str_[0 : l]
for i in range(self.__edfsignals, total_signals):
if(self.__edf != 0):
header[256 + (i * 16) : 256 + (i * 16) + 16] = bytes("EDF Annotations ", encoding="ascii")
else:
header[256 + (i * 16) : 256 + (i * 16) + 16] = bytes("BDF Annotations ", encoding="ascii")
for i in range(0, self.__edfsignals):
l = len(self.__param_transducer[i])
if l != 0:
str_ = bytearray(self.__param_transducer[i], encoding="latin_1")[0 : 80]
l = len(str_)
if l > 80:
l = 80
self.__latin1_to_ascii(str_, l)
header[256 + (total_signals * 16) + (i * 80) : 256 + (total_signals * 16) + (i * 80) + l] = str_[0 : l]
for i in range(0, self.__edfsignals):
l = len(self.__param_physdimension[i])
if l != 0:
str_ = bytearray(self.__param_physdimension[i], encoding="latin_1")[0 : 8]
l = len(str_)
if l > 8:
l = 8
self.__latin1_to_ascii(str_, l)
header[256 + (total_signals * 96) + (i * 8) : 256 + (total_signals * 96) + (i * 8) + l] = str_[0 : l]
str_ = bytearray(256)
for i in range(0, self.__edfsignals):
l = self.__sprint_number_nonlocalized(str_, self.__param_phys_min[i])
if l > 8:
l = 8
header[256 + (total_signals * 104) + (i * 8) : 256 + (total_signals * 104) + (i * 8) + l] = str_[0 : l]
for i in range(self.__edfsignals, total_signals):
header[256 + (total_signals * 104) + (i * 8) : 256 + (total_signals * 104) + (i * 8) + 2] = bytes("-1", encoding="ascii")
for i in range(0, self.__edfsignals):
l = self.__sprint_number_nonlocalized(str_, self.__param_phys_max[i])
if l > 8:
l = 8
header[256 + (total_signals * 112) + (i * 8) : 256 + (total_signals * 112) + (i * 8) + l] = str_[0 : l]
for i in range(self.__edfsignals, total_signals):
header[256 + (total_signals * 112) + (i * 8) : 256 + (total_signals * 112) + (i * 8) + 1] = bytes("1", encoding="ascii")
for i in range(0, self.__edfsignals):
l = self.__sprint_int_number_nonlocalized(str_, self.__param_dig_min[i], 0, 0)
if l > 8:
l = 8
header[256 + (total_signals * 120) + (i * 8) : 256 + (total_signals * 120) + (i * 8) + l] = str_[0 : l]
for i in range(self.__edfsignals, total_signals):
if self.__edf != 0:
header[256 + (total_signals * 120) + (i * 8) : 256 + (total_signals * 120) + (i * 8) + 6] = bytes("-32768", encoding="ascii")
else:
header[256 + (total_signals * 120) + (i * 8) : 256 + (total_signals * 120) + (i * 8) + 8] = bytes("-8388608", encoding="ascii")
for i in range(0, self.__edfsignals):
l = self.__sprint_int_number_nonlocalized(str_, self.__param_dig_max[i], 0, 0)
if l > 8:
l = 8
header[256 + (total_signals * 128) + (i * 8) : 256 + (total_signals * 128) + (i * 8) + l] = str_[0 : l]
for i in range(self.__edfsignals, total_signals):
if self.__edf != 0:
header[256 + (total_signals * 128) + (i * 8) : 256 + (total_signals * 128) + (i * 8) + 5] = bytes("32767", encoding="ascii")
else:
header[256 + (total_signals * 128) + (i * 8) : 256 + (total_signals * 128) + (i * 8) + 7] = bytes("8388607", encoding="ascii")
for i in range(0, self.__edfsignals):
l = len(self.__param_prefilter[i])
if l != 0:
str_ = bytearray(self.__param_prefilter[i], encoding="latin_1")[0 : 80]
l = len(str_)
if l > 80:
l = 80
self.__latin1_to_ascii(str_, l)
header[256 + (total_signals * 136) + (i * 80) : 256 + (total_signals * 136) + (i * 80) + l] = str_[0 : l]
str_ = bytearray(256)
for i in range(0, self.__edfsignals):
l = self.__sprint_int_number_nonlocalized(str_, self.__param_smp_per_record[i], 0, 0)
if l > 8:
l = 8
header[256 + (total_signals * 216) + (i * 8) : 256 + (total_signals * 216) + (i * 8) + l] = str_[0 : l]
for i in range(self.__edfsignals, total_signals):
if self.__edf != 0:
l = self.__sprint_int_number_nonlocalized(str_, self.__EDFLIB_ANNOTATION_BYTES // 2, 0, 0)
else:
l = self.__sprint_int_number_nonlocalized(str_, self.__EDFLIB_ANNOTATION_BYTES // 3, 0, 0)
if l > 8:
l = 8
header[256 + (total_signals * 216) + (i * 8) : 256 + (total_signals * 216) + (i * 8) + l] = str_[0 : l]
self.__file_out.write(header)
return 0
# writes a TAL
def __write_tal(self, f):
scratchpad = bytearray(self.__total_annot_bytes)
        p = self.__snprint_ll_number_nonlocalized(scratchpad, 0, (self.__datarecords * self.__long_data_record_duration + self.__starttime_offset) // self.EDFLIB_TIME_DIMENSION, 0, 1)  # integer division; a float here would crash the byte-level formatter
if ((self.__long_data_record_duration % self.EDFLIB_TIME_DIMENSION) != 0) or (self.__starttime_offset != 0):
scratchpad[p] = 46
p += 1
p += self.__snprint_ll_number_nonlocalized(scratchpad, p, (self.__datarecords * self.__long_data_record_duration + self.__starttime_offset) % self.EDFLIB_TIME_DIMENSION, 7, 0)
scratchpad[p] = 20
p += 1
scratchpad[p] = 20
p += 1
for i in range(p, self.__total_annot_bytes):
scratchpad[i] = 0
f.write(scratchpad)
return 0
# writes the annotations to the file
def __write_annotations(self):
err = 0
datrecs = 0
str_ = bytearray(self.__EDFLIB_ANNOTATION_BYTES)
offset = (self.__edfsignals + self.__nr_annot_chns + 1) * 256
file_sz = offset + (self.__datarecords * self.__recordsize)
datrecsize = self.__total_annot_bytes
for i in range(0, self.__edfsignals):
if self.__edf != 0:
offset += self.__param_smp_per_record[i] * 2
datrecsize += self.__param_smp_per_record[i] * 2
else:
offset += self.__param_smp_per_record[i] * 3
datrecsize += self.__param_smp_per_record[i] * 3
j = 0
for k in range(0, self.__annots_in_file):
annot2 = self.__annotationslist[k]
onset = annot2.onset + (self.__starttime_offset // 1000)
p = 0
if j == 0: # first annotation signal
if (offset + self.__total_annot_bytes) > file_sz:
break
self.__file_out.seek(offset, io.SEEK_SET)
p += self.__snprint_ll_number_nonlocalized(str_, 0, (datrecs * self.__long_data_record_duration + self.__starttime_offset) // self.EDFLIB_TIME_DIMENSION, 0, 1)
if ((self.__long_data_record_duration % self.EDFLIB_TIME_DIMENSION) != 0) or (self.__starttime_offset != 0):
str_[p] = 46
p += 1
n = self.__snprint_ll_number_nonlocalized(str_, p, (datrecs * self.__long_data_record_duration + self.__starttime_offset) % self.EDFLIB_TIME_DIMENSION, 7, 0)
p += n
str_[p] = 20
p += 1
str_[p] = 20
p += 1
str_[p] = 0
p += 1
n = self.__snprint_ll_number_nonlocalized(str_, p, onset // 10000, 0, 1)
p += n
if (onset % 10000) != 0:
str_[p] = 46
p += 1
n = self.__snprint_ll_number_nonlocalized(str_, p, onset % 10000, 4, 0)
p += n
if annot2.duration >= 0:
str_[p] = 21
p += 1
n = self.__snprint_ll_number_nonlocalized(str_, p, annot2.duration // 10000, 0, 0)
p += n
if (annot2.duration % 10000) != 0:
str_[p] = 46
p += 1
n = self.__snprint_ll_number_nonlocalized(str_, p, annot2.duration % 10000, 4, 0)
p += n
str_[p] = 20
p += 1
            # namedtuple fields are immutable and strip() returns a new string, so strip into a local before encoding
            description = annot2.description.strip(" ")
            ba_tmp = bytearray(description, encoding="utf-8")
l = self.__strlen(ba_tmp)
if l > self.__EDFLIB_WRITE_MAX_ANNOTATION_LEN:
l = self.__EDFLIB_WRITE_MAX_ANNOTATION_LEN
str_[p : p + l] = ba_tmp[0 : l]
p += l
str_[p] = 20
p += 1
for p in range(p, self.__EDFLIB_ANNOTATION_BYTES):
str_[p] = 0
self.__file_out.write(str_)
j += 1
if j >= self.__nr_annot_chns:
j = 0
offset += datrecsize
datrecs += 1
if datrecs >= self.__datarecords:
break
return 0
    # minimum is the minimum digits that will be printed (minus sign not included), leading zeros will be added if necessary
# if sign is zero, only negative numbers will have the sign '-' character
# if sign is one, the sign '+' or '-' character will always be printed
# returns the number of characters printed
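    # e.g. q=42, minimum=4, sign=1 writes "+0042" into str_ and returns 5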
def __sprint_int_number_nonlocalized(self, str_, q, minimum, sign):
flag = 0
z = 0
i = 0
j = 0
base = 1000000000
if minimum < 0:
minimum = 0
if minimum > 9:
flag = 1
if q < 0:
str_[j] = 45
j += 1
q = -q
else:
if sign != 0:
str_[j] = 43
j += 1
for i in range(10, 0, -1):
if minimum == i:
flag = 1
z = q // base
q = int(q % base)
if (z != 0) or (flag != 0):
str_[j] = 48 + z
j += 1
flag = 1
base //= 10
if flag == 0:
str_[j] = 48
j += 1
str_[j] = 0
return j
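    # formats a float into dest as non-localized ASCII (dot as decimal separator, up to
    # nine fractional digits, trailing zeros stripped); returns the number of characters printed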
def __sprint_number_nonlocalized(self, dest, val):
flag = 0
z = 0
i = 0
j = 0
base = 1000000000
sz = len(dest)
if sz < 1:
return 0
q = int(val)
var = val - q
if val < 0.0:
dest[j] = 45
j += 1
if q < 0:
q = -q
if j == sz:
j -= 1
dest[j] = 0
return j
for i in range(10, 0, -1):
z = q // base
q = int(q % base)
if (z != 0) or (flag != 0):
dest[j] = 48 + z
j += 1
if j == sz:
j -= 1
dest[j] = 0
return j
flag = 1
base //= 10
if flag == 0:
dest[j] = 48
j += 1
if j == sz:
j -= 1
dest[j] = 0
return j
base = 100000000
var *= (base * 10)
q = int(var)
if q < 0:
q = -q
if q == 0:
dest[j] = 0
return j
dest[j] = 46
j += 1
if j == sz:
j -= 1
dest[j] = 0
return j
for i in range(9, 0, -1):
z = q // base
q = int(q % base)
dest[j] = 48 + z
j += 1
if j == sz:
j -= 1
dest[j] = 0
return j
base //= 10
dest[j] = 0
j -= 1
for j in range(j, 0, -1):
if dest[j] == 48:
dest[j] = 0
else:
j += 1
break
return j
    # minimum is the minimum digits that will be printed (minus sign not included), leading zeros will be added if necessary
# if sign is zero, only negative numbers will have the sign '-' character
# if sign is one, the sign '+' or '-' character will always be printed
# returns the number of characters printed
def __snprint_ll_number_nonlocalized(self, dest, offset, q, minimum, sign):
flag = 0
z = 0
i = 0
j = offset
sz = 0
base = 1000000000000000000
sz = len(dest)
if (sz - offset) < 1:
return 0
if minimum < 0:
minimum = 0
if minimum > 18:
flag = 1
if q < 0:
dest[j] = 45
j += 1
q = -q
else:
if sign != 0:
dest[j] = 43
j += 1
if j == sz:
j -= 1
dest[j] = 0
return (j - offset)
for i in range(19, 0, -1):
if minimum == i:
flag = 1
z = q // base
q = int(q % base)
if (z != 0) or (flag != 0):
dest[j] = 48 + z
j += 1
                if j == sz:
                    j -= 1  # decrement before writing so the NUL stays inside the buffer
                    dest[j] = 0
                    return (j - offset)
flag = 1
base = base // 10
if flag == 0:
dest[j] = 48
j += 1
            if j == sz:
                j -= 1  # decrement before writing so the NUL stays inside the buffer
                dest[j] = 0
                return (j - offset)
dest[j] = 0
return (j - offset)
    # minimum is the minimum digits that will be printed (minus sign not included), leading zeros will be added if necessary
    # if sign is zero, only negative numbers will have the sign '-' character
    # if sign is one, the sign '+' or '-' character will always be printed
    # returns the number of characters printed
def __fprint_int_number_nonlocalized(self, f, q, minimum, sign):
flag = 0
z = 0
i = 0
j = 0
base = 1000000000
if minimum < 0:
minimum = 0
if minimum > 9:
flag = 1
if q < 0:
f.write(bytes("-", encoding="ascii"))
j += 1
q = -q
else:
if sign != 0:
f.write(bytes("+", encoding="ascii"))
j += 1
for i in range(10, 0, -1):
if minimum == i:
flag = 1
z = q // base
q = int(q % base)
if (z != 0) or (flag != 0):
f.write(bytes(chr(48 + z), encoding="ascii"))
j += 1
flag = 1
base = base // 10
if flag == 0:
f.write(bytes("0", encoding="ascii"))
j += 1
return j
# get string length
    def __strlen(self, str_):
        for i in range(0, len(str_)):
            if str_[i] == 0:
                return i
        return len(str_)  # no NUL terminator found; the whole buffer is the string
# copy a string
def __strcpy(self, dest, src):
sz = len(dest) - 1
srclen = self.__strlen(src)
if srclen > sz:
srclen = sz
if srclen < 0:
return 0
for i in range(0, srclen):
dest[i] = src[i]
dest[srclen] = 0
return srclen
# converts Latin-1 to ASCII
def __latin1_to_ascii(self, str_, l):
i = 0
value = 0
if l > len(str_):
l = len(str_)
conv_table = bytearray(" E ,F\".++^mS<E Z `\'\"\".--~ s>e zY < \'u > ?AAAAAAECEEEEIIIIDNOOOOOxOUUUUYIsaaaaaaeceeeeiiiidnooooo-0uuuuyty", encoding="ascii")
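        # conv_table maps byte values 127..255 to plain-ASCII stand-ins (accented letters
        # lose their diacritics); the lookup below indexes it with (value - 127)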
for i in range(0, l):
value = str_[i]
if (value > 31) and (value < 127):
continue
if value < 0:
value += 256
if value < 32:
str_[i] = 32
continue
str_[i] = conv_table[value - 127]
################################################################################
# END class EDFwriter
################################################################################
################################################################################
# START class EDFexception
################################################################################
class EDFexception(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
################################################################################
# END class EDFexception
################################################################################
| 24,396 | 9 | 313 |
420095f4362447fc720d1fd33b252646beb57b3f | 208 | py | Python | 001146StepikPyBegin/Stepik001146PyBeginсh07p03st13С09_01_my_20200421.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh07p03st13С09_01_my_20200421.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh07p03st13С09_01_my_20200421.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | n = int(input())
nums = []
nums2 = []
for i in range(n):
nums.append(int(input()))
print("nums.append", nums)
nums2 = sorted(nums)
print("sorted(nums)", nums2)
print(nums2[n - 2])
print(nums2[n - 1])
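# Example (hypothetical input): n = 3 with values 5, 1, 9 sorts to [1, 5, 9],
# then prints 5 (the second largest) followed by 9 (the largest).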
| 18.909091 | 30 | 0.605769 | n = int(input())
nums = []
nums2 = []
for i in range(n):
nums.append(int(input()))
print("nums.append", nums)
nums2 = sorted(nums)
print("sorted(nums)", nums2)
print(nums2[n - 2])
print(nums2[n - 1])
| 0 | 0 | 0 |
254ccfaca8edcb40b06fda26e201fd147b513ea3 | 4,127 | py | Python | src/rendering/SceneLib/Resize_background.py | whong92/3D_DL | 3c15bca3cc87c3197d38a785f6d1146911a82921 | [
"MIT"
] | 35 | 2019-03-04T00:06:20.000Z | 2022-02-04T22:34:17.000Z | src/rendering/SceneLib/Resize_background.py | 921kiyo/C530 | e64402575661a1e534cb8effe122d8fe8aed156e | [
"MIT"
] | 3 | 2021-03-12T13:12:26.000Z | 2022-01-17T08:47:49.000Z | src/rendering/SceneLib/Resize_background.py | 921kiyo/C530 | e64402575661a1e534cb8effe122d8fe8aed156e | [
"MIT"
] | 12 | 2019-09-26T08:35:18.000Z | 2021-12-09T05:39:59.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 17:01:13 2018
@author: Pavel
"""
"""
This function will take the whole of the SUN database and flatten it into a single
folder while resizing and cropping all images into a given square shape.
If the file is smaller than that, it will be ignored.
"""
import os
from PIL import Image
from resizeimage import resizeimage
# the path below should point to the folder containing the alphabet letter folders
SUN_images_dir = "E:/LabelMeToolbox/real_data/images"
# The below folder will contain the resized images
resized_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/new_test/"
indoor_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/indoor/"
outdoor_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/outdoor/"
def resize_and_crop(image_address, output_address, f_widht, f_height):
"""
    Function for resizing and cropping a single image.
The image has to be bigger than the desired size.
If smaller in any dimension, the image will be discarded
Args:
image_address (string): Image to be resized
output_address (string): Final destination of the resized image
        f_widht (int): Final desired width in pixels
f_height (int): Final desired height in pixels
Returns:
Nothing
"""
with open(image_address, 'r+b') as f:
with Image.open(f) as image:
widht, height = image.size
if(widht >= f_widht and height >= f_height):
cover = resizeimage.resize_cover(image, [f_widht, f_height])
cover.save(output_address, image.format)
else:
print("Image too small to be resized")
def find_all_files(min_pixels, origin_folder, target_folder):
"""
    Function that searches all subfolders of a given folder.
    This function assumes that all files in that folder are image files.
    If this is not the case, errors will occur, as no check is carried out.
For each file, it checks that both of its dimensions are bigger than
min_pixels. If so, it will rescale and crop the image to
    min_pixels*min_pixels and save the file to the destination given
    at the top of this file.
    There is a testing feature, count, which allows only a few subfolders
    to be searched, so that this function can be tested.
Args:
min_pixels (int): The final image will be square of this number of pixels
origin_folder (string): Path to a folder, which will be searched for
any images in it or any of its subdirectories
target_folder (string): path to folder to which the resized images
    should be saved. This folder will have a flat structure.
Returns:
root (string): Returns the root address of the original folder
"""
#count = 0
for root, dirs, files in os.walk(origin_folder):
vis_files = [f for f in files if not f[0] == '.']
copy = True
"""
copy = False
if(root.endswith("indoor")):
print("I am indoor")
target_folder = indoor_address
copy = True
if(root.endswith("outdoor")):
print("I am outdoor")
target_folder = outdoor_address
copy = True
"""
if(len(vis_files)>0 and copy):
for image_name in vis_files:
#print(root, dirs, image_name)
with Image.open(root+"/"+ image_name) as tested_image:
width, height = tested_image.size
if(width>=min_pixels and height>= min_pixels):
cover = resizeimage.resize_cover(tested_image, [min_pixels, min_pixels])
cover.convert('RGB').save(target_folder+image_name, 'JPEG')
return root
if __name__ == "__main__":
roots= find_all_files(300,SUN_images_dir, resized_address) | 39.682692 | 125 | 0.650594 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 17:01:13 2018
@author: Pavel
"""
"""
This function will take the whole of the SUN database and flatten it into a single
folder while resizing and cropping all images into a given square shape.
If an image is smaller than that shape, it will be ignored.
"""
import os
from PIL import Image
from resizeimage import resizeimage
# the path below should point to the folder containing the SUN database images
SUN_images_dir = "E:/LabelMeToolbox/real_data/images"
# The below folder will contain the resized images
resized_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/new_test/"
indoor_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/indoor/"
outdoor_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/outdoor/"
def resize_and_crop(image_address, output_address, f_widht, f_height):
"""
    Function for resizing and cropping a single image.
    The image has to be bigger than the desired size.
    If it is smaller in any dimension, the image will be discarded.
Args:
image_address (string): Image to be resized
output_address (string): Final destination of the resized image
    f_widht (int): Final desired width in pixels
f_height (int): Final desired height in pixels
Returns:
Nothing
"""
with open(image_address, 'r+b') as f:
with Image.open(f) as image:
widht, height = image.size
if(widht >= f_widht and height >= f_height):
cover = resizeimage.resize_cover(image, [f_widht, f_height])
cover.save(output_address, image.format)
else:
print("Image too small to be resized")
def find_all_files(min_pixels, origin_folder, target_folder):
"""
    Function that searches all subfolders of a given folder.
    This function assumes that all files in that folder are image files.
    If this is not the case, errors will occur, as no check is carried out.
For each file, it checks that both of its dimensions are bigger than
min_pixels. If so, it will rescale and crop the image to
    min_pixels*min_pixels and save the file to the destination given
    at the top of this file.
    There is a testing feature, count, which allows only a few subfolders
    to be searched, so that this function can be tested.
Args:
min_pixels (int): The final image will be square of this number of pixels
origin_folder (string): Path to a folder, which will be searched for
any images in it or any of its subdirectories
target_folder (string): path to folder to which the resized images
    should be saved. This folder will have a flat structure.
Returns:
root (string): Returns the root address of the original folder
"""
#count = 0
for root, dirs, files in os.walk(origin_folder):
vis_files = [f for f in files if not f[0] == '.']
copy = True
"""
copy = False
if(root.endswith("indoor")):
print("I am indoor")
target_folder = indoor_address
copy = True
if(root.endswith("outdoor")):
print("I am outdoor")
target_folder = outdoor_address
copy = True
"""
if(len(vis_files)>0 and copy):
for image_name in vis_files:
#print(root, dirs, image_name)
with Image.open(root+"/"+ image_name) as tested_image:
width, height = tested_image.size
if(width>=min_pixels and height>= min_pixels):
cover = resizeimage.resize_cover(tested_image, [min_pixels, min_pixels])
cover.convert('RGB').save(target_folder+image_name, 'JPEG')
return root
if __name__ == "__main__":
roots= find_all_files(300,SUN_images_dir, resized_address) | 0 | 0 | 0 |
72a88471bb9c6af600de78e7cd1a66ac516acbbc | 3,133 | py | Python | UNetDataset.py | maxgraf96/DLAM_Assignment | cbd2ce1fbc39c187ff2b1a4259a36559dd50e772 | [
"MIT"
] | 2 | 2020-09-19T08:17:46.000Z | 2021-07-16T08:25:57.000Z | UNetDataset.py | maxgraf96/DLAM_Assignment | cbd2ce1fbc39c187ff2b1a4259a36559dd50e772 | [
"MIT"
] | null | null | null | UNetDataset.py | maxgraf96/DLAM_Assignment | cbd2ce1fbc39c187ff2b1a4259a36559dd50e772 | [
"MIT"
] | null | null | null | from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset
from Hyperparameters import sep, unet_width
from Util import get_spectrogram
class UNetDataset(Dataset):
"""
    Dataset for accessing data points of the autoencoder output.
"""
def __init__(self, root_dir, gt_dir, transform=None):
"""
Initialise the dataset.
:param root_dir: The path to the data points
:param gt_dir: The path to the ground truth versions of the data points
:param transform: Transformation to apply to the data points
"""
self.root_dir = root_dir
self.transform = transform
# The input for this dataset is the output from the autoencoder
input_mel_npys = Path(root_dir).rglob("*_output.npy")
# The U-Net is trained to minimise the error between the autoencoder output
# and the clean ("ground truth") versions of the synthesised files
gt_mel_npys = Path(gt_dir).rglob("*_synth_mel.npy")
self.input_mel_filenames = [str(npy) for npy in input_mel_npys]
self.gt_mel_filenames = [str(npy) for npy in gt_mel_npys]
# Create mappings between input and ground truth names (so that the order is correct)
self.input_to_gt = {}
len_suffix = len("_output.npy")
for input_path in self.input_mel_filenames:
input_filename = input_path.split(sep)[-1][:-len_suffix]
for gt_path in self.gt_mel_filenames:
if input_filename in gt_path:
self.input_to_gt[input_path] = gt_path
        self.length = len(self.input_mel_filenames)
    def __len__(self):
        return self.length
    def __getitem__(self, idx):
        # This is included for completeness. Future versions could handle the retrieval of multiple data points
        # simultaneously
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Get spectrogram
        # Convert idx to filename
        input_mel_path = self.input_mel_filenames[idx]
        gt_mel_path = self.input_to_gt[self.input_mel_filenames[idx]]
        # Get the spectrograms and trim to the length required by the U-Net
        input_mel = get_spectrogram(input_mel_path)[:, :unet_width]
        gt_mel = get_spectrogram(gt_mel_path)[:, :unet_width]
        # Add extra channel dimension for pytorch
        input_mel = np.expand_dims(input_mel, axis=0)
        gt_mel = np.expand_dims(gt_mel, axis=0)
        sample = {'input_mel': input_mel, 'gt_mel': gt_mel, 'filename': idx}
        if self.transform:
            sample = self.transform(sample)
        return sample
class ToTensor(object):
"""
Transformation used to convert ndarrays in sample to PyTorch tensors.
"""
| 36.858824 | 111 | 0.659751 | from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset
from Hyperparameters import sep, unet_width
from Util import get_spectrogram
class UNetDataset(Dataset):
"""
    Dataset for accessing data points of the autoencoder output.
"""
def __init__(self, root_dir, gt_dir, transform=None):
"""
Initialise the dataset.
:param root_dir: The path to the data points
:param gt_dir: The path to the ground truth versions of the data points
:param transform: Transformation to apply to the data points
"""
self.root_dir = root_dir
self.transform = transform
# The input for this dataset is the output from the autoencoder
input_mel_npys = Path(root_dir).rglob("*_output.npy")
# The U-Net is trained to minimise the error between the autoencoder output
# and the clean ("ground truth") versions of the synthesised files
gt_mel_npys = Path(gt_dir).rglob("*_synth_mel.npy")
self.input_mel_filenames = [str(npy) for npy in input_mel_npys]
self.gt_mel_filenames = [str(npy) for npy in gt_mel_npys]
# Create mappings between input and ground truth names (so that the order is correct)
self.input_to_gt = {}
len_suffix = len("_output.npy")
for input_path in self.input_mel_filenames:
input_filename = input_path.split(sep)[-1][:-len_suffix]
for gt_path in self.gt_mel_filenames:
if input_filename in gt_path:
self.input_to_gt[input_path] = gt_path
self.length = len(self.input_mel_filenames)
def __len__(self):
return self.length
def __getitem__(self, idx):
# This is included for completeness. Future versions could handle the retrieval of multiple data points
# simultaneously
if torch.is_tensor(idx):
idx = idx.tolist()
# Get spectrogram
# Convert idx to filename
input_mel_path = self.input_mel_filenames[idx]
gt_mel_path = self.input_to_gt[self.input_mel_filenames[idx]]
# Get the spectrograms and trim to the length required by the U-Net
input_mel = get_spectrogram(input_mel_path)[:, :unet_width]
gt_mel = get_spectrogram(gt_mel_path)[:, :unet_width]
# Add extra channel dimension for pytorch
input_mel = np.expand_dims(input_mel, axis=0)
gt_mel = np.expand_dims(gt_mel, axis=0)
sample = {'input_mel': input_mel, 'gt_mel': gt_mel, 'filename': idx}
if self.transform:
sample = self.transform(sample)
return sample
class ToTensor(object):
"""
Transformation used to convert ndarrays in sample to PyTorch tensors.
"""
def __call__(self, sample):
# Get ndarrays
input_mel = sample['input_mel']
gt_mel = sample['gt_mel']
# Convert to float tensors
input_mel = torch.from_numpy(input_mel).float()
gt_mel = torch.from_numpy(gt_mel).float()
return {'input_mel': input_mel, 'gt_mel': gt_mel, 'filename': sample['filename']}
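# Minimal usage sketch (directory paths below are illustrative assumptions,
# not part of the project):
# from torch.utils.data import DataLoader
# dataset = UNetDataset("tmp/ae_output", "tmp/ground_truth", transform=ToTensor())
# loader = DataLoader(dataset, batch_size=4, shuffle=True)
# batch = next(iter(loader))  # dict with 'input_mel', 'gt_mel', 'filename'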
| 1,283 | 0 | 80 |
f00f66813b04f934b6b0813a40351b94837d4d46 | 10,764 | py | Python | src/ipycbm/plugins/foi/foi_help.py | VP-GEO/cbm | 4ed229f6b6455435b6d032deb8a39dba4ecee7a2 | [
"BSD-3-Clause"
] | null | null | null | src/ipycbm/plugins/foi/foi_help.py | VP-GEO/cbm | 4ed229f6b6455435b6d032deb8a39dba4ecee7a2 | [
"BSD-3-Clause"
] | null | null | null | src/ipycbm/plugins/foi/foi_help.py | VP-GEO/cbm | 4ed229f6b6455435b6d032deb8a39dba4ecee7a2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Gilbert Voican, Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (HTML, HBox, VBox, Checkbox, Layout, widgets)
| 66.444444 | 551 | 0.764028 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Gilbert Voican, Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (HTML, HBox, VBox, Checkbox, Layout, widgets)
def widget_box():
wbox = VBox(children=[ipycbm_help(), about()])
return wbox
def ipycbm_help():
html = """
<H2>'Get' and 'View' functions.</H2>
With the 'get' function you can download data from the server to your local jupyter environment.<br>
    The 'view' function loads local files and displays them with different methods, or provides example code for each selected dataset.<br>
<H4>Available options:</H4>
<b>Get data example:</b><br>
    <code>from src import ipycbm</code><br>
<code>ipycbm.get()</code>
<br>
<b>View data example:</b><br>
    <code>from src import ipycbm</code><br>
<code>ipycbm.view()</code><br>
<br>
'**tmp**' folder structure example for parcel with ID 12345:<br>
<code>tmp/
cat2019/parcel_12345/12345_information.json
cat2019/parcel_12345/12345_time_series.csv
cat2019/parcel_12345/12345_chipimages/12345_images_list.csv
cat2019/parcel_12345/12345_chipimages/S2A_MSIL2A_2019---.B04.tif
cat2019/parcel_12345/12345_chipimages/...</code>
"""
wbox = widgets.HTML(
value=html,
placeholder="Documantation",
description="")
return wbox
def about():
from src import __version__
html = f"""
<H1>About</H1>
<H3>JRC D5 Food security - GTCAP</H3>
<H4>DIAS for CAP Checks by Monitoring, development platforms and services.</H4>
Authors:<br>
Guido Lemoine<br>
Konstantinos Anastasakis<br>
<br>
Copyright 2021, Joint Research Centre (JRC) European Commission<br>
License: 3-Clause BSD , Version: {__version__}
"""
wbox = HTML(
value=html,
placeholder='About',
description='',
)
return wbox
def widget_box_foi():
wbox = VBox(children=[ipycbm_help_foi(), about()])
return wbox
def ipycbm_help_foi():
html = """
<H2>FOI Assessment: Heterogeneity and Cardinality</H2>
    The FOI assessment notebook is based on the principle that inside a homogeneous FOI there should be only one type of pixels. By the same principle, a FOI which respects the 1-1 cardinality should not include clusters of pixels larger than a specified threshold (we can consider dispersed pixels different from the main class as “noise”).<br>
The FOI Assessment performs a spatial analysis on a "thematic raster" produced in advance. The thematic raster can be the result of any image/raster processing method yielding a class label for each pixel - crop classification, behavior analysis of land phenomenon, gridded data on soil, slope, humidity, etc.<br>
As an example, if the thematic raster is the result of a crop classification, a homogeneous FOI should have only one type of pixels that represent the respective crop, a cardinal FOI should not include any cluster of pixels from other class larger than a specified threshold.
If the thematic raster is the result of a behavior analysis, all the pixels inside an FOI should behave in the same way during a period of time.<br>
    For both heterogeneity and cardinality, the notebook provides two methods for the analysis: one based on area calculation (version 1) and one based on cluster size calculation (version 2). Both methods have similar results.
<br>
<H2>Version 1</H2>
The first version requires the connection to a database server (PostgreSQL with PostGIS extension)<br>
For the heterogeneity analysis the following steps are required (the steps correspond to the numbering on the interface):<br>
1. Connect to the database (at the moment only „Database connection settings” are required)<br>
    a) Upload the reference data shapefile to the server. A graphical interface is provided for the upload.<br>
b) Import uploaded shapefile to the database, specifying the name for the table that will be created in the database.<br>
2. Upload the raster „thematic” image. A graphical interface is provided. The accepted files are tif or tiff files. The thematic raster should be a one band raster file, with the pixel values representing the classes (like crop type or type of behaviour)<br>
3. Prepare FOI procedure – Allows the user to create the database functions on the database server. This procedure creates the necessary function and stored procedures on the database server.<br>
4. Select the required files for analysis:<br>
a) Vector file: the data on which the analysis will be applied. In case that we have more shapefiles uploaded on the server, this functionality allows us to select the one that we want to analyze.<br>
b) Thematic raster: the thematic raster provided. In case that we have more rasters uploaded on the server, this functionality allows us to select the one that we want to use on the analysis.<br>
    c) YAML file that holds the classes from the thematic raster file: this file specifies the classes of pixels from the thematic raster and can also provide the meaning of those classes. It should have the following structure:<br>
<code>example.yml</code><br>
<code>category_map:
    0: Unclassified
1: Class1
2: Class2
3: Class3
4: Class4
5: Class5
6: Class6
7: Class7
8: Class8
9: Class9
10: Class10</code><br>
Class1, Class2 can be replaced by the meaning of the class (like Wheat, Maize, etc. or by behavior name or any other ….).<br>
    The YAML file should include all the classes that exist in the thematic raster. A graphical interface is provided for the upload.<br>
5. Analysis parameters:<br>
    Heterogeneity thresholds: in order to exclude the influence of „noise” pixels, the user can specify the heterogeneity thresholds (for example, only the FOIs where one class of pixels has a percentage between 30 and 70 are considered heterogeneous).<br>
    Minimum area for cluster selection: the user can specify, in square meters, the minimum area of the clusters that are considered a cardinality issue. For example, clusters smaller than 2000 square meters can be considered as not influencing the FOI cardinality.<br>
6. Run FOI procedure.<br>
    Starts the FOI analysis. The result of the analysis is represented by three shapefiles that are stored in the “output_data” folder (/cbm/tmp/foi/output_data).<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foih_v1.shp</b> – represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_h – heterogeneity flag (0 for homogeneous FOIs and 1 for heterogeneous FOIs)<br>
• number of pixels for each class (the name of the attribute is the name of the class)<br>
    • total number of pixels for the respective FOI<br>
• percentage of pixels from each class (number of pixels for each class / total number of pixels inside the FOI)<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_v1.shp</b> - represents the initial shapefile and during the analysis the following attributes are added:<br>
    • foi_c – cardinality flag (0 for FOIs respecting the 1-1 cardinality and 1 for FOIs not respecting the 1-1 cardinality). As a result of this analysis, the FOIs that include more than one cluster of pixels from different classes bigger than the threshold are considered non-cardinal. For example, an FOI that includes two clusters of pixels from different classes (one arable land and one non-agricultural area), each of the clusters bigger than the threshold (ex. 2000 square meters), is considered as not respecting the 1-1 cardinality.<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_clusters_v1.shp</b> – represents only the clusters of pixels that are setting the FOI cardinality (for example if an FOI includes three clusters of pixels bigger that the threshold, only those clusters will be saved in this shapefile)<br>
<H2>Version 2</H2>
    The second version does not require a database server. All the calculations are made at pixel level using Python functions.<br>
    The interface and the steps are similar to those of Version 1. The main difference is that it does not include the functionality for connecting to a database and creating the functions on the database server.<br>
The different options available:<br>
Connectivity type: 8 or 4 connected pixels (4 indicating that diagonal pixels are not considered directly adjacent for polygon membership purposes or 8 indicating they are)<br>
    Negative buffer: the user can apply a negative buffer on the FOI in order to reduce boundary effects on the analysis (roads, adjacent FOIs, etc.)<br>
Cluster size (in pixels): the minimum number of pixels for which a cluster is taken into account.<br>
    The result of the analysis is represented by two shapefiles that are stored in the “output_data” folder (/cbm/tmp/foi/output_data).<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foih_v2.shp</b> – represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_h – heterogeneity flag (0 for homogeneous FOIs and 1 for heterogeneous FOIs)<br>
• number of pixels for each class (the name of the attribute is the name of the class)<br>
    • total number of pixels for the respective FOI<br>
• percentage of pixels from each class (number of pixels for each class / total number of pixels inside the FOI)<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_v2.shp</b> - represents the initial shapefile and during the analysis the following attributes are added:<br>
    • foi_c – cardinality flag (0 for FOIs respecting the 1-1 cardinality and 1 for FOIs not respecting the 1-1 cardinality). As a result of this analysis, the FOIs that include more than one cluster of pixels from different classes bigger than the threshold are considered as not respecting the 1-1 cardinality. For example, an FOI that includes two clusters of pixels from different classes (one arable land and one non-agricultural area), each of the clusters bigger than the threshold (ex. 20 pixels), is considered as not respecting the 1-1 cardinality.<br>
    • Clusters – the information about the clusters of pixels identified inside the FOI, as pairs of pixel class and cluster size: for example, (3, 25), (5, 120) means that inside the FOI two clusters were identified: one of pixels from class 3 with a cluster size of 25 pixels, and another of pixels from class 5 with a cluster size of 120 pixels.<br>
Author:<br>
Gilbert Voican
"""
wbox = widgets.HTML(
value=html,
placeholder="Documentation",
description="")
return wbox
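# Usage sketch (assumes a Jupyter environment; `display` is the standard
# IPython helper, not part of this module):
# from IPython.display import display
# display(widget_box_foi())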
| 10,363 | 0 | 115 |
3ea000e536560e123345143a404fffc37a3b77ef | 242 | gyp | Python | gyp/catch.cc.gyp | anvaka/Catch | 7fa9ecd62bd1ecf2f479e6c0b1416e400112c51b | [
"BSL-1.0"
] | null | null | null | gyp/catch.cc.gyp | anvaka/Catch | 7fa9ecd62bd1ecf2f479e6c0b1416e400112c51b | [
"BSL-1.0"
] | null | null | null | gyp/catch.cc.gyp | anvaka/Catch | 7fa9ecd62bd1ecf2f479e6c0b1416e400112c51b | [
"BSL-1.0"
] | 1 | 2021-07-21T17:06:42.000Z | 2021-07-21T17:06:42.000Z | {
"targets": [{
"target_name": "catch.cc",
"type": "none",
"direct_dependent_settings": {
"include_dirs": [
"../single_include"
],
},
"sources": [
"../include/catch_with_main.hpp"
],
}]
}
| 16.133333 | 38 | 0.479339 | {
"targets": [{
"target_name": "catch.cc",
"type": "none",
"direct_dependent_settings": {
"include_dirs": [
"../single_include"
],
},
"sources": [
"../include/catch_with_main.hpp"
],
}]
}
| 0 | 0 | 0 |
933a0aecc9f59faff16390b28bbca8a395484165 | 28,142 | py | Python | hockeyGamePrediction/model.py | PhysicsUofRAUI/HockeyPredictionModels | 64c4dd8e456ec248751d499dd1b3bc5d3b57ef16 | [
"MIT"
] | null | null | null | hockeyGamePrediction/model.py | PhysicsUofRAUI/HockeyPredictionModels | 64c4dd8e456ec248751d499dd1b3bc5d3b57ef16 | [
"MIT"
] | null | null | null | hockeyGamePrediction/model.py | PhysicsUofRAUI/HockeyPredictionModels | 64c4dd8e456ec248751d499dd1b3bc5d3b57ef16 | [
"MIT"
] | null | null | null | import numpy
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from keras.preprocessing.text import Tokenizer
from keras.optimizers import SGD
opt = SGD(lr=100)
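# Note: `opt`, Tokenizer, StandardScaler and Pipeline are currently unused;
# the models below compile with the 'adagrad' optimizer instead of `opt`.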
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
my_data = pd.read_csv("NoBlanksAndScoreAsDummy.csv")
score = my_data["score"]
my_data = my_data.drop("score", axis=1)
my_dummies = pd.get_dummies(my_data, prefix=['T1P1', 'T1P2', 'T1P3', 'T1P4', 'T1P5', 'T1P6', 'T1P7', 'T1P8', 'T1P9', 'T1P10', 'T1P11', 'T1P12', 'T1P13', 'T1P14', 'T1P15', 'T1P16', 'T1P17', 'T1P18', 'T1G', 'T2P1', 'T2P2', 'T2P3', 'T2P4', 'T2P5', 'T2P6', 'T2P7', 'T2P8', 'T2P9', 'T2P10', 'T2P11', 'T2P12', 'T2P13', 'T2P14', 'T2P15', 'T2P16', 'T2P17', 'T2P18', 'T2G'])
my_dummies["result"] = score
print(my_dummies)
dataset = my_dummies.values
X = dataset[:,0:12993]
Y = dataset[:,12993]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
print("Number,results,epochs,batch_size,number of layers")
# baseline model
def create_baseline():
    # create model
    model = Sequential()
    model.add(Dense(12993, input_dim=12993, kernel_initializer='normal', activation='relu'))
    model.add(Dense(2500, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(2500, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(2500, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
    return model
# evaluate model with standardized dataset
estimator = KerasClassifier(build_fn=create_baseline, epochs=500, batch_size=250, verbose=0)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
print("1, %.2f%% , (%.2f%%) ,500,25,3" % (results.mean()*100, results.std()*100))
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,10" % (results.mean()*100, results.std()*100))
#
#
#
# #
# #
# #
# #
# #
# #
# #
# # These next models are the same but they run for 1000 epochs
# #
# #
# #
# #
# #
# #
# #
# #
# #
#
# def create_baseline():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,3" % (results.mean()*100, results.std()*100))
#
#
#
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,10" % (results.mean()*100, results.std()*100))
#
#
# #
# #
# #
# #
# #
# # these next ones have 2000 epochs
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
#
# def create_baseline():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,3" % (results.mean()*100, results.std()*100))
#
#
#
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,10" % (results.mean()*100, results.std()*100))
| 44.669841 | 365 | 0.725215 | import numpy
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from keras.preprocessing.text import Tokenizer
from keras.optimizers import SGD
opt = SGD(lr=100)
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
my_data = pd.read_csv("NoBlanksAndScoreAsDummy.csv")
score = my_data["score"]
my_data = my_data.drop("score", axis=1)
my_dummies = pd.get_dummies(my_data, prefix=['T1P1', 'T1P2', 'T1P3', 'T1P4', 'T1P5', 'T1P6', 'T1P7', 'T1P8', 'T1P9', 'T1P10', 'T1P11', 'T1P12', 'T1P13', 'T1P14', 'T1P15', 'T1P16', 'T1P17', 'T1P18', 'T1G', 'T2P1', 'T2P2', 'T2P3', 'T2P4', 'T2P5', 'T2P6', 'T2P7', 'T2P8', 'T2P9', 'T2P10', 'T2P11', 'T2P12', 'T2P13', 'T2P14', 'T2P15', 'T2P16', 'T2P17', 'T2P18', 'T2G'])
my_dummies["result"] = score
print(my_dummies)
dataset = my_dummies.values
X = dataset[:,0:12993]
Y = dataset[:,12993]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
print("Number,results,epochs,batch_size,number of layers")
# baseline model
def create_baseline():
# create model
model = Sequential()
model.add(Dense(12993, input_dim=12993, kernel_initializer='normal', activation='relu'))
model.add(Dense(2500, kernel_initializer='normal', activation='sigmoid'))
model.add(Dense(2500, kernel_initializer='normal', activation='sigmoid'))
model.add(Dense(2500, kernel_initializer='normal', activation='sigmoid'))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
return model
# evaluate model with standardized dataset
estimator = KerasClassifier(build_fn=create_baseline, epochs=500, batch_size=250, verbose=0)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
print("1, %.2f%% , (%.2f%%) ,500,25,3" % (results.mean()*100, results.std()*100))
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,10" % (results.mean()*100, results.std()*100))
#
#
#
# #
# #
# #
# #
# #
# #
# #
# # These next models are the same but run for 1000 epochs instead
# #
# #
# #
# #
# #
# #
# #
# #
# #
#
# def create_baseline():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,3" % (results.mean()*100, results.std()*100))
#
#
#
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,10" % (results.mean()*100, results.std()*100))
#
#
# #
# #
# #
# #
# #
# # These next models are the same but run for 2000 epochs
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
#
# def create_baseline():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,3" % (results.mean()*100, results.std()*100))
#
#
#
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,10" % (results.mean()*100, results.std()*100))
| 543 | 0 | 22 |
542b5806c4a13f0aaa38eda82badbe8c249801d5 | 121 | py | Python | encoding/__init__.py | yohan-pg/stylegan2-ada-pytorch | e1225b08d55ff5ca38e1646fa430d3c3c3bb3c68 | [
"BSD-Source-Code"
] | null | null | null | encoding/__init__.py | yohan-pg/stylegan2-ada-pytorch | e1225b08d55ff5ca38e1646fa430d3c3c3bb3c68 | [
"BSD-Source-Code"
] | null | null | null | encoding/__init__.py | yohan-pg/stylegan2-ada-pytorch | e1225b08d55ff5ca38e1646fa430d3c3c3bb3c68 | [
"BSD-Source-Code"
] | null | null | null | from .stylegan_encoder_network import *
from .encoder import *
from .encoding_dataset import *
from .tensorboard import * | 30.25 | 39 | 0.809917 | from .stylegan_encoder_network import *
from .encoder import *
from .encoding_dataset import *
from .tensorboard import * | 0 | 0 | 0 |
5c6fd27905a5bef9aa55be20386c404c8c3495b4 | 8,224 | py | Python | s1acker/s1acker.py | quinoa42/s1acker | 79e520fb3a1d4bd4cc573c45e799d75637ccc00c | [
"MIT"
] | null | null | null | s1acker/s1acker.py | quinoa42/s1acker | 79e520fb3a1d4bd4cc573c45e799d75637ccc00c | [
"MIT"
] | null | null | null | s1acker/s1acker.py | quinoa42/s1acker | 79e520fb3a1d4bd4cc573c45e799d75637ccc00c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
s1acker.s1acker
~~~~~~~~~~~~~~
This module provides functions that deal with s1 search interface.
:copyright: (c) 2017 by quinoa42.
:license: MIT, see LICENSE for more details.
"""
import logging
import os.path as op
import re
import time
from itertools import chain
from os import makedirs
import requests
from bs4 import BeautifulSoup
flaten = chain.from_iterable
_SEARCH_URL = "http://bbs.saraba1st.com/2b/search.php?mod=forum"
_SEARCH_ADV_URL = "http://bbs.saraba1st.com/2b/search.php?mod=forum&adv=yes"
_TOPIC_URL = "http://bbs.saraba1st.com/2b/thread-{0}-1-1.html"
_HOST = "bbs.saraba1st.com"
_USER_AGENT = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) "
"Gecko/20100101 Firefox/54.0"
)
_TIME_OUT = 10
_SLEEP_TIME = 1
logger = logging.getLogger(__name__)
def _wait():
"""wait for _SLEEP_TIME seconds.
"""
logger.info("wait for %s seconds.", _SLEEP_TIME)
time.sleep(_SLEEP_TIME)
class S1ack(object):
"""S1ack defines a class that implement search functions."""
def __init__(self, srchtxt, srchuname=None):
"""construct a s1ack object with given srchtxt and optional srchuname.
:srchtxt: str that you want to search
:srchuname: optional str that limit search topics to posts by this user
"""
self._session = requests.Session()
self._session.headers.update({
'User-Agent': _USER_AGENT,
'Host': _HOST
})
r = self._session.get(
_SEARCH_ADV_URL,
timeout=_TIME_OUT,
headers={'Referer': _SEARCH_URL}
)
soup = BeautifulSoup(r.text, 'lxml')
formhash = soup.find("input", attrs={"name": "formhash"})['value']
self._input_data = {
"ascdesc": "desc",
"before": "",
"formhash": formhash,
"orderby": "lastpost",
"searchsubmit": "yes",
"srchfid[]": "all",
"srchfilter": "all",
"srchfrom": 0,
"srchtxt": srchtxt,
"srchuname": srchuname if srchuname is not None else ""
}
logger.debug("input data: %r", self._input_data)
def search(self):
"""Return the search result.
        :returns: list of Img objects
"""
search_result = self._get_search_urls()
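        # The nested map/flaten chain below walks the pipeline
        # search-result pages -> first page of each topic -> every page of
        # each topic -> images on each page, flattening at each step;
        # set() then removes images that appear on more than one page.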
result = list(
set(
flaten(
map(
self._get_imgs,
flaten(
map(
self._get_pages,
flaten(
map(self._get_first_page, search_result)
)
)
)
)
)
)
)
logger.debug("final result: %r", result)
logger.info("find %d pictures", len(result))
return result
def _get_search_urls(self):
"""Return the urls of the pages of searching result
:returns: a list of str, where one str represent a url of one page
"""
logger.info("trying to search")
r = self._session.post(
_SEARCH_URL,
timeout=_TIME_OUT,
headers={"Referer": _SEARCH_ADV_URL},
data=self._input_data
)
r.raise_for_status()
result = BeautifulSoup(r.text, 'lxml').find("div", class_="pg")
num = int(result.find_all("a")[-2].string) if result else 1
url = re.sub("&kw=.+$", "", r.url, 1)
urls = [r.url] + [url + "&page=" + str(i) for i in range(2, num + 1)]
logger.debug("search result: %r", urls)
return urls
def _get_first_page(self, url):
"""Return the first pages of the topics in the given search result page
:url: str, the url of search result page
        :returns: list of str, representing the list of urls of topics
"""
_wait()
logger.info("trying to get the topics in %s", url)
r = self._session.get(url, timeout=_TIME_OUT)
r.raise_for_status()
s = BeautifulSoup(r.text, "lxml")
topics = [
re.findall("tid=([0-9]{1,7})", topic.a['href'])[0]
for topic in s.find_all("h3", class_="xs3")
]
urls = [_TOPIC_URL.format(topic) for topic in topics]
logger.debug("topics in %s: %r", url, urls)
return urls
def _get_pages(self, url):
"""Return the urls of all pages of a topic.
        :url: str, representing the url of a topic
        :returns: list of str, representing the list of urls of the pages
"""
_wait()
logger.info("trying to get the pages of %s", url)
r = self._session.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, 'lxml')
multipage = soup.find('div', class_="pg")
num = int(multipage.find_all("a")[-2].string) if multipage else 1
urls = [
re.sub("[0-9]{1,3}-1.html", str(page) + "-1.html", url)
for page in range(1, num + 1)
]
logger.debug("all pages of %s: %r", url, urls)
return urls
def _get_imgs(self, url):
"""Get list of imgs from the url.
        :url: str, the url to explore
        :returns: a list of Img objects, representing the search result
"""
_wait()
logger.info("trying to get imgs on the page %s", url)
r = self._session.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, 'lxml')
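        # Collect candidate image urls from every post body, preferring the
        # full-size 'file' attribute over the inline 'src', then drop forum
        # smilies, photobucket leftovers and anything that is not png/jpg.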
imgs = [
url
for url in [
img.attrs.get('file') or img.attrs.get('src') for post in
soup.find_all("td", id=re.compile("postmessage_[0-9]{1,8}"))
for img in post.find_all("img")
]
if not re.match("http://static.saraba1st.com/image/smiley/", url)
and re.search("\.(png|jpg)$", url)
and not re.search("\.photobucket\.", url)
]
result = [
Img(img, str(index), url) for index, img in enumerate(set(imgs))
]
logger.debug("Imgs in %s: %r", url, result)
return result
class Img(object):
"""Img defines an object that can be downloaded."""
def __init__(self, url, name, origin=""):
"""construct an Img object with url, name, and optional origin
        :url: str representing the url of the Img
        :name: the name given to this Img when downloading
        :origin: str representing the origin of the Img, i.e. the url of the topic
"""
self._url = url
self._origin = origin
self._topic = re.findall("thread-([0-9]{1,9})",
origin)[0] if origin else ""
self._name = name
self._fmt = re.findall("(\.jpg|\.png)$", url)[0]
def download(self, dest):
"""download this Img to the dest directory.
:returns: None
"""
_wait()
logger.info("trying to get img at %s", self._url)
try:
img = requests.get(
self._url,
headers={"User-Agent": _USER_AGENT,
"Referer": self._origin},
timeout=_TIME_OUT
)
img.raise_for_status()
except Exception as e:
logger.error("Failed when trying to get %s : %s", self._url, e)
else:
dir_path = op.join(dest, self._topic)
if not op.exists(dir_path):
logger.info("%s not exist, making the directory", dir_path)
makedirs(dir_path)
path = op.join(dir_path, self._name + self._fmt)
logger.info("downloading img to %s", path)
with open(path, 'wb') as f:
f.write(img.content)
__str__ = __unicode__ = __repr__
| 30.572491 | 79 | 0.536722 | # -*- coding: utf-8 -*-
"""
s1acker.s1acker
~~~~~~~~~~~~~~
This module provides functions that deal with s1 search interface.
:copyright: (c) 2017 by quinoa42.
:license: MIT, see LICENSE for more details.
"""
import logging
import os.path as op
import re
import time
from itertools import chain
from os import makedirs
import requests
from bs4 import BeautifulSoup
flaten = chain.from_iterable
_SEARCH_URL = "http://bbs.saraba1st.com/2b/search.php?mod=forum"
_SEARCH_ADV_URL = "http://bbs.saraba1st.com/2b/search.php?mod=forum&adv=yes"
_TOPIC_URL = "http://bbs.saraba1st.com/2b/thread-{0}-1-1.html"
_HOST = "bbs.saraba1st.com"
_USER_AGENT = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) "
"Gecko/20100101 Firefox/54.0"
)
_TIME_OUT = 10
_SLEEP_TIME = 1
logger = logging.getLogger(__name__)
def _wait():
"""wait for _SLEEP_TIME seconds.
"""
logger.info("wait for %s seconds.", _SLEEP_TIME)
time.sleep(_SLEEP_TIME)
class S1ack(object):
"""S1ack defines a class that implement search functions."""
def __init__(self, srchtxt, srchuname=None):
"""construct a s1ack object with given srchtxt and optional srchuname.
:srchtxt: str that you want to search
:srchuname: optional str that limit search topics to posts by this user
"""
self._session = requests.Session()
self._session.headers.update({
'User-Agent': _USER_AGENT,
'Host': _HOST
})
r = self._session.get(
_SEARCH_ADV_URL,
timeout=_TIME_OUT,
headers={'Referer': _SEARCH_URL}
)
soup = BeautifulSoup(r.text, 'lxml')
formhash = soup.find("input", attrs={"name": "formhash"})['value']
self._input_data = {
"ascdesc": "desc",
"before": "",
"formhash": formhash,
"orderby": "lastpost",
"searchsubmit": "yes",
"srchfid[]": "all",
"srchfilter": "all",
"srchfrom": 0,
"srchtxt": srchtxt,
"srchuname": srchuname if srchuname is not None else ""
}
logger.debug("input data: %r", self._input_data)
def search(self):
"""Return the search result.
        :returns: list of Img objects
"""
search_result = self._get_search_urls()
result = list(
set(
flaten(
map(
self._get_imgs,
flaten(
map(
self._get_pages,
flaten(
map(self._get_first_page, search_result)
)
)
)
)
)
)
)
logger.debug("final result: %r", result)
logger.info("find %d pictures", len(result))
return result
def _get_search_urls(self):
"""Return the urls of the pages of searching result
:returns: a list of str, where one str represent a url of one page
"""
logger.info("trying to search")
r = self._session.post(
_SEARCH_URL,
timeout=_TIME_OUT,
headers={"Referer": _SEARCH_ADV_URL},
data=self._input_data
)
r.raise_for_status()
result = BeautifulSoup(r.text, 'lxml').find("div", class_="pg")
num = int(result.find_all("a")[-2].string) if result else 1
url = re.sub("&kw=.+$", "", r.url, 1)
urls = [r.url] + [url + "&page=" + str(i) for i in range(2, num + 1)]
logger.debug("search result: %r", urls)
return urls
def _get_first_page(self, url):
"""Return the first pages of the topics in the given search result page
:url: str, the url of search result page
        :returns: list of str, representing the list of urls of topics
"""
_wait()
logger.info("trying to get the topics in %s", url)
r = self._session.get(url, timeout=_TIME_OUT)
r.raise_for_status()
s = BeautifulSoup(r.text, "lxml")
topics = [
re.findall("tid=([0-9]{1,7})", topic.a['href'])[0]
for topic in s.find_all("h3", class_="xs3")
]
urls = [_TOPIC_URL.format(topic) for topic in topics]
logger.debug("topics in %s: %r", url, urls)
return urls
def _get_pages(self, url):
"""Return the urls of all pages of a topic.
        :url: str, representing the url of a topic
        :returns: list of str, representing the list of urls of the pages
"""
_wait()
logger.info("trying to get the pages of %s", url)
r = self._session.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, 'lxml')
multipage = soup.find('div', class_="pg")
num = int(multipage.find_all("a")[-2].string) if multipage else 1
urls = [
re.sub("[0-9]{1,3}-1.html", str(page) + "-1.html", url)
for page in range(1, num + 1)
]
logger.debug("all pages of %s: %r", url, urls)
return urls
def _get_imgs(self, url):
"""Get list of imgs from the url.
        :url: str, the url to explore
        :returns: a list of Img objects, representing the search result
"""
_wait()
logger.info("trying to get imgs on the page %s", url)
r = self._session.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, 'lxml')
imgs = [
url
for url in [
img.attrs.get('file') or img.attrs.get('src') for post in
soup.find_all("td", id=re.compile("postmessage_[0-9]{1,8}"))
for img in post.find_all("img")
]
if not re.match("http://static.saraba1st.com/image/smiley/", url)
and re.search("\.(png|jpg)$", url)
and not re.search("\.photobucket\.", url)
]
result = [
Img(img, str(index), url) for index, img in enumerate(set(imgs))
]
logger.debug("Imgs in %s: %r", url, result)
return result
class Img(object):
"""Img defines an object that can be downloaded."""
def __init__(self, url, name, origin=""):
"""construct an Img object with url, name, and optional origin
        :url: str representing the url of the Img
        :name: the name given to this Img when downloading
        :origin: str representing the origin of the Img, i.e. the url of the topic
"""
self._url = url
self._origin = origin
self._topic = re.findall("thread-([0-9]{1,9})",
origin)[0] if origin else ""
self._name = name
self._fmt = re.findall("(\.jpg|\.png)$", url)[0]
def download(self, dest):
"""download this Img to the dest directory.
:returns: None
"""
_wait()
logger.info("trying to get img at %s", self._url)
try:
img = requests.get(
self._url,
headers={"User-Agent": _USER_AGENT,
"Referer": self._origin},
timeout=_TIME_OUT
)
img.raise_for_status()
except Exception as e:
logger.error("Failed when trying to get %s : %s", self._url, e)
else:
dir_path = op.join(dest, self._topic)
if not op.exists(dir_path):
logger.info("%s not exist, making the directory", dir_path)
makedirs(dir_path)
path = op.join(dir_path, self._name + self._fmt)
logger.info("downloading img to %s", path)
with open(path, 'wb') as f:
f.write(img.content)
def __eq__(self, other):
return self._url == other._url
def __ne__(self, other):
return not self._url == other._url
    def __repr__(self):
        # NOTE: the format template appears to have been stripped during
        # extraction; a url-bearing template is assumed here, since __hash__
        # builds on repr() and __eq__ compares _url.
        return "<Img {_url}>".format(**self.__dict__)
def __hash__(self):
return repr(self).__hash__()
__str__ = __unicode__ = __repr__
| 187 | 0 | 108 |
3be4556a4ed0d130e75c6ce7e4ffbc24aa80f072 | 6,656 | py | Python | qt-creator-opensource-src-4.6.1/scripts/uichanges.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | 5 | 2018-12-22T14:49:13.000Z | 2022-01-13T07:21:46.000Z | qt-creator-opensource-src-4.6.1/scripts/uichanges.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | null | null | null | qt-creator-opensource-src-4.6.1/scripts/uichanges.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | 8 | 2018-07-17T03:55:48.000Z | 2021-12-22T06:37:53.000Z | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
"""
A simple program that parses untranslated.ts files
current directory *must* be the top level qtcreator source directory
Usage:
scripts/uichanges.py old_untranslated.ts qtcreator_untranslated.ts
IN TOP LEVEL QTC SOURCE DIRECTORY!
"""
import os, sys, string
import subprocess
from xml.sax import saxutils, handler, make_parser
baseDir = os.getcwd()
transDir = os.path.join(baseDir, 'share/qtcreator/translations')
unchangedContexts = 0
# --- The ContentHandler
# Generate a tree consisting of hash of context names.
# Each context name value contains a hash of messages
# Each message value contains the file name (or '<unknown>')
# ContentHandler methods
# --- The main program
oldGenerator = Generator()
oldParser = make_parser()
oldParser.setContentHandler(oldGenerator)
oldParser.parse(sys.argv[1])
oldTree = oldGenerator.tree()
newGenerator = Generator()
newParser = make_parser()
newParser.setContentHandler(newGenerator)
newParser.parse(sys.argv[2])
newTree = newGenerator.tree()
oldContextSet = set(oldTree.keys())
newContextSet = set(newTree.keys())
for c in sorted(oldContextSet.difference(newContextSet)):
report = diffContext(c, oldTree[c], {})
if report:
print(report.encode('utf-8'))
else:
unchangedContexts += 1
for c in sorted(newContextSet.difference(oldContextSet)):
report = diffContext(c, {}, newTree[c])
if report:
print(report.encode('utf-8'))
else:
unchangedContexts += 1
for c in sorted(newContextSet.intersection(oldContextSet)):
report = diffContext(c, oldTree[c], newTree[c])
if report:
print(report.encode('utf-8'))
else:
unchangedContexts += 1
print(u'{0} unchanged contexts'.format(unchangedContexts))
| 29.582222 | 100 | 0.589844 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
"""
A simple program that parses untranslated.ts files
current directory *must* be the top level qtcreator source directory
Usage:
scripts/uichanges.py old_untranslated.ts qtcreator_untranslated.ts
IN TOP LEVEL QTC SOURCE DIRECTORY!
"""
import os, sys, string
import subprocess
from xml.sax import saxutils, handler, make_parser
baseDir = os.getcwd()
transDir = os.path.join(baseDir, 'share/qtcreator/translations')
unchangedContexts = 0
# --- The ContentHandler
# Generate a tree consisting of hash of context names.
# Each context name value contains a hash of messages
# Each message value contains the file name (or '<unknown>')
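# For illustration only (hypothetical context and file names), the tree has
# the shape:
#   {'Core::MainWindow': {'Open File': 'src/plugins/coreplugin/mainwindow.cpp',
#                         'Exit': '<unknown>'}}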
class Generator(handler.ContentHandler):
def __init__(self):
handler.ContentHandler.__init__(self)
self._tree = {}
self._contextTree = {}
self._context = ''
self._file = ''
self._msg = ''
self._chars = ''
# ContentHandler methods
def startDocument(self):
self._tree = {}
self._contextTree = {}
self._context = ''
self._file = ''
self._chars = ''
def startElement(self, name, attrs):
if name == 'location':
fn = attrs.get('filename')
if fn:
fn = os.path.normpath(os.path.join(transDir, fn))
fn = os.path.relpath(fn, baseDir)
else:
fn = '<unknown>'
self._file = fn
return
def endElement(self, name):
if name == 'name':
if self._context == '':
self._context = self._chars.strip()
self._chars = ''
elif name == 'source':
if self._chars:
self._msg = self._chars.strip()
self._chars = ''
elif name == 'message':
if self._msg:
self._contextTree[self._msg] = self._file
self._chars = ''
self._file = '<unknown>'
self._msg = ''
elif name == 'context':
if self._context != '':
self._tree[self._context] = self._contextTree
self._contextTree = {}
self._context = ''
def characters(self, content):
self._chars += content
def tree(self):
return self._tree
def commitsForFile(file):
output = ''
if file == '<unknown>':
return output
try:
output = subprocess.check_output(u'git log -1 -- "{0}"'.format(file),
shell=True, stderr=subprocess.STDOUT,
universal_newlines=True)
except:
output = ''
return output
def examineMsg(ctx, msg, oldFile, newFile):
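    # Classify one message as unchanged, added (ADD), deleted (DEL) or moved
    # (MOV) by comparing its old and new file locations, and pair the verdict
    # with the git log of the file involved.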
if oldFile == newFile:
# return ('', u' EQL Message: "{0}" ({1})\n'.format(msg, oldFile))
return ('', '')
if oldFile == '':
return (commitsForFile(newFile), u' ADD: "{0}" ({1})\n'.format(msg, newFile))
if newFile == '':
return (commitsForFile(oldFile), u' DEL: "{0}" ({1})\n'.format(msg, oldFile))
return (commitsForFile(newFile), u' MOV: "{0}" ({1} -> {2})\n'.format(msg, oldFile, newFile))
def diffContext(ctx, old, new):
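    # Diff two {message: filename} hashes for a single context and build a
    # human-readable report of added/deleted/moved messages together with the
    # git commits that touched the affected files.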
oldMsgSet = set(old.keys())
newMsgSet = set(new.keys())
gitResults = set()
report = ''
unchanged = 0
for m in sorted(oldMsgSet.difference(newMsgSet)):
res = examineMsg(ctx, m, old[m], '')
gitResults.add(res[0])
report = report + res[1]
if not res[1]:
unchanged += 1
for m in sorted(newMsgSet.difference(oldMsgSet)):
res = examineMsg(ctx, m, '', new[m])
gitResults.add(res[0])
report = report + res[1]
if not res[1]:
unchanged += 1
for m in sorted(oldMsgSet.intersection(newMsgSet)):
res = examineMsg(ctx, m, old[m], new[m])
gitResults.add(res[0])
report = report + res[1]
if not res[1]:
unchanged += 1
gitResults.discard('')
if not report:
return ''
report = u'\nContext "{0}":\n{1} {2} unchanged messages'.format(ctx, report, unchanged)
if gitResults:
report += '\n\n Git Commits:\n'
for g in gitResults:
if g:
g = u' {}'.format(unicode(g.replace('\n', '\n '), errors='replace'))
report += g
return report
# --- The main program
oldGenerator = Generator()
oldParser = make_parser()
oldParser.setContentHandler(oldGenerator)
oldParser.parse(sys.argv[1])
oldTree = oldGenerator.tree()
newGenerator = Generator()
newParser = make_parser()
newParser.setContentHandler(newGenerator)
newParser.parse(sys.argv[2])
newTree = newGenerator.tree()
oldContextSet = set(oldTree.keys())
newContextSet = set(newTree.keys())
for c in sorted(oldContextSet.difference(newContextSet)):
report = diffContext(c, oldTree[c], {})
if report:
print(report.encode('utf-8'))
else:
unchangedContexts += 1
for c in sorted(newContextSet.difference(oldContextSet)):
report = diffContext(c, {}, newTree[c])
if report:
print(report.encode('utf-8'))
else:
unchangedContexts += 1
for c in sorted(newContextSet.intersection(oldContextSet)):
report = diffContext(c, oldTree[c], newTree[c])
if report:
print(report.encode('utf-8'))
else:
unchangedContexts += 1
print(u'{0} unchanged contexts'.format(unchangedContexts))
| 3,440 | 19 | 253 |
1c815d912194a3342599e0c10f8f781a58a15488 | 14,539 | py | Python | kinopoisk/tests/movie.py | nine9797/kinopoiskpy | 72f7597200dd1f1a1db5e3019e77489b868886a4 | [
"BSD-3-Clause"
] | null | null | null | kinopoisk/tests/movie.py | nine9797/kinopoiskpy | 72f7597200dd1f1a1db5e3019e77489b868886a4 | [
"BSD-3-Clause"
] | null | null | null | kinopoisk/tests/movie.py | nine9797/kinopoiskpy | 72f7597200dd1f1a1db5e3019e77489b868886a4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import unicode_literals
from datetime import datetime
from kinopoisk.movie import Movie
from .base import BaseTest
| 46.302548 | 862 | 0.656716 | # -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import unicode_literals
from datetime import datetime
from kinopoisk.movie import Movie
from .base import BaseTest
class MovieTest(BaseTest):
def test_movie_posters_page_source(self):
m = Movie(id=51319)
m.get_content('posters')
self.assertGreaterEqual(len(m.posters), 34)
def test_movie_search_manager_redacted(self):
movies = Movie.objects.search('Без цензуры 2007')
self.assertGreater(len(movies), 1)
m = movies[0]
self.assertEqual(m.id, 278229)
self.assertEqual(m.year, 2007)
self.assertEqual(m.title, 'Без цензуры')
self.assertEqual(m.title_en, 'Redacted')
self.assertEqual(m.runtime, 90)
self.assertEqual(m.rating, 6.134)
self.assertGreaterEqual(m.votes, 1760)
def test_movie_search_manager_pulp_fiction(self):
movies = Movie.objects.search('pulp fiction')
self.assertGreater(len(movies), 1)
m = movies[0]
self.assertEqual(m.id, 342)
self.assertEqual(m.title, 'Криминальное чтиво')
self.assertEqual(m.year, 1994)
self.assertEqual(m.title_en, 'Pulp Fiction')
def test_movie_search_manager_warcraft(self):
movies = Movie.objects.search('Варкрафт')
self.assertEqual(len(movies), 1)
m = movies[0]
self.assertEqual(m.id, 277328)
self.assertEqual(m.title, 'Варкрафт')
self.assertEqual(m.year, 2016)
self.assertEqual(m.title_en, 'Warcraft')
def test_movie_main_page_id_278229(self):
"""
Test of movie manager, movie obtain by id (not via search)
"""
m = Movie(id=278229)
m.get_content('main_page')
m.get_content('trailers')
trailers_ids = [trailer.id for trailer in m.trailers]
trailers_files = [trailer.file for trailer in m.trailers]
self.assertEqual(m.id, 278229)
self.assertEqual(m.year, 2007)
self.assertEqual(m.title, 'Без цензуры')
self.assertEqual(m.title_en, 'Redacted')
self.assertEqual(m.plot,
'В центре картины — небольшой отряд американских солдат на контрольно-пропускном пункте в Ираке. Причём восприятие их истории постоянно меняется. Мы видим события глазами самих солдат, представителей СМИ, иракцев и понимаем, как на каждого из них влияет происходящее, их встречи и столкновения друг с другом.')
self.assertEqual(m.runtime, 90)
self.assertEqual(m.tagline, '«Фильм, запрещенный к прокату во многих странах»')
self.assertGreater(len(m.trailers), 3)
self.assertTrue('gettrailer.php?quality=hd&trailer_id=4476' in trailers_files)
self.assertTrue('4476' in trailers_ids)
self.assertEqualPersons(m.actors, ['Иззи Диаз', 'Роб Дивейни', 'Ти Джонс', 'Анас Веллман', 'Майк Фигуроа',
'Яналь Кассай', 'Дхиая Калиль', 'Кел О’Нил', 'Дэниэл Стюарт-Шерман',
'Патрик Кэрролл'])
def test_movie_main_page_id_746251(self):
m = Movie(id=746251)
m.get_content('main_page')
self.assertEqual(m.year, None)
self.assertEqual(m.title, 'Ловкость')
self.assertEqual(m.genres, ['драма'])
self.assertEqual(m.countries, ['США'])
def test_movie_main_page_empty_actors(self):
m = Movie(id=926005)
m.get_content('main_page')
self.assertEqual(m.actors, [])
def test_movie_main_page_id_4374(self):
"""
Test of movie manager, movie obtain by id (not via search)
"""
m = Movie(id=4374)
m.get_content('main_page')
m.get_content('trailers')
trailers_ids = [trailer.id for trailer in m.trailers]
trailers_files = [trailer.file for trailer in m.trailers]
self.assertEqual(m.id, 4374)
self.assertEqual(m.year, 2003)
self.assertEqual(m.title, 'Пираты Карибского моря: Проклятие Черной жемчужины')
self.assertEqual(m.title_en, 'Pirates of the Caribbean: The Curse of the Black Pearl')
self.assertEqual(m.plot,
'Жизнь харизматичного авантюриста, капитана Джека Воробья, полная увлекательных приключений, резко меняется, когда его заклятый враг — капитан Барбосса — похищает корабль Джека, Черную Жемчужину, а затем нападает на Порт Ройал и крадет прекрасную дочь губернатора, Элизабет Свонн. Друг детства Элизабет, Уилл Тернер, вместе с Джеком возглавляет спасательную экспедицию на самом быстром корабле Британии, в попытке вызволить девушку из плена и заодно отобрать у злодея Черную Жемчужину. Вслед за этой парочкой отправляется амбициозный коммодор Норрингтон, который к тому же числится женихом Элизабет. Однако Уилл не знает, что над Барбоссой висит вечное проклятие, при лунном свете превращающее его с командой в живых скелетов. Проклятье будет снято лишь тогда, когда украденное золото Ацтеков будет возвращено пиратами на старое место.')
self.assertEqual(m.runtime, 143)
self.assertEqual(m.rating, 8.335)
self.assertEqual(m.imdb_rating, 8.00)
self.assertGreaterEqual(m.votes, 327195)
self.assertGreaterEqual(m.imdb_votes, 859395)
self.assertEqual(m.tagline,
"«Over 3000 Islands of Paradise -- For Some it's A Blessing -- For Others... It's A Curse»")
self.assertGreater(len(m.trailers), 2)
self.assertTrue('529' in trailers_ids)
self.assertTrue('gettrailer.php?quality=hd&trailer_id=529' in trailers_files)
self.assertEqual(m.genres, ['фэнтези', 'боевик', 'приключения'])
self.assertEqual(m.countries, ['США'])
self.assertGreaterEqual(m.budget, 140000000)
self.assertGreaterEqual(m.marketing, 40000000)
self.assertGreaterEqual(m.profit_usa, 305413918)
self.assertGreaterEqual(m.profit_russia, 9060000)
self.assertGreaterEqual(m.profit_world, 654264015)
self.assertEqualPersons(m.actors,
['Джонни Депп', 'Джеффри Раш', 'Орландо Блум', 'Кира Найтли', 'Джек Девенпорт',
'Кевин МакНэлли', 'Джонатан Прайс', 'Ли Аренберг', 'Макензи Крук', 'Дэвид Бэйли'])
self.assertEqualPersons(m.directors, ['Гор Вербински'])
self.assertEqualPersons(m.screenwriters, ['Тед Эллиот', 'Терри Россио', 'Стюарт Битти'])
self.assertEqualPersons(m.producers, ['Джерри Брукхаймер', 'Пол Дисон', 'Брюс Хендрикс'])
self.assertEqualPersons(m.operators, ['Дариуш Вольски'])
self.assertEqualPersons(m.composers, ['Клаус Бадельт'])
self.assertEqualPersons(m.art_direction_by, ['Брайан Моррис', 'Дерек Р. Хилл', 'Майкл Пауэлс'])
self.assertEqualPersons(m.editing_by, ['Стивен Е. Ривкин', 'Артур Шмидт', 'Крэйг Вуд'])
def test_movie_main_page_id_258687(self):
"""
Test of movie manager, movie obtain by id (not via search)
"""
m = Movie(id=258687)
m.get_content('main_page')
m.get_content('trailers')
trailers_ids = [trailer.id for trailer in m.trailers]
trailers_files = [trailer.file for trailer in m.trailers]
self.assertEqual(m.id, 258687)
self.assertEqual(m.year, 2014)
self.assertEqual(m.title, 'Интерстеллар')
self.assertEqual(m.title_en, 'Interstellar')
self.assertEqual(m.plot,
'Когда засуха приводит человечество к продовольственному кризису, коллектив исследователей и учёных отправляется сквозь червоточину (которая предположительно соединяет области пространства-времени через большое расстояние) в путешествие, чтобы превзойти прежние ограничения для космических путешествий человека и переселить человечество на другую планету.')
self.assertEqual(m.runtime, 169)
self.assertEqual(m.tagline, '«Следующий шаг человечества станет величайшим»')
self.assertGreater(len(m.trailers), 70)
self.assertTrue('109352' in trailers_ids)
        self.assertTrue('gettrailer.php?quality=hd&trailer_id=109352' in trailers_files)
self.assertEqual(m.genres, ['фантастика', 'драма', 'приключения'])
self.assertEqual(m.countries, ['США', 'Великобритания', 'Канада'])
self.assertGreaterEqual(m.profit_usa, 158445319)
self.assertGreaterEqual(m.profit_russia, 24110578)
self.assertGreaterEqual(m.profit_world, 592845319)
self.assertEqualPersons(m.directors, ['Кристофер Нолан'])
self.assertEqualPersons(m.screenwriters, ['Джонатан Нолан', 'Кристофер Нолан'])
self.assertEqualPersons(m.producers, ['Кристофер Нолан', 'Линда Обст', 'Эмма Томас'])
self.assertEqualPersons(m.operators, ['Хойте Ван Хойтема'])
self.assertEqualPersons(m.composers, ['Ханс Циммер'])
def test_movie_by_id_1552(self):
m = Movie(id=1552)
m.get_content('main_page')
# m.get_content('trailers')
self.assertEqual(m.profit_russia, 41000)
self.assertEqual(m.budget, 10000000)
def test_movie_trailers(self):
"""
Test of movie trailers source page
"""
m = Movie(id=521689)
m.get_content('trailers')
self.assertGreaterEqual(len(m.trailers), 11)
trailers_ids = [trailer.id for trailer in m.trailers]
trailers_files = [trailer.file for trailer in m.trailers]
self.assertTrue('76485' in trailers_ids)
self.assertTrue('gettrailer.php?quality=hd&trailer_id=76485' in trailers_files)
self.assertTrue('74666' in trailers_ids)
self.assertTrue('gettrailer.php?quality=hd&trailer_id=74666' in trailers_files)
self.assertEqual(m.youtube_ids, ['e4f5keHX_ks'])
def test_movie_cast(self):
"""
Test of movie cast source page
"""
m = Movie(id=4220)
m.get_content('cast')
self.assertEqual(len(m.cast), 7)
self.assertGreaterEqual(len(m.cast['director']), 1)
self.assertGreaterEqual(len(m.cast['actor']), 49)
self.assertGreaterEqual(len(m.cast['producer']), 4)
self.assertGreaterEqual(len(m.cast['writer']), 3)
self.assertGreaterEqual(len(m.cast['operator']), 2)
self.assertGreaterEqual(len(m.cast['design']), 1)
self.assertGreaterEqual(len(m.cast['editor']), 1)
self.assertEqual(m.cast['actor'][0].person.id, 8986)
self.assertEqual(m.cast['actor'][0].person.name, 'Питер Фонда')
self.assertEqual(m.cast['actor'][0].person.name_en, 'Peter Fonda')
self.assertEqual(m.cast['actor'][0].name, 'Wyatt')
        # credited as: ...
self.assertEqual(m.cast['actor'][13].person.name, 'Сэнди Браун Уайет')
self.assertEqual(m.cast['actor'][13].person.name_en, 'Sandy Brown Wyeth')
self.assertEqual(m.cast['actor'][13].name, 'Joanne')
def test_movie_cast_1(self):
"""
Test of movie cast source page
"""
m = Movie(id=63991)
m.get_content('cast')
self.assertEqual(len(m.cast), 11)
self.assertGreaterEqual(len(m.cast['director']), 1)
self.assertGreaterEqual(len(m.cast['actor']), 49)
self.assertGreaterEqual(len(m.cast['producer']), 4)
self.assertGreaterEqual(len(m.cast['voice_director']), 1)
self.assertGreaterEqual(len(m.cast['translator']), 1)
self.assertGreaterEqual(len(m.cast['voice']), 4)
self.assertGreaterEqual(len(m.cast['writer']), 3)
self.assertGreaterEqual(len(m.cast['operator']), 1)
self.assertGreaterEqual(len(m.cast['composer']), 1)
self.assertGreaterEqual(len(m.cast['design']), 1)
self.assertGreaterEqual(len(m.cast['editor']), 1)
# with $
self.assertEqual(m.cast['actor'][0].person.id, 6245)
self.assertEqual(m.cast['actor'][0].person.name, 'Джонни Депп')
self.assertEqual(m.cast['actor'][0].person.name_en, 'Johnny Depp')
self.assertEqual(m.cast['actor'][0].name, 'Jack Sparrow')
# no mention
self.assertEqual(m.cast['actor'][16].person.id, 24683)
self.assertEqual(m.cast['actor'][16].name, 'Captain Hector Barbossa')
# voice
self.assertEqual(m.cast['actor'][63].person.id, 288908)
self.assertEqual(m.cast['actor'][63].name, 'Parrot')
self.assertEqual(m.cast['actor'][63].voice, True)
# with $ and no name
self.assertEqual(m.cast['producer'][0].name, '')
def test_movie_repr(self):
instance = Movie(title='Молчание ягнят', title_en='The Silence of the Lambs', year='1990')
self.assertEqual(instance.__repr__(), 'Молчание ягнят (The Silence of the Lambs), 1990')
def test_movie_series_search_glee(self):
movies = Movie.objects.search('glee')
self.assertGreaterEqual(len(movies), 1)
m = movies[0] # Glee / Хор / Лузеры
self.assertTrue(m.series)
m.get_content('series')
self.assertGreaterEqual(len(m.seasons), 4)
f = m.seasons[0]
self.assertEqual(len(f.episodes), 22)
self.assertEqual(f.year, 2010)
e = m.seasons[0].episodes[5]
self.assertEqual(e.title, 'Витамин D')
self.assertEqual(e.release_date, datetime(2010, 11, 20).date())
def test_movie_series_search_killing(self):
        # This may become false someday as well; we should find a TV series that
        # announced more episodes but stopped showing them at some point. At the
        # moment, I can't find any.
movies = Movie.objects.search('the killing')
self.assertGreaterEqual(len(movies), 1)
m = movies[0] # The Killing / Убийство
self.assertTrue(m.series)
m.get_content('series')
ls = m.seasons[-1]
le = ls.episodes[-1]
self.assertEqual(le.title, 'Эдем')
# self.assertIsNone(le.release_date)
def test_movie_series_main_page_kickass(self):
m = Movie(id=419200) # Kick-Ass / Пипец
m.get_content('main_page')
self.assertFalse(m.series)
self.assertRaises(ValueError, m.get_content, ('series',))
def test_movie_series_main_page_bigband(self):
m = Movie(id=306084) # The Big Bang Theory / Теория большого взрыва
m.get_content('main_page')
self.assertTrue(m.series)
def test_movie_rating_from_search_result(self):
movies = Movie.objects.search('the big bang theory')
self.assertGreaterEqual(len(movies), 1)
m = movies[0] # The Big Bang Theory Series
self.assertGreaterEqual(m.rating, 8.5)
| 3,892 | 12,618 | 23 |
189671d6644f6055e5c2938128947d789ac96739 | 4,792 | py | Python | src/utils/data_drift_setup.py | nfmoore/aml-batch-deployment-template | 6248310690ddbbbe027fd33e90dae95af0aeff8d | [
"MIT"
] | null | null | null | src/utils/data_drift_setup.py | nfmoore/aml-batch-deployment-template | 6248310690ddbbbe027fd33e90dae95af0aeff8d | [
"MIT"
] | null | null | null | src/utils/data_drift_setup.py | nfmoore/aml-batch-deployment-template | 6248310690ddbbbe027fd33e90dae95af0aeff8d | [
"MIT"
] | null | null | null | import os
import sys
from argparse import ArgumentParser
from azureml.core import Dataset, Datastore, Workspace
from azureml.data.dataset_factory import DataType
from azureml.datadrift import DataDriftDetector
target_dataset_timestamp_column = "datetime"
input_schema_dir = os.path.join("input", "schema")
data_dir = "data"
input_schema_file = "schema.csv"
def parse_args(argv):
ap = ArgumentParser("data_drift_setup")
ap.add_argument("--subscription_id", required=True)
ap.add_argument("--resource_group", required=True)
ap.add_argument("--workspace_name", required=True)
ap.add_argument("--target_dataset_path", required=True)
ap.add_argument("--target_datastore_name", required=True)
ap.add_argument("--baseline_dataset_name", required=True)
ap.add_argument("--data_drift_monitor_name", required=True)
ap.add_argument("--model_id", required=True)
ap.add_argument("--score_pipeline_endpoint_name", required=True)
ap.add_argument("--compute_target", required=True)
ap.add_argument("--feature_list", required=True)
ap.add_argument("--frequency", default="Day")
args, _ = ap.parse_known_args(argv)
return args
def main():
# Parse command line arguments
args = parse_args(sys.argv[1:])
    # Retrieve workspace
workspace = Workspace.get(
subscription_id=args.subscription_id,
resource_group=args.resource_group,
name=args.workspace_name,
)
    # Retrieve compute cluster
compute_target = workspace.compute_targets[args.compute_target]
    # Get the baseline dataset
baseline_dataset = Dataset.get_by_name(workspace, args.baseline_dataset_name)
    # Retrieve datastore for target dataset
target_datastore = Datastore.get(workspace, args.target_datastore_name)
# Upload sample data to the datastore
# [Note: this step is required to ensure a data sample is present for validation when
# registering a new target dataset below]
os.makedirs(data_dir, exist_ok=True)
baseline_dataset.take(1).to_pandas_dataframe().drop(
["cardiovascular_disease"], axis=1
).to_csv(os.path.join(data_dir, input_schema_file), index=False)
target_datastore.upload(src_dir=data_dir, target_path=input_schema_dir)
# Create a target dataset referencing the cloud location
target_dataset = Dataset.Tabular.from_delimited_files(
[(target_datastore, args.target_dataset_path)],
validate=False,
infer_column_types=False,
set_column_types={
"age": DataType.to_float(decimal_mark="."),
"height": DataType.to_float(decimal_mark="."),
"weight": DataType.to_float(decimal_mark="."),
"systolic": DataType.to_float(decimal_mark="."),
"diastolic": DataType.to_float(decimal_mark="."),
"gender": DataType.to_string(),
"cholesterol": DataType.to_string(),
"glucose": DataType.to_string(),
"smoker": DataType.to_string(),
"alcoholic": DataType.to_string(),
"active": DataType.to_string(),
"datetime": DataType.to_datetime(),
},
)
# Assign timestamp column for Tabular Dataset to activate time series related APIs
target_dataset = target_dataset.with_timestamp_columns(
timestamp=target_dataset_timestamp_column
)
    # Split the model id into model name and version
model_name, model_version = args.model_id.split(":")
# Register dataset to Workspace
target_dataset_name = f"{args.target_datastore_name}-{model_name}-{model_version}-{args.score_pipeline_endpoint_name}"
target_dataset.register(
workspace, target_dataset_name, create_new_version=True,
)
print("Variable [target_dataset]:", target_dataset)
print("Variable [baseline_dataset]:", baseline_dataset)
# Define features to monitor
feature_list = args.feature_list.split(",")
print("Variable [feature_list]:", args.feature_list)
# List data drift detectors
drift_detector_list = DataDriftDetector.list(workspace)
# Delete existing data drift detector
for drift_monitor in drift_detector_list:
if drift_monitor.name == args.data_drift_monitor_name:
print("Deleteing existing data drift monitor...")
drift_monitor.delete()
# Define data drift detector
monitor = DataDriftDetector.create_from_datasets(
workspace,
args.data_drift_monitor_name,
baseline_dataset,
target_dataset,
compute_target=compute_target,
frequency=args.frequency,
feature_list=feature_list,
)
print("Variable [monitor]:", monitor)
# Enable the pipeline schedule for the data drift detector
monitor.enable_schedule()
if __name__ == "__main__":
main()
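# A hedged illustration of the CLI surface defined by parse_args above; every
# value is a placeholder, not a working Azure ML configuration:
#   python data_drift_setup.py \
#       --subscription_id <sub-id> --resource_group <rg> --workspace_name <ws> \
#       --target_dataset_path inference/scores --target_datastore_name scores \
#       --baseline_dataset_name baseline-ds --data_drift_monitor_name drift-monitor \
#       --model_id cardio-model:1 --score_pipeline_endpoint_name score-pipeline \
#       --compute_target cpu-cluster --feature_list age,height,weight --frequency Day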
4ccbcc7c5db6b2d148de0aa6c68316018fe7d135 | 8,315 | py | Python | scripts/plot_exp_normal.py | LequnWang/Improve-Screening-via-Calibrated-Subset-Selection | de397a600b7ac1d4a2a844c58bb7ebde29841b2a | [
"MIT"
] | 4 | 2022-03-16T08:40:10.000Z | 2022-03-17T13:02:17.000Z | scripts/plot_exp_normal.py | Networks-Learning/Improve-Screening-via-Calibrated-Subset-Selection | de397a600b7ac1d4a2a844c58bb7ebde29841b2a | [
"MIT"
] | null | null | null | scripts/plot_exp_normal.py | Networks-Learning/Improve-Screening-via-Calibrated-Subset-Selection | de397a600b7ac1d4a2a844c58bb7ebde29841b2a | [
"MIT"
] | 1 | 2022-03-16T08:29:05.000Z | 2022-03-16T08:29:05.000Z | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import os
from plot_constants import *
plt.rcParams.update(params)
plt.rc('font', family='serif')
if __name__ == "__main__":
fig, axs = plt.subplots(1, 4)
fig.set_size_inches(28, 6)
from params_exp_noise import *
algorithms = ["ucss", "iso_reg_ss", "platt_scal_ss"]
algorithm_df_guarantee = {
"css": True,
"ucss": False,
"iso_reg_ss": False,
"platt_scal_ss": False
}
algorithm_labels = {
"css": "CSS",
"ucss": "Uncalibrated",
"iso_reg_ss": "Isotonic",
"platt_scal_ss": "Platt"
}
algorithm_colors = {
"css": "tab:blue",
"ucss": "tab:red",
"iso_reg_ss": "tab:purple",
"platt_scal_ss": "tab:cyan"
}
algorithm_markers = {
"css": "s",
"ucss": 9,
"iso_reg_ss": 10,
"platt_scal_ss": 11
}
for umb_num_bin in umb_num_bins:
algorithms.append("umb_" + str(umb_num_bin))
algorithm_labels["umb_" + str(umb_num_bin)] = "UMB {} Bins".format(umb_num_bin)
algorithm_colors["umb_" + str(umb_num_bin)] = umb_colors[umb_num_bin]
algorithm_df_guarantee["umb_" + str(umb_num_bin)] = True
algorithm_markers["umb_" + str(umb_num_bin)] = umb_markers[umb_num_bin]
algorithms.append("css")
metrics = ["num_selected", "num_qualified", "num_unqualified", "constraint_satisfied"]
results = {}
for noise_ratio in noise_ratios:
results[noise_ratio] = {}
for algorithm in algorithms:
results[noise_ratio][algorithm] = {}
for metric in metrics:
results[noise_ratio][algorithm][metric] = {}
results[noise_ratio][algorithm][metric]["values"] = []
for noise_ratio in noise_ratios:
for run in runs:
exp_identity_string = "_".join([str(n_train), str(noise_ratio), str(n_cal), lbd, str(run)])
for algorithm in algorithms:
result_path = os.path.join(exp_dir, exp_identity_string + "_{}_result.pkl".format(algorithm))
collect_results_normal_exp(result_path, noise_ratio, algorithm, results)
for noise_ratio in noise_ratios:
for algorithm in algorithms:
for metric in metrics:
results[noise_ratio][algorithm][metric]["mean"] = np.mean(results[noise_ratio][algorithm][metric]["values"])
results[noise_ratio][algorithm][metric]["std"] = np.std(results[noise_ratio][algorithm][metric]["values"],
ddof=1)
# plotting whether constraint is satisfied
handles = []
for algorithm in algorithms:
mean_algorithm = np.array([results[noise_ratio][algorithm]["constraint_satisfied"]["mean"]
for noise_ratio in noise_ratios])
std_err_algorithm = np.array(
[results[noise_ratio][algorithm]["constraint_satisfied"]["std"] / np.sqrt(n_runs) for noise_ratio in noise_ratios])
line = axs[0].plot(noise_ratios_label, mean_algorithm, color=algorithm_colors[algorithm],
marker=algorithm_markers[algorithm], linewidth=line_width,
label=algorithm_labels[algorithm])
if algorithm == "css":
handles = [line[0]] + handles
else:
handles.append(line[0])
axs[0].errorbar(noise_ratios_label, mean_algorithm, std_err_algorithm, color=algorithm_colors[algorithm],
marker=algorithm_markers[algorithm], linewidth=line_width,
label=algorithm_labels[algorithm], capthick=capthick)
axs[0].yaxis.set_major_locator(ticker.MultipleLocator(0.5))
axs[0].set_xlabel("$r_{\mathrm{noise}}$", fontsize=font_size)
axs[0].set_ylabel("EQ", fontsize=font_size)
# plotting the number of selected applicants
for algorithm in algorithms:
if not algorithm_df_guarantee[algorithm]:
continue
mean_algorithm = np.array([results[noise_ratio][algorithm]["num_selected"]["mean"] for noise_ratio
in noise_ratios])
std_algorithm = np.array([results[noise_ratio][algorithm]["num_selected"]["std"] for noise_ratio
in noise_ratios])
axs[1].plot(noise_ratios_label, mean_algorithm, linewidth=line_width, color=algorithm_colors[algorithm], marker=algorithm_markers[algorithm]
, label=algorithm_labels[algorithm])
axs[1].fill_between(noise_ratios_label, mean_algorithm - std_algorithm,
mean_algorithm + std_algorithm, alpha=transparency,
color=algorithm_colors[algorithm])
axs[1].set_xlabel("$r_{\mathrm{noise}}$", fontsize=font_size)
axs[1].set_ylabel("SS", fontsize=font_size)
axs[1].set_ylim(top=35)
axs[1].set_ylim(bottom=5)
from params_exp_cal_size import *
results = {}
for n_cal in n_cals:
results[n_cal] = {}
for algorithm in algorithms:
results[n_cal][algorithm] = {}
for metric in metrics:
results[n_cal][algorithm][metric] = {}
results[n_cal][algorithm][metric]["values"] = []
for n_cal in n_cals:
for run in runs:
exp_identity_string = "_".join([str(n_train), str(noise_ratio), str(n_cal), lbd, str(run)])
for algorithm in algorithms:
result_path = os.path.join(exp_dir, exp_identity_string + "_{}_result.pkl".format(algorithm))
collect_results_normal_exp(result_path, n_cal, algorithm, results)
for n_cal in n_cals:
for algorithm in algorithms:
for metric in metrics:
results[n_cal][algorithm][metric]["mean"] = np.mean(results[n_cal][algorithm][metric]["values"])
results[n_cal][algorithm][metric]["std"] = np.std(results[n_cal][algorithm][metric]["values"],
ddof=1)
# plotting whether constraint is satisfied
for algorithm in algorithms:
# if algorithm_df_guarantee[algorithm] and algorithm != "css":
# continue
mean_algorithm = np.array([results[n_cal][algorithm]["constraint_satisfied"]["mean"]
for n_cal in n_cals])
std_err_algorithm = np.array(
[results[n_cal][algorithm]["constraint_satisfied"]["std"] / np.sqrt(n_runs) for n_cal in n_cals])
axs[2].errorbar(n_cals_label, mean_algorithm, std_err_algorithm, color=algorithm_colors[algorithm],
linewidth=line_width, label=algorithm_labels[algorithm], marker=algorithm_markers[algorithm],
capthick=capthick)
axs[2].yaxis.set_major_locator(ticker.MultipleLocator(0.5))
axs[2].set_xlabel("$n$", fontsize=font_size)
axs[2].set_ylabel("EQ", fontsize=font_size)
# plotting the number of selected applicants
for algorithm in algorithms:
if not algorithm_df_guarantee[algorithm]:
continue
mean_algorithm = np.array([results[n_cal][algorithm]["num_selected"]["mean"] for n_cal
in n_cals])
std_algorithm = np.array([results[n_cal][algorithm]["num_selected"]["std"] for n_cal
in n_cals])
axs[3].plot(n_cals_label, mean_algorithm, linewidth=line_width, color=algorithm_colors[algorithm], marker=algorithm_markers[algorithm]
, label=algorithm_labels[algorithm])
axs[3].fill_between(n_cals_label, mean_algorithm - std_algorithm,
mean_algorithm + std_algorithm, alpha=transparency,
color=algorithm_colors[algorithm])
axs[3].set_xlabel("$n$", fontsize=font_size)
axs[3].set_ylabel("SS", fontsize=font_size)
axs[3].set_ylim(top=35)
axs[3].set_ylim(bottom=5)
fig.legend(handles=handles, bbox_to_anchor=(0.5, 1.02), loc="upper center", ncol=5)
plt.tight_layout(rect=[0, 0, 1, 0.78])
fig.savefig("./plots/exp_normal.pdf", format="pdf")
2047cd9bc642977e2dc261bae40c74e7151587bf | 4,206 | py | Python | fast_tmp/models.py | Chise1/fast-tmp2 | 0dd34ac3ec7ea5452c1e4b8d922a40665264f42b | [
"Apache-2.0"
] | 1 | 2021-07-02T09:14:12.000Z | 2021-07-02T09:14:12.000Z | fast_tmp/models.py | Chise1/fast-tmp2 | 0dd34ac3ec7ea5452c1e4b8d922a40665264f42b | [
"Apache-2.0"
] | null | null | null | fast_tmp/models.py | Chise1/fast-tmp2 | 0dd34ac3ec7ea5452c1e4b8d922a40665264f42b | [
"Apache-2.0"
] | null | null | null | from typing import List, Type, Union
from pydantic import BaseModel
from tortoise import Model, fields
from fast_tmp.utils.password import make_password, verify_password
class Permission(Model):
label = fields.CharField(max_length=128)
codename = fields.CharField(max_length=128, unique=True)
    @classmethod
    async def make_permission(
        cls,
        model: Type[BaseModel],
    ):
        """
        Generate the CRUD permissions for the given model.
        """
        model_name = model.__name__
        # get_or_create is a coroutine and must be awaited. Look the row up by
        # its unique codename and supply only the label as a creation default
        # (Permission has no "model" field, so that key is dropped).
        for action in ("read", "create", "update", "delete"):
            await cls.get_or_create(
                codename="can_{}_{}".format(action, model_name),
                defaults={"label": "can {} {}".format(action, model_name)},
            )
    def __eq__(self, other) -> bool:
        if other == self.codename or getattr(other, "codename", None) == self.codename:
            return True
        return False
def __str__(self):
return self.label
def __repr__(self):
return self.label
class User(Model):
username = fields.CharField(max_length=128, unique=True)
password = fields.CharField(max_length=255)
is_active = fields.BooleanField(default=True)
is_superuser = fields.BooleanField(default=False)
groups = fields.ManyToManyField("fast_tmp.Group", related_name='users')
def set_password(self, raw_password: str):
"""
        Hash and store the password.
:param raw_password:
:return:
"""
self.password = make_password(raw_password)
def verify_password(self, raw_password: str) -> bool:
"""
        Verify a raw password against the stored hash.
:param raw_password:
:return:
"""
return verify_password(raw_password, self.password)
    @property
    async def perms(self) -> List[str]:
        # Cache the codenames on first access. A "__perms" attribute would be
        # name-mangled on assignment but not inside hasattr("__perms"), so the
        # original cache check could never hit; use a single underscore instead.
        if not hasattr(self, "_perms"):
            permission_instances = await Permission.filter(groups__users=self.pk)
            self._perms = [permission.codename for permission in permission_instances]
        return self._perms
    async def has_perm(self, perm: Union[Permission, str]) -> bool:
        """
        Check whether the user has the given permission.
        """
        if self.is_superuser:
            return True
        # perms is an async property, so it must be awaited before iterating.
        for permission_codename in await self.perms:
            if permission_codename == perm:
                return True
        return False
    async def has_perms(self, perms: List[Union[Permission, str]]) -> bool:
        """
        Check every requested permission against the user's codenames.
        """
        if self.is_superuser:
            return True
        codenames = await self.perms
        for perm in perms:
            # Membership test over all codenames; the original nested loop
            # effectively compared each permission only against the first one.
            if perm not in codenames:
                return False
        return True
    async def get_perms(self) -> List[str]:
        return await self.perms
def __str__(self):
return self.username
class Group(Model):
label = fields.CharField(max_length=128, unique=True)
permissions = fields.ManyToManyField("fast_tmp.Permission",
related_name="groups")
users: fields.ManyToManyRelation[User]
def __str__(self):
return self.label
class Config(Model):
name = fields.CharField(max_length=64)
key = fields.CharField(max_length=64, unique=True)
value = fields.JSONField()
@classmethod
async def get_value(cls, key: str):
conf = await cls.filter(key=key).first()
if not conf:
return None
return conf.value
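# A minimal, hedged demo of the helpers above; the sqlite URL and the
# app/module mapping passed to Tortoise.init are illustrative assumptions:
if __name__ == "__main__":
    import asyncio
    from tortoise import Tortoise
    async def _demo():
        await Tortoise.init(db_url="sqlite://:memory:",
                            modules={"fast_tmp": ["fast_tmp.models"]})
        await Tortoise.generate_schemas()
        user = await User.create(username="admin", password="")
        user.set_password("s3cret")
        await user.save()
        assert user.verify_password("s3cret")
        assert not await user.has_perm("can_read_User")  # nothing granted yet
        await Tortoise.close_connections()
    asyncio.run(_demo())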
e9fb83f0e912cc4646c31575e133ff91af6a28c9 | 28,793 | py | Python | python/plot_line.py | meudnaes/VoronoiRT | 448eead8fa6e911ed40e2dfaba5baa1c9ef54cf2 | [
"MIT"
] | 1 | 2021-12-03T08:56:57.000Z | 2021-12-03T08:56:57.000Z | python/plot_line.py | meudnaes/VoronoiRT | 448eead8fa6e911ed40e2dfaba5baa1c9ef54cf2 | [
"MIT"
] | 2 | 2022-02-18T09:50:16.000Z | 2022-03-08T12:10:56.000Z | python/plot_line.py | meudnaes/VoronoiRT | 448eead8fa6e911ed40e2dfaba5baa1c9ef54cf2 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib as mpl
# import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
from brightness_temperature import *
from plot_searchlight import get_intensity, font_size, iunits
#plt.rc('text.latex', preamble=r'\usepackage{cmbright}')
#plt.rc('text', usetex=False)
lambda0 = 121.56841096386111 # nm
wavelength = np.array([120.85647513019845, 121.04863120292787, 121.18861407155109,
121.29060823835265, 121.36494181786958, 121.41913498583673,
121.45866338283498, 121.48751396885412, 121.50858975582949,
121.52400450419977, 121.53529729933265, 121.54358879034517,
121.54969495175679, 121.55420991638432, 121.55756628818231,
121.56007905763141, 121.56197757770408, 121.5634288464184,
121.56455445948667, 121.56544295399095, 121.56615879614279,
121.56674892551068, 121.56724752004678, 121.56767946562972,
121.56806288233183, 121.56841096386111, 121.56875904539042,
121.56914246209253, 121.56957440767549, 121.57007300221157,
121.57066313157947, 121.57137897373131, 121.5722674682356,
121.57339308130386, 121.57484435001817, 121.57674287009084,
121.57925563953995, 121.58261201133794, 121.58712697596546,
121.5932331373771, 121.6015246283896, 121.6128174235225,
121.62823217189276, 121.64930795886815, 121.67815854488727,
121.71768694188553, 121.77188010985269, 121.84621368936962,
121.94820785617118, 122.0881907247944, 122.2803467975238]) # nm
center = np.argmin(np.abs(wavelength - lambda0))
blue_wing = center - 11
red_wing = center + 11
continuum = np.argmax(wavelength)
plt.rcParams['text.usetex'] = True
PATH = "./linedata/"
CMAX = 100
CMIN = 0
CMAX_wing = 80
CMIN_wing = 0
CMAP = "gist_gray_r"
CMAP_CONT = "gist_gray_r"
lpad = 8
if __name__ == "__main__":
intensity_half = get_intensity("half_res_ul7n12_disk_centre_1.npy", PATH)
intensity_third = get_intensity("regular_third_disk_centre.npy", PATH)
intensity_quarter = get_intensity("regular_quarter_disk_centre.npy", PATH)
intensity_cont_ext_5e5 = get_intensity("voronoi_5e5_disk_centre.npy", PATH)
intensity_cont_ext_5e5_1dot5 = get_intensity("voronoi_ul7n12_5e5_disk_centre_1dot5.npy", PATH)
intensity_cont_ext_1e6 = get_intensity("voronoi_ul7n12_1e6_disk_centre_1.npy", PATH)
intensity_cont_ext_1e6_1dot5 = get_intensity("voronoi_ul7n12_1e6_disk_centre_1dot5.npy", PATH)
intensity_cont_ext_2e6 = get_intensity("voronoi_ul7n12_2e6_disk_centre_1.npy", PATH)
intensity_cont_ext_3e6 = get_intensity("voronoi_ul7n12_3e6_disk_centre_1.npy", PATH)
intensity_tot_ext_5e5 = get_intensity("total_ext_5e5_disk_centre_1.npy", PATH)
intensity_tot_ext_1e6 = get_intensity("total_ext_1e6_disk_centre_1.npy", PATH)
intensity_tot_ext_2e6 = get_intensity("total_ext_2e6_disk_centre_1.npy", PATH)
intensity_tot_ext_3e6 = get_intensity("total_ext_3e6_disk_centre_1.npy", PATH)
intensity_destruction_5e5 = get_intensity("destruction_5e5_disk_centre_1.npy", PATH)
intensity_destruction_1e6 = get_intensity("destruction_1e6_disk_centre_1.npy", PATH)
intensity_density_5e5 = get_intensity("density_5e5_disk_centre_1.npy", PATH)
intensity_ionised_5e5 = get_intensity("ionised_hydrogen_5e5_disk_centre_1.npy", PATH)
intensity_ionised_1e6 = get_intensity("ionised_hydrogen_1e6_disk_centre_1.npy", PATH)
intensity_ionised_2e6 = get_intensity("ionised_hydrogen_2e6_disk_centre_1.npy", PATH)
intensity_uniform_1e6 = get_intensity("uniform_1e6_disk_centre_1.npy", PATH)
convergence_quarter = np.load(PATH+"regular_ul7n12_quarter.npy")
convergence_half = np.load(PATH+"regular_ul7n12_half.npy")
convergence_third = np.load(PATH+"regular_ul7n12_third.npy")
convergence_cont_5e5 = np.load("./convergence/voronoi_ul7n12_5e5_convergence.npy")
convergence_cont_1e6 = np.load("./convergence/voronoi_ul7n12_1e6_convergence.npy")
convergence_cont_2e6 = np.load("./convergence/voronoi_ul7n12_2e6_convergence.npy")
convergence_cont_3e6 = np.load("./convergence/voronoi_ul7n12_3e6_convergence.npy")
convergence_ionised_5e5 = np.load("./convergence/ionised_hydrogen_5e5_convergence.npy")
convergence_ionised_1e6 = np.load("./convergence/ionised_hydrogen_1e6_convergence.npy")
convergence_ionised_2e6 = np.load("./convergence/ionised_hydrogen_2000000_convergence.npy")
convergence_density_5e5 = np.load("./convergence/density_5e5_convergence.npy")
convergence_destruction_5e5 = np.load("./convergence/destruction_5e5_convergence.npy")
convergence_destruction_1e6 = np.load("./convergence/destruction_1e6_convergence.npy")
convergence_tot_ext_5e5 = np.load("./convergence/total_ext_5e5_convergence.npy")
convergence_tot_ext_1e6 = np.load("./convergence/total_ext_1e6_convergence.npy")
convergence_tot_ext_2e6 = np.load("./convergence/total_ext_2e6_convergence.npy")
convergence_tot_ext_3e6 = np.load("./convergence/total_ext_3e6_convergence.npy")
convergence_uniform_1e6 = np.load("./convergence/uniform_1e6_convergence.npy")
velocity = ((wavelength - lambda0)/lambda0*constants.c).to("km s-1")
print("Velocity at blue wing: %.3f" %(velocity[blue_wing].value))
print("Velocity at continuum: %.3f" %(velocity[continuum].value))
CMAX_continuum = intensity_half[continuum, :, :].max()
CMIN_continuum = intensity_half[continuum, :, :].min()
font_size()
# compare sampling methods
fig, ax = plt.subplots(1, 2, figsize=(7.5,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
ax[0].imshow(intensity_cont_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\alpha^c~\textrm{sampling}$")
im = ax[1].imshow(intensity_uniform_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$U~\textrm{sampling}$")
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
fig.suptitle(r"$\textbf{Disk-centre intensity at line centre, irregular grid}$")
plt.savefig("../img/compare_line/quick_compare.pdf")
plt.close()
################################################################################
################################################################################
################################################################################
# compare sampling methods
fig, ax = plt.subplots(1, 4, figsize=(14.5,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
ax[0].imshow(intensity_cont_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\alpha^c~\textrm{sampling}$")
ax[1].imshow(intensity_ionised_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$N_\textrm{\small{H\,II}}^\textrm{\small{LTE}}~\textrm{sampling}$")
ax[2].imshow(intensity_tot_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].axis(False)
ax[2].set_title(r"$\alpha^\textrm{tot}~\textrm{sampling}$")
im = ax[3].imshow(intensity_destruction_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[3].axis(False)
ax[3].set_title(r"$\varepsilon~\textrm{sampling}$")
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[3].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[3].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
fig.suptitle(r"$\textbf{Disk-centre intensity at line centre, irregular grid}$")
# plt.show()
plt.savefig("../img/compare_line/compare_sites.pdf")
################################################################################
################################################################################
################################################################################
# Plot images over line wing, centre, and continuum, regular grid
fig, ax = plt.subplots(1, 3, figsize=(13,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
im = ax[0].imshow(intensity_half[blue_wing, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX_wing,
vmin=CMIN_wing)
ax[0].axis(False)
wl = wavelength[blue_wing]
ax[0].set_title(r"$\textrm{Blue wing}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[1].imshow(intensity_half[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
wl = wavelength[center]
ax[1].set_title(r"$\textrm{Line centre}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[1], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[2].imshow(intensity_half[continuum, :, :],
cmap=CMAP_CONT,
origin="lower",
vmax=CMAX_continuum,
vmin=CMIN_continuum)
ax[2].axis(False)
wl = wavelength[continuum]
ax[2].set_title(r"$\textrm{Continuum}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[2], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=lpad)
x = np.load("../data/LTE/x_regular_half.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[0].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[1].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[1].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[2].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[2].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.suptitle(r"$\textbf{Disk-centre intensity, regular Grid}$")
plt.savefig("../img/compare_line/regular_disk_centre.pdf")
################################################################################
################################################################################
################################################################################
# Plot images over line wing, centre, and continuum, irregular grid
fig, ax = plt.subplots(1, 3, figsize=(13,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
im = ax[0].imshow(intensity_cont_ext_3e6[blue_wing, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX_wing,
vmin=CMIN_wing)
ax[0].axis(False)
wl = wavelength[blue_wing]
ax[0].set_title(r"$\textrm{Blue wing}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[1].imshow(intensity_cont_ext_3e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
wl = wavelength[center]
ax[1].set_title(r"$\textrm{Line centre}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[1], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[2].imshow(intensity_cont_ext_3e6[continuum, :, :],
cmap=CMAP_CONT,
origin="lower",
vmax=CMAX_continuum,
vmin=CMIN_continuum)
ax[2].axis(False)
wl = wavelength[continuum]
ax[2].set_title(r"$\textrm{Continuum}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[2], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=lpad)
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[1].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[2].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.suptitle(r"$\textbf{Disk-centre intensity, irregular grid}$")
plt.savefig("../img/compare_line/irregular_disk_centre.pdf")
################################################################################
################################################################################
################################################################################
# compare regular resolutions
fig, ax = plt.subplots(1, 3, figsize=(11.75,4), constrained_layout=True)
ax[0].imshow(intensity_quarter[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\textrm{Quarter resolution}$")
ax[1].imshow(intensity_third[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$\textrm{One-third resolution}$")
im = ax[2].imshow(intensity_half[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].axis(False)
ax[2].set_title(r"$\textrm{Half resolution}$")
# Line:
x = np.load("../data/LTE/x_regular_quarter.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[0].hlines(y=8/2, xmin=10/2, xmax=10/2 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[0].text(9/2, 10/2, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
x = np.load("../data/LTE/x_regular_third.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[1].hlines(y=8*2/3, xmin=10*2/3, xmax=10*2/3 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(9*2/3, 10*2/3, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
x = np.load("../data/LTE/x_regular_half.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[2].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# fig.suptitle(r"$\textbf{Disk-centre Intensity line centre, Regular Grid}$")
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
plt.savefig("../img/compare_line/regular_resolutions.pdf")
################################################################################
################################################################################
################################################################################
# plot convergence
fig, ax = plt.subplots(1, 3, figsize=(14, 5.5), sharey=True)
ax[0].plot(convergence_quarter, label=r"$\textrm{regular (1/4 res.)}$", color="k", ls="solid")
ax[0].plot(convergence_ionised_5e5, label=r"$N_\textrm{H\,\small{II}}$", color="red", ls="solid")
ax[0].plot(convergence_cont_5e5, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
ax[0].plot(convergence_tot_ext_5e5, label=r"$\alpha^\textrm{tot}$", color="gold", ls="solid")
ax[0].plot(convergence_density_5e5, label=r"$\rho$", color="gray", ls="dashdot")
ax[0].plot(convergence_destruction_5e5, label=r"$\varepsilon$", color="cyan", ls="solid")
ax[1].plot(convergence_third, label=r"$\textrm{regular (1/3 res.)}$", color="k", ls="solid")
ax[1].plot(convergence_destruction_1e6, label=r"$\varepsilon$", color="cyan", ls="solid")
ax[1].plot(convergence_cont_1e6, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
ax[1].plot(convergence_ionised_1e6, label=r"$N_\textrm{H\,\small{II}}$", color="red", ls="solid")
ax[1].plot(convergence_uniform_1e6, label=r"$U~\textrm{(uniform)}$", color="pink", ls="solid")
ax[2].plot(convergence_half, label=r"$\textrm{regular (1/2 res.)}$", color="k", ls="solid")
ax[2].plot(convergence_cont_3e6, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
# ax.plot(convergence_cont_2e6, label=r"$\alpha^\textrm{cont}~2\cdot 10^6~\textrm{sites}$", color="b", ls="dashdot")
# ax.plot(convergence_tot_ext_2e6, label=r"$\alpha^\textrm{tot}~2\cdot 10^6~\textrm{sites}$", color="g", ls="dashdot")
# ax.plot(convergence_tot_ext_1e6, label=r"$\alpha^\textrm{tot}~1\cdot 10^6~\textrm{sites}$", color="g", ls="dashed")
ax[0].set_ylabel(r"$\textrm{Max rel. change,}~\max\left(1 - S_\textrm{new}/S_\textrm{old}\right)$")
ax[0].set_yscale("log")
ax[0].legend()
ax[1].legend()
ax[2].legend()
ax[0].set_xlabel(r"$\textrm{Iterations}$")
ax[1].set_xlabel(r"$\textrm{Iterations}$")
ax[2].set_xlabel(r"$\textrm{Iterations}$")
ax[0].set_title(r"$\sim 5\cdot 10^5~\textrm{points}$")
ax[1].set_title(r"$\sim 10^6~\textrm{points}$")
ax[2].set_title(r"$\sim 3\cdot10^6~\textrm{points}$")
#ax.set_title(r"$\textrm{Convergence}$")
fig.tight_layout()
plt.savefig("../img/compare_line/convergence.pdf")
################################################################################
################################################################################
################################################################################
# resolution irregular grid
fig, ax = plt.subplots(1, 3, figsize=(11.75,4), constrained_layout=True)
ax[0].imshow(intensity_cont_ext_5e5[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].set_title(r"$5\cdot 10^5~\textrm{sites}$")
ax[0].axis(False)
ax[1].imshow(intensity_cont_ext_2e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].set_title(r"$2 \cdot 10^6~\textrm{sites}$")
ax[1].axis(False)
im = ax[2].imshow(intensity_cont_ext_3e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].set_title(r"$3 \cdot 10^6~\textrm{sites}$")
ax[2].axis(False)
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.046, pad=0.04, label=iunits)
# fig.suptitle(r"$\textbf{Disk-Centre~Intensity~\textit{I}}_{\lambda_0}$")
# plt.show()
plt.savefig("../img/compare_line/disk_centre_irregular_resolution.pdf")
################################################################################
################################################################################
################################################################################
# plot all lines to highlight differences
fig, ax = plt.subplots(1, 2, figsize=(10, 5.5), constrained_layout=True, sharey=True)
I_regular = intensity_half.reshape(len(wavelength), -1)
I_regular *= units.kW*units.m**(-2)*units.nm**(-1)
I_irregular = intensity_cont_ext_3e6.reshape(len(wavelength), -1)
I_irregular *= units.kW*units.m**(-2)*units.nm**(-1)
Tb_regular = T_b(wavelength[:, np.newaxis]*units.nm, I_regular)
Tb_irregular = T_b(wavelength[:, np.newaxis]*units.nm, I_irregular)
ax[0].plot(wavelength[center-17:center+18],
Tb_regular[center-17:center+18, ::4].value,
color='k',
lw=0.03,
alpha=0.5,
rasterized=True)
ax[0].plot(wavelength[center-17:center+18],
np.mean(Tb_regular[center-17:center+18], axis=1).value,
color="crimson", label=r"$\textrm{spatial average}$")
ax[0].axvline(lambda0, ls="dashed", color="royalblue", lw=0.75)
ax[0].axvline(wavelength[blue_wing], ls="dashed", color="deepskyblue", lw=0.75)
ax[0].set_xlabel(r"$\textrm{Wavelength [nm]}$")
ax[0].set_ylabel(r"$\textrm{Brightness temperature [K]}$")
ax[0].set_title(r"$\textrm{Regular grid}$")
ax[0].legend(loc="upper right")
ax[0].text(x=lambda0+0.001, y=6150, s=r"$\lambda_0$", color="royalblue")
ax[0].text(x=wavelength[blue_wing]-0.006, y=6150,
s=r"$\textrm{wing}$", color="deepskyblue", rotation="vertical")
# ax[0].set_xticks(list(ax[0].get_xticks()) + [lambda0])
# ax[0].set_xticklabels([r"$%.2f$" %x for x in list(ax[0].get_xticks())[:-1]] + [r"$\lambda_0$"])
ax[1].plot(wavelength[center-17:center+18],
Tb_irregular[center-17:center+18, ::16].value,
color='k',
lw=0.03,
alpha=0.5,
rasterized=True)
ax[1].plot(wavelength[center-17:center+18],
np.mean(Tb_irregular[center-17:center+18], axis=1).value,
color="crimson", label=r"$\textrm{spatial average}$")
ax[1].axvline(lambda0, ls="dashed", color="royalblue", lw=0.75)
ax[1].axvline(wavelength[blue_wing], ls="dashed", color="deepskyblue", lw=0.75)
ax[1].set_xlabel(r"$\textrm{Wavelength [nm]}$")
ax[1].set_ylim(6000,12000)
ax[1].set_title(r"$\textrm{Irregular grid}$")
ax[1].legend(loc="upper right")
ax[1].text(x=lambda0+0.001, y=6150, s=r"$\lambda_0$", color="royalblue")
ax[1].text(x=wavelength[blue_wing]-0.006, y=6150,
s=r"$\textrm{wing}$", color="deepskyblue", rotation="vertical")
# ax[1].set_xticklabels([r"$%.2f$" %x for x in list(ax[0].get_xticks())[:-1]] + [r"$\lambda_0$"])
# ax[1].set_xticks(list(ax[1].get_xticks()) + [lambda0])
# fig.suptitle(r"$\textrm{Disk-Centre Intensity}$")
plt.savefig("../img/compare_line/lines.pdf", dpi=300)
| 44.296923 | 122 | 0.576216 | import numpy as np
import matplotlib as mpl
# import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
from brightness_temperature import *
from plot_searchlight import get_intensity, font_size, iunits
#plt.rc('text.latex', preamble=r'\usepackage{cmbright}')
#plt.rc('text', usetex=False)
lambda0 = 121.56841096386111 # nm
wavelength = np.array([120.85647513019845, 121.04863120292787, 121.18861407155109,
121.29060823835265, 121.36494181786958, 121.41913498583673,
121.45866338283498, 121.48751396885412, 121.50858975582949,
121.52400450419977, 121.53529729933265, 121.54358879034517,
121.54969495175679, 121.55420991638432, 121.55756628818231,
121.56007905763141, 121.56197757770408, 121.5634288464184,
121.56455445948667, 121.56544295399095, 121.56615879614279,
121.56674892551068, 121.56724752004678, 121.56767946562972,
121.56806288233183, 121.56841096386111, 121.56875904539042,
121.56914246209253, 121.56957440767549, 121.57007300221157,
121.57066313157947, 121.57137897373131, 121.5722674682356,
121.57339308130386, 121.57484435001817, 121.57674287009084,
121.57925563953995, 121.58261201133794, 121.58712697596546,
121.5932331373771, 121.6015246283896, 121.6128174235225,
121.62823217189276, 121.64930795886815, 121.67815854488727,
121.71768694188553, 121.77188010985269, 121.84621368936962,
121.94820785617118, 122.0881907247944, 122.2803467975238]) # nm
center = np.argmin(np.abs(wavelength - lambda0))
blue_wing = center - 11
red_wing = center + 11
continuum = np.argmax(wavelength)
plt.rcParams['text.usetex'] = True
PATH = "./linedata/"
CMAX = 100
CMIN = 0
CMAX_wing = 80
CMIN_wing = 0
CMAP = "gist_gray_r"
CMAP_CONT = "gist_gray_r"
lpad = 8
if __name__ == "__main__":
intensity_half = get_intensity("half_res_ul7n12_disk_centre_1.npy", PATH)
intensity_third = get_intensity("regular_third_disk_centre.npy", PATH)
intensity_quarter = get_intensity("regular_quarter_disk_centre.npy", PATH)
intensity_cont_ext_5e5 = get_intensity("voronoi_5e5_disk_centre.npy", PATH)
intensity_cont_ext_5e5_1dot5 = get_intensity("voronoi_ul7n12_5e5_disk_centre_1dot5.npy", PATH)
intensity_cont_ext_1e6 = get_intensity("voronoi_ul7n12_1e6_disk_centre_1.npy", PATH)
intensity_cont_ext_1e6_1dot5 = get_intensity("voronoi_ul7n12_1e6_disk_centre_1dot5.npy", PATH)
intensity_cont_ext_2e6 = get_intensity("voronoi_ul7n12_2e6_disk_centre_1.npy", PATH)
intensity_cont_ext_3e6 = get_intensity("voronoi_ul7n12_3e6_disk_centre_1.npy", PATH)
intensity_tot_ext_5e5 = get_intensity("total_ext_5e5_disk_centre_1.npy", PATH)
intensity_tot_ext_1e6 = get_intensity("total_ext_1e6_disk_centre_1.npy", PATH)
intensity_tot_ext_2e6 = get_intensity("total_ext_2e6_disk_centre_1.npy", PATH)
intensity_tot_ext_3e6 = get_intensity("total_ext_3e6_disk_centre_1.npy", PATH)
intensity_destruction_5e5 = get_intensity("destruction_5e5_disk_centre_1.npy", PATH)
intensity_destruction_1e6 = get_intensity("destruction_1e6_disk_centre_1.npy", PATH)
intensity_density_5e5 = get_intensity("density_5e5_disk_centre_1.npy", PATH)
intensity_ionised_5e5 = get_intensity("ionised_hydrogen_5e5_disk_centre_1.npy", PATH)
intensity_ionised_1e6 = get_intensity("ionised_hydrogen_1e6_disk_centre_1.npy", PATH)
intensity_ionised_2e6 = get_intensity("ionised_hydrogen_2e6_disk_centre_1.npy", PATH)
intensity_uniform_1e6 = get_intensity("uniform_1e6_disk_centre_1.npy", PATH)
convergence_quarter = np.load(PATH+"regular_ul7n12_quarter.npy")
convergence_half = np.load(PATH+"regular_ul7n12_half.npy")
convergence_third = np.load(PATH+"regular_ul7n12_third.npy")
convergence_cont_5e5 = np.load("./convergence/voronoi_ul7n12_5e5_convergence.npy")
convergence_cont_1e6 = np.load("./convergence/voronoi_ul7n12_1e6_convergence.npy")
convergence_cont_2e6 = np.load("./convergence/voronoi_ul7n12_2e6_convergence.npy")
convergence_cont_3e6 = np.load("./convergence/voronoi_ul7n12_3e6_convergence.npy")
convergence_ionised_5e5 = np.load("./convergence/ionised_hydrogen_5e5_convergence.npy")
convergence_ionised_1e6 = np.load("./convergence/ionised_hydrogen_1e6_convergence.npy")
convergence_ionised_2e6 = np.load("./convergence/ionised_hydrogen_2000000_convergence.npy")
convergence_density_5e5 = np.load("./convergence/density_5e5_convergence.npy")
convergence_destruction_5e5 = np.load("./convergence/destruction_5e5_convergence.npy")
convergence_destruction_1e6 = np.load("./convergence/destruction_1e6_convergence.npy")
convergence_tot_ext_5e5 = np.load("./convergence/total_ext_5e5_convergence.npy")
convergence_tot_ext_1e6 = np.load("./convergence/total_ext_1e6_convergence.npy")
convergence_tot_ext_2e6 = np.load("./convergence/total_ext_2e6_convergence.npy")
convergence_tot_ext_3e6 = np.load("./convergence/total_ext_3e6_convergence.npy")
convergence_uniform_1e6 = np.load("./convergence/uniform_1e6_convergence.npy")
velocity = ((wavelength - lambda0)/lambda0*constants.c).to("km s-1")
print("Velocity at blue wing: %.3f" %(velocity[blue_wing].value))
print("Velocity at continuum: %.3f" %(velocity[continuum].value))
CMAX_continuum = intensity_half[continuum, :, :].max()
CMIN_continuum = intensity_half[continuum, :, :].min()
font_size()
# compare sampling methods
fig, ax = plt.subplots(1, 2, figsize=(7.5,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
ax[0].imshow(intensity_cont_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\alpha^c~\textrm{sampling}$")
im = ax[1].imshow(intensity_uniform_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$U~\textrm{sampling}$")
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
fig.suptitle(r"$\textbf{Disk-centre intensity at line centre, irregular grid}$")
plt.savefig("../img/compare_line/quick_compare.pdf")
plt.close()
################################################################################
################################################################################
################################################################################
# compare sampling methods
fig, ax = plt.subplots(1, 4, figsize=(14.5,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
ax[0].imshow(intensity_cont_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\alpha^c~\textrm{sampling}$")
ax[1].imshow(intensity_ionised_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$N_\textrm{\small{H\,II}}^\textrm{\small{LTE}}~\textrm{sampling}$")
ax[2].imshow(intensity_tot_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].axis(False)
ax[2].set_title(r"$\alpha^\textrm{tot}~\textrm{sampling}$")
im = ax[3].imshow(intensity_destruction_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[3].axis(False)
ax[3].set_title(r"$\varepsilon~\textrm{sampling}$")
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[3].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[3].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
fig.suptitle(r"$\textbf{Disk-centre intensity at line centre, irregular grid}$")
# plt.show()
plt.savefig("../img/compare_line/compare_sites.pdf")
################################################################################
################################################################################
################################################################################
# Plot images over line wing, centre, and continuum, regular grid
fig, ax = plt.subplots(1, 3, figsize=(13,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
im = ax[0].imshow(intensity_half[blue_wing, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX_wing,
vmin=CMIN_wing)
ax[0].axis(False)
wl = wavelength[blue_wing]
ax[0].set_title(r"$\textrm{Blue wing}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[1].imshow(intensity_half[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
wl = wavelength[center]
ax[1].set_title(r"$\textrm{Line centre}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[1], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[2].imshow(intensity_half[continuum, :, :],
cmap=CMAP_CONT,
origin="lower",
vmax=CMAX_continuum,
vmin=CMIN_continuum)
ax[2].axis(False)
wl = wavelength[continuum]
ax[2].set_title(r"$\textrm{Continuum}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[2], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=lpad)
x = np.load("../data/LTE/x_regular_half.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[0].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[1].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[1].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[2].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[2].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.suptitle(r"$\textbf{Disk-centre intensity, regular grid}$")
plt.savefig("../img/compare_line/regular_disk_centre.pdf")
################################################################################
################################################################################
################################################################################
# Plot images over line wing, centre, and continuum, irregular grid
fig, ax = plt.subplots(1, 3, figsize=(13,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
im = ax[0].imshow(intensity_cont_ext_3e6[blue_wing, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX_wing,
vmin=CMIN_wing)
ax[0].axis(False)
wl = wavelength[blue_wing]
ax[0].set_title(r"$\textrm{Blue wing}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[1].imshow(intensity_cont_ext_3e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
wl = wavelength[center]
ax[1].set_title(r"$\textrm{Line centre}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[1], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[2].imshow(intensity_cont_ext_3e6[continuum, :, :],
cmap=CMAP_CONT,
origin="lower",
vmax=CMAX_continuum,
vmin=CMIN_continuum)
ax[2].axis(False)
wl = wavelength[continuum]
ax[2].set_title(r"$\textrm{Continuum}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[2], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=lpad)
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[1].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[2].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.suptitle(r"$\textbf{Disk-centre intensity, irregular grid}$")
plt.savefig("../img/compare_line/irregular_disk_centre.pdf")
################################################################################
################################################################################
################################################################################
# compare regular resolutions
fig, ax = plt.subplots(1, 3, figsize=(11.75,4), constrained_layout=True)
ax[0].imshow(intensity_quarter[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\textrm{Quarter resolution}$")
ax[1].imshow(intensity_third[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$\textrm{One-third resolution}$")
im = ax[2].imshow(intensity_half[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].axis(False)
ax[2].set_title(r"$\textrm{Half resolution}$")
# Line:
x = np.load("../data/LTE/x_regular_quarter.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[0].hlines(y=8/2, xmin=10/2, xmax=10/2 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[0].text(9/2, 10/2, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
x = np.load("../data/LTE/x_regular_third.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[1].hlines(y=8*2/3, xmin=10*2/3, xmax=10*2/3 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(9*2/3, 10*2/3, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
x = np.load("../data/LTE/x_regular_half.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[2].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# fig.suptitle(r"$\textbf{Disk-centre Intensity line centre, Regular Grid}$")
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
plt.savefig("../img/compare_line/regular_resolutions.pdf")
################################################################################
################################################################################
################################################################################
# plot convergence
fig, ax = plt.subplots(1, 3, figsize=(14, 5.5), sharey=True)
ax[0].plot(convergence_quarter, label=r"$\textrm{regular (1/4 res.)}$", color="k", ls="solid")
ax[0].plot(convergence_ionised_5e5, label=r"$N_\textrm{H\,\small{II}}$", color="red", ls="solid")
ax[0].plot(convergence_cont_5e5, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
ax[0].plot(convergence_tot_ext_5e5, label=r"$\alpha^\textrm{tot}$", color="gold", ls="solid")
ax[0].plot(convergence_density_5e5, label=r"$\rho$", color="gray", ls="dashdot")
ax[0].plot(convergence_destruction_5e5, label=r"$\varepsilon$", color="cyan", ls="solid")
ax[1].plot(convergence_third, label=r"$\textrm{regular (1/3 res.)}$", color="k", ls="solid")
ax[1].plot(convergence_destruction_1e6, label=r"$\varepsilon$", color="cyan", ls="solid")
ax[1].plot(convergence_cont_1e6, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
ax[1].plot(convergence_ionised_1e6, label=r"$N_\textrm{H\,\small{II}}$", color="red", ls="solid")
ax[1].plot(convergence_uniform_1e6, label=r"$U~\textrm{(uniform)}$", color="pink", ls="solid")
ax[2].plot(convergence_half, label=r"$\textrm{regular (1/2 res.)}$", color="k", ls="solid")
ax[2].plot(convergence_cont_3e6, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
# ax.plot(convergence_cont_2e6, label=r"$\alpha^\textrm{cont}~2\cdot 10^6~\textrm{sites}$", color="b", ls="dashdot")
# ax.plot(convergence_tot_ext_2e6, label=r"$\alpha^\textrm{tot}~2\cdot 10^6~\textrm{sites}$", color="g", ls="dashdot")
# ax.plot(convergence_tot_ext_1e6, label=r"$\alpha^\textrm{tot}~1\cdot 10^6~\textrm{sites}$", color="g", ls="dashed")
ax[0].set_ylabel(r"$\textrm{Max rel. change,}~\max\left(1 - S_\textrm{new}/S_\textrm{old}\right)$")
ax[0].set_yscale("log")
ax[0].legend()
ax[1].legend()
ax[2].legend()
ax[0].set_xlabel(r"$\textrm{Iterations}$")
ax[1].set_xlabel(r"$\textrm{Iterations}$")
ax[2].set_xlabel(r"$\textrm{Iterations}$")
ax[0].set_title(r"$\sim 5\cdot 10^5~\textrm{points}$")
ax[1].set_title(r"$\sim 10^6~\textrm{points}$")
ax[2].set_title(r"$\sim 3\cdot10^6~\textrm{points}$")
#ax.set_title(r"$\textrm{Convergence}$")
fig.tight_layout()
plt.savefig("../img/compare_line/convergence.pdf")
################################################################################
################################################################################
################################################################################
# resolution irregular grid
fig, ax = plt.subplots(1, 3, figsize=(11.75,4), constrained_layout=True)
ax[0].imshow(intensity_cont_ext_5e5[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].set_title(r"$5\cdot 10^5~\textrm{sites}$")
ax[0].axis(False)
ax[1].imshow(intensity_cont_ext_2e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].set_title(r"$2 \cdot 10^6~\textrm{sites}$")
ax[1].axis(False)
im = ax[2].imshow(intensity_cont_ext_3e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].set_title(r"$3 \cdot 10^6~\textrm{sites}$")
ax[2].axis(False)
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.046, pad=0.04, label=iunits)
# fig.suptitle(r"$\textbf{Disk-Centre~Intensity~\textit{I}}_{\lambda_0}$")
# plt.show()
plt.savefig("../img/compare_line/disk_centre_irregular_resolution.pdf")
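# Editorial sketch: the repeated "# Line:"/"# Text:" pairs above could be
# folded into a helper like this (not part of the original script; the
# hard-coded offsets follow the full-resolution panels):
def add_scale_bar(axis, pix2Mm, x0=20, y0=16, label=r"\textbf{1 Mm}"):
    # 1 Mm scale bar: white line and label with a black outline for contrast.
    axis.hlines(y=y0, xmin=x0, xmax=x0 + 1/pix2Mm, lw=3, color='w',
                path_effects=[pe.Stroke(linewidth=5, foreground="black"), pe.Normal()])
    axis.text(x0 - 2, y0 + 4, label, color='w', fontsize=14,
              path_effects=[pe.Stroke(linewidth=2, foreground="black"), pe.Normal()])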
################################################################################
################################################################################
################################################################################
# plot all lines to highlight differences
fig, ax = plt.subplots(1, 2, figsize=(10, 5.5), constrained_layout=True, sharey=True)
I_regular = intensity_half.reshape(len(wavelength), -1)
I_regular *= units.kW*units.m**(-2)*units.nm**(-1)
I_irregular = intensity_cont_ext_3e6.reshape(len(wavelength), -1)
I_irregular *= units.kW*units.m**(-2)*units.nm**(-1)
Tb_regular = T_b(wavelength[:, np.newaxis]*units.nm, I_regular)
Tb_irregular = T_b(wavelength[:, np.newaxis]*units.nm, I_irregular)
ax[0].plot(wavelength[center-17:center+18],
Tb_regular[center-17:center+18, ::4].value,
color='k',
lw=0.03,
alpha=0.5,
rasterized=True)
ax[0].plot(wavelength[center-17:center+18],
np.mean(Tb_regular[center-17:center+18], axis=1).value,
color="crimson", label=r"$\textrm{spatial average}$")
ax[0].axvline(lambda0, ls="dashed", color="royalblue", lw=0.75)
ax[0].axvline(wavelength[blue_wing], ls="dashed", color="deepskyblue", lw=0.75)
ax[0].set_xlabel(r"$\textrm{Wavelength [nm]}$")
ax[0].set_ylabel(r"$\textrm{Brightness temperature [K]}$")
ax[0].set_title(r"$\textrm{Regular grid}$")
ax[0].legend(loc="upper right")
ax[0].text(x=lambda0+0.001, y=6150, s=r"$\lambda_0$", color="royalblue")
ax[0].text(x=wavelength[blue_wing]-0.006, y=6150,
s=r"$\textrm{wing}$", color="deepskyblue", rotation="vertical")
# ax[0].set_xticks(list(ax[0].get_xticks()) + [lambda0])
# ax[0].set_xticklabels([r"$%.2f$" %x for x in list(ax[0].get_xticks())[:-1]] + [r"$\lambda_0$"])
ax[1].plot(wavelength[center-17:center+18],
Tb_irregular[center-17:center+18, ::16].value,
color='k',
lw=0.03,
alpha=0.5,
rasterized=True)
ax[1].plot(wavelength[center-17:center+18],
np.mean(Tb_irregular[center-17:center+18], axis=1).value,
color="crimson", label=r"$\textrm{spatial average}$")
ax[1].axvline(lambda0, ls="dashed", color="royalblue", lw=0.75)
ax[1].axvline(wavelength[blue_wing], ls="dashed", color="deepskyblue", lw=0.75)
ax[1].set_xlabel(r"$\textrm{Wavelength [nm]}$")
ax[1].set_ylim(6000,12000)
ax[1].set_title(r"$\textrm{Irregular grid}$")
ax[1].legend(loc="upper right")
ax[1].text(x=lambda0+0.001, y=6150, s=r"$\lambda_0$", color="royalblue")
ax[1].text(x=wavelength[blue_wing]-0.006, y=6150,
s=r"$\textrm{wing}$", color="deepskyblue", rotation="vertical")
# ax[1].set_xticklabels([r"$%.2f$" %x for x in list(ax[0].get_xticks())[:-1]] + [r"$\lambda_0$"])
# ax[1].set_xticks(list(ax[1].get_xticks()) + [lambda0])
# fig.suptitle(r"$\textrm{Disk-Centre Intensity}$")
plt.savefig("../img/compare_line/lines.pdf", dpi=300)
| 0 | 0 | 0 |
d3c30d1b644c751ac6a4f91eab9ed0dd6ae26c2f | 732 | py | Python | Chapter 04/Gradient.py | bpbpublications/Neural-Network-for-Beginners | aa04574e2990920e5e0a65ed5af674adc61388c1 | [
"MIT"
] | 2 | 2021-11-24T02:51:58.000Z | 2022-02-03T12:50:15.000Z | Chapter 04/Gradient.py | bpbpublications/Neural-Network-for-Beginners | aa04574e2990920e5e0a65ed5af674adc61388c1 | [
"MIT"
] | null | null | null | Chapter 04/Gradient.py | bpbpublications/Neural-Network-for-Beginners | aa04574e2990920e5e0a65ed5af674adc61388c1 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pylab as plt
from Gradient_2D import numerical_gradient
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Run plain gradient descent on f from init_x, recording each step taken."""
    x = init_x
    x_history = []
    for i in range(step_num):
        x_history.append(x.copy())
        grad = numerical_gradient(f, x)
        x -= lr * grad
    return x, np.array(x_history)
def function(x):
    """Objective to minimise: f(x0, x1) = x0**2 + x1**2."""
    return x[0]**2 + x[1]**2
init_x = np.array([-4.0, 5.0])
lr = 0.1
step_num = 30
x, x_history = gradient_descent(function, init_x, lr=lr, step_num=step_num)
plt.plot( [-6, 6], [0,0], '--b')
plt.plot( [0,0], [-6, 6], '--b')
plt.plot(x_history[:,0], x_history[:,1], 'o')
plt.xlim(-4.5, 4.5)
plt.ylim(-5.5, 5.5)
plt.xlabel("X0")
plt.ylabel("X1")
plt.show()
| 19.263158 | 75 | 0.613388 | import numpy as np
import matplotlib.pylab as plt
from Gradient_2D import numerical_gradient
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Run plain gradient descent on f from init_x, recording each step taken."""
    x = init_x
    x_history = []
    for i in range(step_num):
        x_history.append(x.copy())
        grad = numerical_gradient(f, x)
        x -= lr * grad
    return x, np.array(x_history)
def function(x):
    """Objective to minimise: f(x0, x1) = x0**2 + x1**2."""
    return x[0]**2 + x[1]**2
init_x = np.array([-4.0, 5.0])
lr = 0.1
step_num = 30
x, x_history = gradient_descent(function, init_x, lr=lr, step_num=step_num)
plt.plot( [-6, 6], [0,0], '--b')
plt.plot( [0,0], [-6, 6], '--b')
plt.plot(x_history[:,0], x_history[:,1], 'o')
plt.xlim(-4.5, 4.5)
plt.ylim(-5.5, 5.5)
plt.xlabel("X0")
plt.ylabel("X1")
plt.show()
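# Editorial sanity check: for function(x) = x0**2 + x1**2 the analytic
# gradient is 2*x, so numerical_gradient (assuming it implements a standard
# finite-difference approximation) should agree closely with it, e.g.:
# assert np.allclose(numerical_gradient(function, np.array([3.0, 4.0])),
#                    np.array([6.0, 8.0]), atol=1e-4)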
| 259 | 0 | 46 |
e646cda38f01dba017b607a2f6a89de3e52ca797 | 835 | py | Python | ball_finder.py | daniyalmaroufi/ball_tracker | cefe2ae5a44efd59b51e769df92891510e788277 | [
"Apache-2.0"
] | null | null | null | ball_finder.py | daniyalmaroufi/ball_tracker | cefe2ae5a44efd59b51e769df92891510e788277 | [
"Apache-2.0"
] | null | null | null | ball_finder.py | daniyalmaroufi/ball_tracker | cefe2ae5a44efd59b51e769df92891510e788277 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
cap=cv2.VideoCapture(0)  # default webcam
max_radius=0  # largest enclosing-circle radius seen so far
max_center=(0,0)
# HSV bounds for the ball colour (hue 7-25 is roughly orange on OpenCV's 0-179 hue scale)
lower=np.array([7,137,132])
upper=np.array([25,255,255])
while True:
ret, frame = cap.read()
if frame is None:
break
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
out=cv2.inRange(hsv,lower,upper)
erosion=cv2.erode(out,None,iterations=1)
dilate=cv2.dilate(erosion,None,iterations=2)
cnts,_=cv2.findContours(dilate,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
(x, y),r=cv2.minEnclosingCircle(c)
center=(int(x),int(y))
r=int(r)
if r>max_radius:
max_radius=r
max_center=center
cv2.circle(frame,center,r,(0,255,0),2)
cv2.imshow("image",frame)
if cv2.waitKey(30)==ord('q'):
break
cv2.destroyAllWindows()
cap.release()
| 18.977273 | 77 | 0.640719 | import cv2
import numpy as np
cap=cv2.VideoCapture(0)  # default webcam
max_radius=0  # largest enclosing-circle radius seen so far
max_center=(0,0)
# HSV bounds for the ball colour (hue 7-25 is roughly orange on OpenCV's 0-179 hue scale)
lower=np.array([7,137,132])
upper=np.array([25,255,255])
while True:
ret, frame = cap.read()
if frame is None:
break
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
out=cv2.inRange(hsv,lower,upper)
erosion=cv2.erode(out,None,iterations=1)
dilate=cv2.dilate(erosion,None,iterations=2)
cnts,_=cv2.findContours(dilate,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
(x, y),r=cv2.minEnclosingCircle(c)
center=(int(x),int(y))
r=int(r)
if r>max_radius:
max_radius=r
max_center=center
cv2.circle(frame,center,r,(0,255,0),2)
cv2.imshow("image",frame)
if cv2.waitKey(30)==ord('q'):
break
cv2.destroyAllWindows()
cap.release()
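# Editorial note: the fixed HSV bounds above are lighting-dependent. One way
# to recalibrate them (an illustration, not part of the original script) is to
# sample a hand-picked patch of the ball and print its HSV extremes:
#   roi = hsv[y0:y1, x0:x1]
#   print(roi.reshape(-1, 3).min(axis=0), roi.reshape(-1, 3).max(axis=0))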
| 0 | 0 | 0 |
482cc4f47d8b8d0dcca00547e7c10c5371ad6158 | 5,760 | py | Python | mycroft/enclosure/display_manager.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 6,099 | 2016-05-17T19:41:56.000Z | 2022-03-31T15:34:48.000Z | mycroft/enclosure/display_manager.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 2,567 | 2016-05-20T16:23:11.000Z | 2022-03-23T01:54:39.000Z | mycroft/enclosure/display_manager.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 1,563 | 2016-05-20T15:06:21.000Z | 2022-03-30T01:28:12.000Z | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" DisplayManager
This module provides basic "state" for the visual representation associated
with this Mycroft instance. The current states are:
ActiveSkill - The skill that last interacted with the display via the
Enclosure API.
Currently, a wakeword sets the ActiveSkill to "wakeword", which will auto
clear after 10 seconds.
A skill is set to Active when it matches an intent, outputs audio, or
changes the display via the EnclosureAPI()
A skill is automatically cleared from Active two seconds after audio
output is spoken, or 2 seconds after resetting the display.
So it is common to have '' as the active skill.
"""
import json
from threading import Thread, Timer
import os
from mycroft.messagebus.client import MessageBusClient
from mycroft.util import get_ipc_directory
from mycroft.util.log import LOG
def _write_data(dictionary):
""" Writes the dictionary of state data to the IPC directory.
Args:
dictionary (dict): information to place in the 'disp_info' file
"""
managerIPCDir = os.path.join(get_ipc_directory(), "managers")
# change read/write permissions based on if file exists or not
path = os.path.join(managerIPCDir, "disp_info")
permission = "r+" if os.path.isfile(path) else "w+"
if permission == "w+" and os.path.isdir(managerIPCDir) is False:
os.makedirs(managerIPCDir)
os.chmod(managerIPCDir, 0o777)
try:
with open(path, permission) as dispFile:
# check if file is empty
if os.stat(str(dispFile.name)).st_size != 0:
data = json.load(dispFile)
else:
data = {}
LOG.info("Display Manager is creating " + dispFile.name)
for key in dictionary:
data[key] = dictionary[key]
dispFile.seek(0)
dispFile.write(json.dumps(data))
dispFile.truncate()
os.chmod(path, 0o777)
except Exception as e:
LOG.error(e)
LOG.error("Error found in display manager file, deleting...")
os.remove(path)
_write_data(dictionary)
def _read_data():
""" Writes the dictionary of state data from the IPC directory.
Returns:
dict: loaded state information
"""
managerIPCDir = os.path.join(get_ipc_directory(), "managers")
path = os.path.join(managerIPCDir, "disp_info")
permission = "r" if os.path.isfile(path) else "w+"
if permission == "w+" and os.path.isdir(managerIPCDir) is False:
os.makedirs(managerIPCDir)
data = {}
try:
with open(path, permission) as dispFile:
if os.stat(str(dispFile.name)).st_size != 0:
data = json.load(dispFile)
except Exception as e:
LOG.error(e)
os.remove(path)
_read_data()
return data
class DisplayManager:
""" The Display manager handles the basic state of the display,
be it a mark-1 or a mark-2 or even a future Mark-3.
"""
    def __init__(self, name=None):
        self.name = name or ""
    def set_active(self, skill_name=None):
""" Sets skill name as active in the display Manager
Args:
string: skill_name
"""
name = skill_name if skill_name is not None else self.name
_write_data({"active_skill": name})
    def get_active(self):
        """ Get the currently active skill from the display manager
Returns:
string: The active skill's name
"""
data = _read_data()
active_skill = ""
if "active_skill" in data:
active_skill = data["active_skill"]
return active_skill
def remove_active(self):
""" Clears the active skill """
LOG.debug("Removing active skill...")
_write_data({"active_skill": ""})
def init_display_manager_bus_connection():
""" Connects the display manager to the messagebus """
LOG.info("Connecting display manager to messagebus")
# Should remove needs to be an object so it can be referenced in functions
# [https://stackoverflow.com/questions/986006/how-do-i-pass-a-variable-by-reference]
display_manager = DisplayManager()
    should_remove = [True]
    def check_flag(flag):
        if flag[0] is True:
            display_manager.remove_active()
    def set_delay(event=None):
        should_remove[0] = True
        Timer(2, check_flag, [should_remove]).start()
    def set_remove_flag(event=None):
        should_remove[0] = False
    def connect():
        bus.run_forever()
    def remove_wake_word():
        data = _read_data()
        if "active_skill" in data and data["active_skill"] == "wakeword":
            display_manager.remove_active()
    def set_wakeword_skill(event=None):
        display_manager.set_active("wakeword")
        Timer(10, remove_wake_word).start()
bus = MessageBusClient()
bus.on('recognizer_loop:audio_output_end', set_delay)
bus.on('recognizer_loop:audio_output_start', set_remove_flag)
bus.on('recognizer_loop:record_begin', set_wakeword_skill)
event_thread = Thread(target=connect)
event_thread.setDaemon(True)
event_thread.start()
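# Minimal usage sketch (editorial illustration, not part of the original module):
#   dm = DisplayManager("my-skill")
#   dm.set_active()                      # marks "my-skill" as active
#   assert dm.get_active() == "my-skill"
#   dm.remove_active()                   # clears back to ""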
| 30.315789 | 88 | 0.66059 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" DisplayManager
This module provides basic "state" for the visual representation associated
with this Mycroft instance. The current states are:
ActiveSkill - The skill that last interacted with the display via the
Enclosure API.
Currently, a wakeword sets the ActiveSkill to "wakeword", which will auto
clear after 10 seconds.
A skill is set to Active when it matches an intent, outputs audio, or
changes the display via the EnclosureAPI()
A skill is automatically cleared from Active two seconds after audio
output is spoken, or 2 seconds after resetting the display.
So it is common to have '' as the active skill.
"""
import json
from threading import Thread, Timer
import os
from mycroft.messagebus.client import MessageBusClient
from mycroft.util import get_ipc_directory
from mycroft.util.log import LOG
def _write_data(dictionary):
""" Writes the dictionary of state data to the IPC directory.
Args:
dictionary (dict): information to place in the 'disp_info' file
"""
managerIPCDir = os.path.join(get_ipc_directory(), "managers")
# change read/write permissions based on if file exists or not
path = os.path.join(managerIPCDir, "disp_info")
permission = "r+" if os.path.isfile(path) else "w+"
if permission == "w+" and os.path.isdir(managerIPCDir) is False:
os.makedirs(managerIPCDir)
os.chmod(managerIPCDir, 0o777)
try:
with open(path, permission) as dispFile:
# check if file is empty
if os.stat(str(dispFile.name)).st_size != 0:
data = json.load(dispFile)
else:
data = {}
LOG.info("Display Manager is creating " + dispFile.name)
for key in dictionary:
data[key] = dictionary[key]
dispFile.seek(0)
dispFile.write(json.dumps(data))
dispFile.truncate()
os.chmod(path, 0o777)
except Exception as e:
LOG.error(e)
LOG.error("Error found in display manager file, deleting...")
os.remove(path)
_write_data(dictionary)
def _read_data():
""" Writes the dictionary of state data from the IPC directory.
Returns:
dict: loaded state information
"""
managerIPCDir = os.path.join(get_ipc_directory(), "managers")
path = os.path.join(managerIPCDir, "disp_info")
permission = "r" if os.path.isfile(path) else "w+"
if permission == "w+" and os.path.isdir(managerIPCDir) is False:
os.makedirs(managerIPCDir)
data = {}
try:
with open(path, permission) as dispFile:
if os.stat(str(dispFile.name)).st_size != 0:
data = json.load(dispFile)
except Exception as e:
LOG.error(e)
os.remove(path)
_read_data()
return data
class DisplayManager:
""" The Display manager handles the basic state of the display,
be it a mark-1 or a mark-2 or even a future Mark-3.
"""
def __init__(self, name=None):
self.name = name or ""
def set_active(self, skill_name=None):
""" Sets skill name as active in the display Manager
Args:
string: skill_name
"""
name = skill_name if skill_name is not None else self.name
_write_data({"active_skill": name})
    def get_active(self):
        """ Get the currently active skill from the display manager
Returns:
string: The active skill's name
"""
data = _read_data()
active_skill = ""
if "active_skill" in data:
active_skill = data["active_skill"]
return active_skill
def remove_active(self):
""" Clears the active skill """
LOG.debug("Removing active skill...")
_write_data({"active_skill": ""})
def init_display_manager_bus_connection():
""" Connects the display manager to the messagebus """
LOG.info("Connecting display manager to messagebus")
# Should remove needs to be an object so it can be referenced in functions
# [https://stackoverflow.com/questions/986006/how-do-i-pass-a-variable-by-reference]
display_manager = DisplayManager()
should_remove = [True]
def check_flag(flag):
if flag[0] is True:
display_manager.remove_active()
def set_delay(event=None):
should_remove[0] = True
Timer(2, check_flag, [should_remove]).start()
def set_remove_flag(event=None):
should_remove[0] = False
def connect():
bus.run_forever()
def remove_wake_word():
data = _read_data()
if "active_skill" in data and data["active_skill"] == "wakeword":
display_manager.remove_active()
def set_wakeword_skill(event=None):
display_manager.set_active("wakeword")
Timer(10, remove_wake_word).start()
bus = MessageBusClient()
bus.on('recognizer_loop:audio_output_end', set_delay)
bus.on('recognizer_loop:audio_output_start', set_remove_flag)
bus.on('recognizer_loop:record_begin', set_wakeword_skill)
event_thread = Thread(target=connect)
event_thread.setDaemon(True)
event_thread.start()
| 519 | 0 | 188 |
6c862f20c5d3e486d175bf3acd79343a9866965f | 1,403 | py | Python | rsa/rsa.py | VirangParekh/Rabin-Research | 9a7f2e5b90bb9dcfde839976d7f22a4166c80025 | [
"MIT"
] | null | null | null | rsa/rsa.py | VirangParekh/Rabin-Research | 9a7f2e5b90bb9dcfde839976d7f22a4166c80025 | [
"MIT"
] | 1 | 2021-12-25T07:18:25.000Z | 2021-12-25T07:18:25.000Z | rsa/rsa.py | VirangParekh/Rabin-Research | 9a7f2e5b90bb9dcfde839976d7f22a4166c80025 | [
"MIT"
] | null | null | null | from typing import Any, Tuple
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
def keyGen(key_size: int) -> Tuple[bytes, bytes, Any]:
    """Generates a key pair for the RSA algorithm.
    Parameters
    ----------
    key_size : int
        Size of key in bits
    Returns
    -------
    Tuple[bytes, bytes, Any]
        Exported private key, exported public key, and the key pair object
    """
key_pair = RSA.generate(key_size)
private_key = key_pair.exportKey()
public_key = key_pair.publickey()
public_key = public_key.exportKey()
return private_key, public_key, key_pair
def encrypt(plain_text: bytes, key_size: int) -> Tuple[Any, bytes, bytes]:
    """Encrypts plain text using RSA.
    Parameters
    ----------
    plain_text : bytes
        Message to encrypt
    key_size : int
        Size of key in bits
    Returns
    -------
    Tuple[Any, bytes, bytes]
        Key pair, plain text, and cipher text
    """
private_key, public_key, key_pair = keyGen(key_size)
key = RSA.importKey(public_key)
encryptor = PKCS1_OAEP.new(key)
cipher_text = encryptor.encrypt(plain_text)
return key_pair, plain_text, cipher_text
def decrypt(cipher_text: bytes, key_pair: Any) -> bytes:
    """Decrypts RSA-encrypted text.
    Parameters
    ----------
    cipher_text : bytes
        Encrypted text
    key_pair : Any
        Key pair whose private key decrypts the message
    Returns
    -------
    bytes
        Decrypted text
    """
decryptor = PKCS1_OAEP.new(key_pair)
plain_text = decryptor.decrypt(cipher_text)
return plain_text
| 20.940299 | 67 | 0.637919 | from typing import Any, Tuple
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
def keyGen(key_size: int) -> Tuple[bytes, bytes, Any]:
    """Generates a key pair for the RSA algorithm.
    Parameters
    ----------
    key_size : int
        Size of key in bits
    Returns
    -------
    Tuple[bytes, bytes, Any]
        Exported private key, exported public key, and the key pair object
    """
key_pair = RSA.generate(key_size)
private_key = key_pair.exportKey()
public_key = key_pair.publickey()
public_key = public_key.exportKey()
return private_key, public_key, key_pair
def encrypt(plain_text: bytes, key_size: int) -> Tuple[Any, bytes, bytes]:
    """Encrypts plain text using RSA.
    Parameters
    ----------
    plain_text : bytes
        Message to encrypt
    key_size : int
        Size of key in bits
    Returns
    -------
    Tuple[Any, bytes, bytes]
        Key pair, plain text, and cipher text
    """
private_key, public_key, key_pair = keyGen(key_size)
key = RSA.importKey(public_key)
encryptor = PKCS1_OAEP.new(key)
cipher_text = encryptor.encrypt(plain_text)
return key_pair, plain_text, cipher_text
def decrypt(cipher_text: bytes, key_pair: Any) -> bytes:
    """Decrypts RSA-encrypted text.
    Parameters
    ----------
    cipher_text : bytes
        Encrypted text
    key_pair : Any
        Key pair whose private key decrypts the message
    Returns
    -------
    bytes
        Decrypted text
    """
decryptor = PKCS1_OAEP.new(key_pair)
plain_text = decryptor.decrypt(cipher_text)
return plain_text
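# Round-trip usage sketch (editorial addition; assumes a 2048-bit key, large
# enough for this short OAEP-padded message):
if __name__ == "__main__":
    key_pair, plain, cipher = encrypt(b"attack at dawn", 2048)
    assert decrypt(cipher, key_pair) == plain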
| 0 | 0 | 0 |
187fd1823b90924a4639f10746378163e4c943d5 | 21,116 | py | Python | veracode-da-reset-scheduler.py | dennismedeiros/veracode-da-reset-recheduler | c2c7b583371443de2bdcd1b2dd766a70acf78274 | [
"MIT"
] | null | null | null | veracode-da-reset-scheduler.py | dennismedeiros/veracode-da-reset-recheduler | c2c7b583371443de2bdcd1b2dd766a70acf78274 | [
"MIT"
] | null | null | null | veracode-da-reset-scheduler.py | dennismedeiros/veracode-da-reset-recheduler | c2c7b583371443de2bdcd1b2dd766a70acf78274 | [
"MIT"
] | null | null | null | import sys
import requests
import getopt
import json
import calendar
import math
from datetime import datetime, timezone, timedelta
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
api_base = "https://api.veracode.com/was/configservice/v1/"
headers = {
"User-Agent": "Dynamic Analysis API Example Client",
"Content-Type": "application/json"
}
analysis_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after},
"week_of_month": "{week_of_month}"
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
weekly_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after}
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
monthly_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after},
"week_of_month": "{week_of_month}"
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
scan_update_hold = r'''{
"name": "{name}"
}
'''
class RecurrenceSchedule:
    day_of_week = "MONDAY"
    recurrence_interval = 1
    recurrence_type = "MONTHLY"
    schedule_end_after = "12"
    week_of_month = ""
class Duration:
    length = 0
    unit = "DAY"
class CommandSettings:
    verbose = False
    dry_run = False
    interactive = False
    execute = False
cmdsettings = CommandSettings()
def print_help():
"""Prints command line options and exits"""
print("""veracode-da-reset-scheduler.py [-h] [-d] [-v] -x
Updates all Dynamic Analysis Recurring Scheduled scans that have expired with recurrences for one year.
Passing of the -x or --execute is required to run program
Options:
-h --help shows this help menu
-d --dry-run performs a dry run for updating content without committing changes
-v --verbose turns on the verbose debug logging for the program
-x --execute performs a live update to content
""")
sys.exit()
def main(argv):
    """Command line entry point: reschedules expired Dynamic Analysis recurring scheduled scans"""
try:
#TODO: Add to commandline functionality
application_name = ''
target_url = ''
#print('ARGV :', argv)
options, args = getopt.getopt(argv, "hvdixa:u:",
["help","verbose","dry-run","execute"])
#, "interactive","application_name=", "target_url="])
#print('OPTIONS :', options)
for opt, arg in options:
if opt == '-h':
print_help()
elif opt == '-v':
cmdsettings.verbose = True
elif opt in ('-d', '--dry-run'):
cmdsettings.dry_run = True
            elif opt in ('-x', '--execute'):
                cmdsettings.execute = True
#elif opt in ('-i', '--interactive'):
# cmdsettings.interactive = True
#if opt in ('-a', '--application_name'):
# application_name = arg
#if opt in ('-u', '--url'):
# target_url = arg
#print('VERBOSE :', cmdsettings.verbose)
#print('DRY RUN :', cmdsettings.dry_run)
#print('INTERACTIVE :', cmdsettings.interactive)
#print('APPLICATION NAME:', application_name)
#print('TARGET URL :', target_url)
#print('REMAINING :', args)
if cmdsettings.execute or cmdsettings.dry_run:
execution_process()
else:
print_help()
except requests.RequestException as e:
print("An error occurred!")
print(e)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| 36.916084 | 140 | 0.667693 | import sys
import requests
import getopt
import json
import calendar
import math
from datetime import datetime, timezone, timedelta
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
api_base = "https://api.veracode.com/was/configservice/v1/"
headers = {
"User-Agent": "Dynamic Analysis API Example Client",
"Content-Type": "application/json"
}
analysis_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after},
"week_of_month": "{week_of_month}"
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
weekly_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after}
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
monthly_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after},
"week_of_month": "{week_of_month}"
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
scan_update_hold = r'''{
"name": "{name}"
}
'''
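# Editorial note: the {placeholder} tokens in the templates above are filled
# with plain str.replace() calls in update_scan_recurrence() below, not with
# str.format(), so the literal JSON braces need no escaping.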
class RecurrenceSchedule:
day_of_week = "MONDAY"
recurrence_interval = 1
recurrence_type = "MONTHLY"
schedule_end_after = "12"
week_of_month = ""
class Duration:
length = 0
unit = "DAY"
class CommandSettings:
verbose = False
dry_run = False
interactive = False
execute = False
cmdsettings = CommandSettings()
def http_get(uri):
if cmdsettings.verbose:
print(f"URI: {uri}")
response = requests.get(uri, auth=RequestsAuthPluginVeracodeHMAC(), headers=headers)
# Error handling
if response.status_code == 200:
        print("Request succeeded.")
return response
def get_da_analyses():
if cmdsettings.verbose:
print(f"get_da_analyses")
content = None
path = api_base + f"analyses"
response = requests.get(path, auth=RequestsAuthPluginVeracodeHMAC(), headers=headers)
if response.status_code == 200:
content = response.json()
if cmdsettings.verbose:
print(f"status code {response.status_code}")
result = response.text
if result is not None:
print(json.dumps(result, indent=4, sort_keys=True))
return content
def get_da_platform_applications(application_name):
if cmdsettings.verbose:
print(f"get_da_platform_applications(): {application_name}")
content = None
url = api_base + f"platform_applications"
parameters = f"application_name={application_name}"
response = requests.get(url, params=parameters, auth=RequestsAuthPluginVeracodeHMAC(), headers=headers)
if response.status_code == 200:
content = response.json()
if cmdsettings.verbose:
print(f"status code {response.status_code}")
result = response.text
if result is not None:
print(json.dumps(result, indent=4, sort_keys=True))
return content
def patch_update_analysis(analysis_id, json_payload):
bReturn = False
if cmdsettings.verbose:
print("patch_update_analysis")
print(json_payload)
if cmdsettings.dry_run is False:
scan_path = api_base + "analyses/" + analysis_id + "?method=PATCH"
response = requests.put(scan_path, auth=RequestsAuthPluginVeracodeHMAC(), headers=headers, json=json.loads(json_payload))
if cmdsettings.verbose:
print(f"status code: {response.status_code}")
content = response.text
if content is not None:
print("content:" + content)
if response.status_code == 204:
bReturn = True
return bReturn
def convert_from_datetime_to_utc(utc_date_time):
return utc_date_time.isoformat("T") + "Z"
def convert_from_utc_to_datetime(original_date_time):
    if cmdsettings.verbose:
        print("convert_from_utc_to_datetime....")
print(f"Original DateTime: {original_date_time}")
    # Strip off the ending 'Z[UTC]' value and append the UTC timezone offset
date_time_str = original_date_time[:-6]
date_time_str = date_time_str + "+00:00"
if cmdsettings.verbose:
print(f"Converted DateTime {date_time_str}")
# convert to datetime object for processing
date_time_converted = datetime.fromisoformat(date_time_str)
if cmdsettings.verbose:
print('Date:', date_time_converted.date())
print('Time:', date_time_converted.time())
print('Date-time:', date_time_converted)
return date_time_converted
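# Editorial example: an input like "2021-06-15T09:30:00.000Z[UTC]" has its
# trailing six characters "Z[UTC]" stripped, "+00:00" appended, and
# datetime.fromisoformat() then returns a timezone-aware UTC datetime.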
def find_week_of_month(week_of_month):
weeksOfTheMonth = ["FIRST", "SECOND", "THIRD", "FOURTH", "LAST"]
index_count = 1
for week in weeksOfTheMonth:
if week == week_of_month:
break
else:
index_count= index_count + 1
return index_count
def find_day_of_week(day_of_the_week):
daysOfTheWeek = ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]
index_count = 0
for day in daysOfTheWeek:
if day == day_of_the_week:
break
else:
index_count= index_count + 1
return index_count
def get_number_of_weeks_in_month(date):
if cmdsettings.verbose:
print("get_number_of_weeks_in_month....")
print(f"Proposed Date: {date}")
c = calendar.TextCalendar()
print(c.formatmonth(date.year, date.month))
print(c.formatmonth(date.year, date.month+1))
weeks_in_month = len(calendar.monthcalendar(date.year, date.month))
return weeks_in_month
def get_week_of_month(date):
first_day = date.replace(day=1)
day_of_month = date.day
if(first_day.weekday() == 6):
adjusted_dom = (1 + first_day.weekday()) / 7
else:
adjusted_dom = day_of_month + first_day.weekday()
return int(math.ceil(adjusted_dom/7.0))
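# Worked example for get_week_of_month (editorial): for date(2021, 6, 15),
# June 1st is a Tuesday (weekday() == 1), so adjusted_dom = 15 + 1 = 16 and
# ceil(16 / 7) = 3 - the 15th falls in the third week of the month.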
def find_next_available_monthly(iso_current_datetime, original_time, recurrence_schedule):
if cmdsettings.verbose:
print("find_next_available_monthly....")
print(f"Current Date Time: {iso_current_datetime}")
print(f"Original Scheduled Day and Time: {recurrence_schedule.week_of_month} {recurrence_schedule.day_of_week} {original_time}")
proposed_datetime = find_next_available_weekday(iso_current_datetime, original_time, recurrence_schedule.day_of_week)
if cmdsettings.verbose:
print("find_next_available_monthly....")
print(f"Proposed Date and Time:{proposed_datetime}")
proposed_date = proposed_datetime.date()
proposed_time = proposed_datetime.time()
scheduled_week_of_month_num = find_week_of_month(recurrence_schedule.week_of_month)
scheduled_day_of_week_num = find_day_of_week(recurrence_schedule.day_of_week)
proposed_week_of_month_num = get_week_of_month(proposed_date)
if proposed_week_of_month_num < scheduled_week_of_month_num:
if cmdsettings.verbose:
print("proposed_week_of_month_num < scheduled_week_of_month_num")
delta = scheduled_week_of_month_num - proposed_week_of_month_num
proposed_date = proposed_date + timedelta(weeks=delta)
elif proposed_week_of_month_num > scheduled_week_of_month_num:
if cmdsettings.verbose:
print("proposed_week_of_month_num > scheduled_week_of_month_num")
# Must account if existing Monthy is 5 week or 4
weeks_in_month = get_number_of_weeks_in_month(proposed_date)
delta = 4 - (weeks_in_month - proposed_week_of_month_num)
proposed_date = proposed_date + timedelta(weeks=delta)
else:
if cmdsettings.verbose:
print("Proposed and Scheduled Weeks are the same.")
returnDateTime = datetime.combine(proposed_date, proposed_time)
if cmdsettings.verbose:
print(f"Calculated Date and Time: {returnDateTime}")
return returnDateTime
def find_next_available_weekday(iso_current_datetime, original_time, day_of_the_week):
if cmdsettings.verbose:
print("find_next_available_weekly....")
print(f"Current Date Time: {iso_current_datetime}")
print(f"Original Scheduled Day and Time: {day_of_the_week} {original_time}")
current_weekday_num = iso_current_datetime.weekday()
scheduled_weekday_num = find_day_of_week(day_of_the_week)
current_weekday = calendar.day_name[current_weekday_num]
scheduled_weekday = calendar.day_name[scheduled_weekday_num]
current_date = iso_current_datetime.date()
scheduled_date = current_date
if current_weekday_num > scheduled_weekday_num:
if cmdsettings.verbose:
print(f"today: {current_weekday} > scheduled: {scheduled_weekday}")
delta = scheduled_weekday_num - current_weekday_num + 7
scheduled_date = current_date + timedelta(days=delta)
elif current_weekday_num < scheduled_weekday_num:
if cmdsettings.verbose:
print(f"today: {current_weekday} < scheduled: {scheduled_weekday}")
delta = scheduled_weekday_num - current_weekday_num
scheduled_date = current_date + timedelta(days=delta)
else:
if cmdsettings.verbose:
print(f"today: {current_weekday} == scheduled: {scheduled_weekday}")
if iso_current_datetime.time() > original_time:
scheduled_date = current_date + timedelta(days=7)
scheduled_datetime = datetime.combine(scheduled_date, original_time)
if cmdsettings.verbose:
print(f"Next Available {scheduled_weekday}: {scheduled_datetime}")
return scheduled_datetime
def calculate_next_available_datetime(iso_date_time_now, original_date_time, recurrence_schedule):
if cmdsettings.verbose:
print("calculate_next_available_datetime....")
start_datetime = original_date_time
iso_date_time_scheduled = original_date_time
# update only if the scheduled time is in the past
if iso_date_time_now > iso_date_time_scheduled:
scheduled_date = iso_date_time_scheduled.date()
scheduled_time = iso_date_time_scheduled.time()
if recurrence_schedule.recurrence_type == "WEEKLY":
# find the next available day
start_datetime = find_next_available_weekday(iso_date_time_now, scheduled_time, recurrence_schedule.day_of_week)
else:
# find the next available monthly day
start_datetime = find_next_available_monthly(iso_date_time_now, scheduled_time, recurrence_schedule)
return start_datetime
def copy_scan_recurrence(analysis):
analysis_id = analysis["analysis_id"]
# Duration represents scan window
# Start Date must be just prior the recurrence cadence
# End Date not needed as system will calculate value based on start date
# May have to retrieve exiting duration
def update_scan_recurrence(analysis_id, duration, recurrence_schedule, start_date):
bResult = False
if cmdsettings.verbose:
print(f"update_scan_recurrence....")
schedule_update = weekly_update_template
if recurrence_schedule.recurrence_type == "MONTHLY":
schedule_update = monthly_update_template
schedule_update = schedule_update.replace("{duration_length}", str(duration.length), 1)
schedule_update = schedule_update.replace("{duration_unit}", duration.unit, 1)
schedule_update = schedule_update.replace("{recurrence_type}", recurrence_schedule.recurrence_type, 1)
schedule_update = schedule_update.replace("{day_of_week}", recurrence_schedule.day_of_week, 1)
schedule_update = schedule_update.replace("{recurrence_interval}", str(recurrence_schedule.recurrence_interval), 1)
schedule_update = schedule_update.replace("{schedule_end_after}", str(recurrence_schedule.schedule_end_after), 1)
if recurrence_schedule.recurrence_type == "MONTHLY":
schedule_update = schedule_update.replace("{week_of_month}", recurrence_schedule.week_of_month, 1)
schedule_update = schedule_update.replace("{start_date}", start_date, 1)
if cmdsettings.verbose:
print("updating to:")
print(schedule_update)
bResult = patch_update_analysis(analysis_id, schedule_update)
return bResult
def process_analysis(analysis):
bResult = False
analysis_id = analysis["analysis_id"]
analysis_name = analysis["name"]
    print(f"Processing Started: ({analysis_id}) '{analysis_name}'")
duration = Duration()
duration.length = analysis["schedule_summary"]["duration"]["length"]
duration.unit = analysis["schedule_summary"]["duration"]["unit"]
recurrence_schedule = RecurrenceSchedule()
recurrence_schedule.recurrence_type = analysis["schedule_summary"]["scan_recurrence_schedule"]["recurrence_type"]
recurrence_schedule.recurrence_interval = analysis["schedule_summary"]["scan_recurrence_schedule"]["recurrence_interval"]
recurrence_schedule.day_of_week = analysis["schedule_summary"]["scan_recurrence_schedule"]["day_of_week"]
recurrence_schedule.schedule_end_after = analysis["schedule_summary"]["scan_recurrence_schedule"]["schedule_end_after"]
if analysis["schedule_summary"]["scan_recurrence_schedule"]["recurrence_type"] == "MONTHLY":
recurrence_schedule.week_of_month = analysis["schedule_summary"]["scan_recurrence_schedule"]["week_of_month"]
# Calculate new date time based on recurrence schedule
original_start_datetime_str = analysis["schedule_summary"]["start_date"]
original_start_datetime = convert_from_utc_to_datetime(original_start_datetime_str)
iso_date_time_now = datetime.now(timezone.utc)
if original_start_datetime < iso_date_time_now:
iso_start_date = calculate_next_available_datetime(iso_date_time_now, original_start_datetime, recurrence_schedule)
print(f" ({analysis_id}) '{analysis_name}' proposed next available scheduled date and time: {iso_start_date}")
start_date = convert_from_datetime_to_utc(iso_start_date)
bResult = update_scan_recurrence(analysis_id, duration, recurrence_schedule, start_date)
if bResult == True:
            print(f"Processing Completed: ({analysis_id}) '{analysis_name}' updated and scheduled for {start_date}.")
else:
            print(f"Processing Failed: update for ({analysis_id}) '{analysis_name}' was not completed.")
else:
        print(f"Processing Completed: ({analysis_id}) '{analysis_name}' is scheduled for the future. Update not needed.")
return bResult
def update_analyses_to_recur(analyses):
if cmdsettings.verbose:
print("update_analyses_to_recur....")
analyses_updated = 0
analyses_not_updated = 0
for analysis in analyses:
if cmdsettings.verbose:
print("Analysis content:")
print(analysis)
if process_analysis(analysis) == True:
analyses_updated = analyses_updated + 1
else:
analyses_not_updated = analyses_not_updated + 1
print(f"Analyses updated: {analyses_updated}")
print(f"Analyses not updated: {analyses_not_updated}")
def filter_list_for_recurring(scans):
if cmdsettings.verbose:
print("filter_list_for_recurring")
    # initialize return structure
analyses = []
# process list for just recurring scheduled scans
for analysis in scans:
frequencyType = analysis["schedule_frequency"]["frequency_type"]
if frequencyType == 'RECURRING' or frequencyType == 'RECURRING_WITH_PAUSE_AND_RESUME':
# Check if recurring scheduled for a Year in either Monthly or Weekly modes
            recurrenceType = analysis["schedule_summary"]["scan_recurrence_schedule"]["recurrence_type"]
            scheduleEndAfter = analysis["schedule_summary"]["scan_recurrence_schedule"]["schedule_end_after"]
            if (recurrenceType == 'WEEKLY' and scheduleEndAfter == 52) or (recurrenceType == 'MONTHLY' and scheduleEndAfter == 12):
analyses.append(analysis)
if cmdsettings.verbose:
print("List of Recurring Scheduled Analyses")
for analysis in analyses:
print(f" ({analysis['analysis_id']}) '{analysis['name']}'")
return analyses
def execution_process():
if cmdsettings.verbose:
print("execution_process....")
if cmdsettings.dry_run:
print("Performing a Dry Run")
#TODO: Refactor to add Interactive Functionality and Application Name
#anaysis_result = get_da_platform_applications(application_name)
#if anaysis_result is not None:
# print("Have Scan!")
# retrieve exising list of scans available
result = get_da_analyses()
if result is not None:
# Iterate through each scan configuration and update to scan ?
if len(result["_embedded"]["analyses"]) == 0:
print("No analyses defined.")
return
# sort list of only recurring scheduled dynamic analysis scans
filtered_list = filter_list_for_recurring(result["_embedded"]["analyses"])
if len(filtered_list) > 0:
print(f"Discovered {len(filtered_list)} analyses for processing.")
# process list of dynamic analysis scans
update_analyses_to_recur(filtered_list)
else:
print("No recurring scheduled analyses defined")
return
def print_help():
"""Prints command line options and exits"""
print("""veracode-da-reset-scheduler.py [-h] [-d] [-v] -x
Updates all Dynamic Analysis Recurring Scheduled scans that have expired with recurrences for one year.
Passing of the -x or --execute is required to run program
Options:
-h --help shows this help menu
-d --dry-run performs a dry run for updating content without committing changes
-v --verbose turns on the verbose debug logging for the program
-x --execute performs a live update to content
""")
sys.exit()
def interactive_update_for_scan(scan):
result = False
print(scan)
# while: True
# entry = input("String [y] or [q] quit)")
# if entry == "q":
# break
# else
# break
return result
def main(argv):
    """Command line entry point: reschedules expired Dynamic Analysis recurring scheduled scans"""
try:
#TODO: Add to commandline functionality
application_name = ''
target_url = ''
#print('ARGV :', argv)
options, args = getopt.getopt(argv, "hvdixa:u:",
["help","verbose","dry-run","execute"])
#, "interactive","application_name=", "target_url="])
#print('OPTIONS :', options)
for opt, arg in options:
if opt == '-h':
print_help()
elif opt == '-v':
cmdsettings.verbose = True
elif opt in ('-d', '--dry-run'):
cmdsettings.dry_run = True
            elif opt in ('-x', '--execute'):
                cmdsettings.execute = True
#elif opt in ('-i', '--interactive'):
# cmdsettings.interactive = True
#if opt in ('-a', '--application_name'):
# application_name = arg
#if opt in ('-u', '--url'):
# target_url = arg
#print('VERBOSE :', cmdsettings.verbose)
#print('DRY RUN :', cmdsettings.dry_run)
#print('INTERACTIVE :', cmdsettings.interactive)
#print('APPLICATION NAME:', application_name)
#print('TARGET URL :', target_url)
#print('REMAINING :', args)
if cmdsettings.execute or cmdsettings.dry_run:
execution_process()
else:
print_help()
except requests.RequestException as e:
print("An error occurred!")
print(e)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| 15,689 | 255 | 531 |
1a4b3288f13e95b037d8ec4988f07395f199a3ba | 1,293 | py | Python | students/K33401/Nguyen_tuan/lap3/my_app/lap3/urls.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | 7 | 2021-09-02T08:20:58.000Z | 2022-01-12T11:48:07.000Z | students/K33401/Nguyen_tuan/lap3/my_app/lap3/urls.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | 76 | 2021-09-17T23:01:50.000Z | 2022-03-18T16:42:03.000Z | students/K33401/Nguyen_tuan/lap3/my_app/lap3/urls.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | 60 | 2021-09-04T16:47:39.000Z | 2022-03-21T04:41:27.000Z | from django.contrib import admin
from django.urls import path, include
from .views import *
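# Editorial note: the delete/ routes below are wired to the Retrieve views;
# unless those are RetrieveDestroyAPIView subclasses (the views module is not
# shown here), DELETE requests will not actually remove records.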
urlpatterns = [
path('staff/all', StaffListView.as_view()),
path('staff/<int:pk>', StaffRetrieveView.as_view()),
path('staff/update/<int:pk>', StaffUpdateView.as_view()),
path('staff/new', StaffCreateView.as_view()),
path('staff/delete/<int:pk>', StaffRetrieveView.as_view()),
path('room/all', RoomListView.as_view()),
path('room/<int:pk>', RoomRetrieveView.as_view()),
path('room/update/<int:pk>', RoomUpdateView.as_view()),
path('room/new', RoomCreateView.as_view()),
path('room/delete/<int:pk>', RoomRetrieveView.as_view()),
path('guest/all', GuestListView.as_view()),
path('guest/<int:pk>', GuestRetrieveView.as_view()),
path('guest/update/<int:pk>', GuestUpdateView.as_view()),
path('guest/new', GuestCreateView.as_view()),
path('guest/delete/<int:pk>', GuestRetrieveView.as_view()),
path('schedule/all', ScheduleListView.as_view()),
path('schedule/<int:pk>', ScheduleRetrieveView.as_view()),
path('schedule/update/<int:pk>', ScheduleUpdateView.as_view()),
path('schedule/new', ScheduleCreateView.as_view()),
path('schedule/delete/<int:pk>', ScheduleRetrieveView.as_view()),
] | 39.181818 | 70 | 0.668987 | from django.contrib import admin
from django.urls import path, include
from .views import *
urlpatterns = [
path('staff/all', StaffListView.as_view()),
path('staff/<int:pk>', StaffRetrieveView.as_view()),
path('staff/update/<int:pk>', StaffUpdateView.as_view()),
path('staff/new', StaffCreateView.as_view()),
path('staff/delete/<int:pk>', StaffRetrieveView.as_view()),
path('room/all', RoomListView.as_view()),
path('room/<int:pk>', RoomRetrieveView.as_view()),
path('room/update/<int:pk>', RoomUpdateView.as_view()),
path('room/new', RoomCreateView.as_view()),
path('room/delete/<int:pk>', RoomRetrieveView.as_view()),
path('guest/all', GuestListView.as_view()),
path('guest/<int:pk>', GuestRetrieveView.as_view()),
path('guest/update/<int:pk>', GuestUpdateView.as_view()),
path('guest/new', GuestCreateView.as_view()),
path('guest/delete/<int:pk>', GuestRetrieveView.as_view()),
path('schedule/all', ScheduleListView.as_view()),
path('schedule/<int:pk>', ScheduleRetrieveView.as_view()),
path('schedule/update/<int:pk>', ScheduleUpdateView.as_view()),
path('schedule/new', ScheduleCreateView.as_view()),
path('schedule/delete/<int:pk>', ScheduleRetrieveView.as_view()),
] | 0 | 0 | 0 |
136e5cea11ebe14ca22086997403538c7375eb0f | 2,274 | py | Python | models/resnet.py | sin1012/kaggle-birdcall-identification | 737ccca47d66009b4d14fb8e5ba3c6226fd2fa1b | [
"MIT"
] | 1 | 2021-02-11T10:35:02.000Z | 2021-02-11T10:35:02.000Z | models/resnet.py | sin1012/kaggle-birdcall-identification | 737ccca47d66009b4d14fb8e5ba3c6226fd2fa1b | [
"MIT"
] | null | null | null | models/resnet.py | sin1012/kaggle-birdcall-identification | 737ccca47d66009b4d14fb8e5ba3c6226fd2fa1b | [
"MIT"
] | null | null | null | import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import torch
"""
# https://zhuanlan.zhihu.com/p/93806755
class res50(torch.nn.Module):
def __init__(self, num_classes):
super(res50, self).__init__()
resnet = resnet50(pretrained=True)
self.backbone = torch.nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.layer1,
resnet.layer2,
resnet.layer3,
resnet.layer4
)
self.pool = torch.nn.AdaptiveMaxPool2d(1)
self.bnneck = nn.BatchNorm1d(2048)
self.bnneck.bias.requires_grad_(False) # no shift
self.classifier = nn.Linear(2048, num_classes, bias=False)
def forward(self, x):
x = self.backbone(x)
x = self.pool(x)
feat = x.view(x.shape[0], -1)
feat = self.bnneck(feat)
if not self.training:
return nn.functional.normalize(feat, dim=1, p=2)
x = self.classifier(feat)
return x
""" | 34.984615 | 72 | 0.547493 | import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import torch
class ResNet(nn.Module):
def __init__(self, base_model_name: str, pretrained=False,
num_classes=264):
super().__init__()
base_model = models.__getattribute__(base_model_name)(
pretrained=pretrained)
layers = list(base_model.children())[:-2]
layers.append(nn.AdaptiveMaxPool2d(1))
self.encoder = nn.Sequential(*layers)
in_features = base_model.fc.in_features
self.classifier = nn.Sequential(
nn.Linear(in_features, 1024), nn.ReLU(), nn.Dropout(p=0.2),
nn.Linear(1024, 1024), nn.ReLU(), nn.Dropout(p=0.2),
nn.Linear(1024, num_classes))
def forward(self, x):
batch_size = x.size(0)
x = self.encoder(x)
x = x.view(batch_size, -1)
x = self.classifier(x)
        # probabilities are precomputed for the commented-out dict return
        # below; the layer currently returns raw logits only
        multiclass_proba = F.softmax(x, dim=1)
        multilabel_proba = torch.sigmoid(x)
# return {
# "logits": x,
# "multiclass_proba": multiclass_proba,
# "multilabel_proba": multilabel_proba
# }
return x
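# Usage sketch (illustrative, not part of the original file): any torchvision
# classifier name works for `base_model_name`, e.g.
#   model = ResNet("resnet18", pretrained=False, num_classes=264)
#   logits = model(torch.rand(2, 3, 224, 224))  # -> shape (2, 264)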
"""
# https://zhuanlan.zhihu.com/p/93806755
class res50(torch.nn.Module):
def __init__(self, num_classes):
super(res50, self).__init__()
resnet = resnet50(pretrained=True)
self.backbone = torch.nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.layer1,
resnet.layer2,
resnet.layer3,
resnet.layer4
)
self.pool = torch.nn.AdaptiveMaxPool2d(1)
self.bnneck = nn.BatchNorm1d(2048)
self.bnneck.bias.requires_grad_(False) # no shift
self.classifier = nn.Linear(2048, num_classes, bias=False)
def forward(self, x):
x = self.backbone(x)
x = self.pool(x)
feat = x.view(x.shape[0], -1)
feat = self.bnneck(feat)
if not self.training:
return nn.functional.normalize(feat, dim=1, p=2)
x = self.classifier(feat)
return x
""" | 1,028 | 3 | 81 |
41929837401832ee79471043f80fbf8fdcd626c3 | 218 | py | Python | findmax_a0201.py | lsm4446/study_python | d05077b319c98007af26c92f69f5d59fe33483d0 | [
"BSD-2-Clause"
] | 1 | 2020-02-17T01:25:35.000Z | 2020-02-17T01:25:35.000Z | findmax_a0201.py | lsm4446/study_python | d05077b319c98007af26c92f69f5d59fe33483d0 | [
"BSD-2-Clause"
] | 2 | 2021-03-31T19:32:47.000Z | 2021-12-13T20:33:30.000Z | findmax_a0201.py | lsm4446/study_python | d05077b319c98007af26c92f69f5d59fe33483d0 | [
"BSD-2-Clause"
] | null | null | null |
def find_max(a):
    """Return the largest element of the sequence `a`."""
    n = len(a)
    max_v = a[0]
    for i in range(1, n):
        if a[i] > max_v:
            max_v = a[i]
    return max_v
v = [17, 92, 18, 33, 58, 7, 33, 42, 79, 37]
print(find_max(v))
| 19.818182 | 44 | 0.454128 | def find_max(a):
n = len(a)
max_v = a[0]
for i in range(1, n-1):
if a[i] > max_v:
max_v = a[i]
return max_v
v = [17, 92, 18, 33, 58, 7, 33, 42, 79, 37]
print(find_max(v))
| 128 | 0 | 23 |
aad07478504a872413e780025845c6c8770504f0 | 82 | py | Python | rest-api/flask_app/database/__init__.py | sinedie/Flask-Svelte-Websockets-Nginx-Docker | 76daeec2c76f9f27ca526f53393ab4363020b92b | [
"WTFPL"
] | 4 | 2021-11-21T14:04:15.000Z | 2022-03-20T15:28:14.000Z | rest-api/flask_app/database/__init__.py | sinedie/Utimate-flask-websocket-template | 76daeec2c76f9f27ca526f53393ab4363020b92b | [
"WTFPL"
] | null | null | null | rest-api/flask_app/database/__init__.py | sinedie/Utimate-flask-websocket-template | 76daeec2c76f9f27ca526f53393ab4363020b92b | [
"WTFPL"
] | null | null | null | from flask_app.database.db import *
from flask_app.database.models.User import *
| 20.5 | 44 | 0.804878 | from flask_app.database.db import *
from flask_app.database.models.User import *
| 0 | 0 | 0 |
5243d2c8c712d26818f8e8ea1412002a743a4516 | 25,802 | py | Python | odin/bay/layers/count_layers.py | tirkarthi/odin-ai | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | [
"MIT"
] | 7 | 2020-12-29T19:35:58.000Z | 2022-01-31T21:01:30.000Z | odin/bay/layers/count_layers.py | tirkarthi/odin-ai | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | [
"MIT"
] | 3 | 2020-02-06T16:44:17.000Z | 2020-09-26T05:26:14.000Z | odin/bay/layers/count_layers.py | tirkarthi/odin-ai | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | [
"MIT"
] | 6 | 2019-02-14T01:36:28.000Z | 2020-10-30T13:16:32.000Z | from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import layers as tfl
from tensorflow_probability.python.internal import \
distribution_util as dist_util
from tensorflow_probability.python.layers import DistributionLambda
from tensorflow_probability.python.layers.distribution_layer import _event_size
from odin.backend import parse_activation
from odin.backend.maths import softplus1
from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated
__all__ = [
'PoissonLayer',
'NegativeBinomialDispLayer',
'NegativeBinomialLayer',
'ZINegativeBinomialDispLayer',
'ZINegativeBinomialLayer',
'ZIPoissonLayer',
'MultinomialLayer',
'DirichletMultinomialLayer',
'BinomialLayer',
]
PoissonLayer = tfl.IndependentPoisson
# ===========================================================================
# Negative binomial
# ===========================================================================
class NegativeBinomialLayer(DistributionLambda):
r"""An independent NegativeBinomial Keras layer.
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
count_activation: activation function return non-negative floating-point,
i.e. the `total_count` of failures
dispersion : {'full', 'share', 'single'}
- 'full' creates a dispersion value for each individual data point,
- 'share' creates a single vector of dispersion for all examples, and
- 'single' uses a single value as dispersion for all data points.
Note: the dispersion in this case is the probability of success.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.exp,
validate_args=False,
name="NegativeBinomialLayer",
disp=None):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat(
[tf.shape(input=params)[:-1], event_shape],
axis=0,
)
if disp is None:
total_count, logits = tf.split(params, 2, axis=-1)
logits = tf.reshape(logits, output_shape)
else:
total_count = params
logits = disp
total_count = tf.reshape(total_count, output_shape)
total_count = count_activation(total_count)
return tfd.Independent(
tfd.NegativeBinomial(total_count=total_count,
logits=logits,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
name="NegativeBinomialLayer_params_size"):
r"""The number of `params` needed to create a single distribution."""
if dispersion == 'full':
return 2 * _event_size(event_shape, name=name)
return _event_size(event_shape, name=name)
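# Worked example (sketch): with event_shape=(10,), params_size((10,), 'full')
# is 20 (a total_count and a logit per feature), while 'share' or 'single'
# needs only 10 because the logits come from a trainable dispersion variable.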
class NegativeBinomialDispLayer(DistributionLambda):
r"""An alternative parameterization of the NegativeBinomial Keras layer.
The order of input parameters are: mean, dispersion
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
mean_activation : activation for the non-negative mean
disp_activation : activation for the non-negative dispersion
dispersion : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single dispersion vector of `event_shape` for all examples,
and 'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
@staticmethod
def new(params,
event_shape=(),
mean_activation=tf.nn.softplus,
disp_activation=softplus1,
validate_args=False,
name="NegativeBinomialDispLayer",
disp=None):
r""" Create the distribution instance from a `params` vector. """
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat(
[tf.shape(input=params)[:-1], event_shape],
axis=0,
)
if disp is None:
loc, disp = tf.split(params, 2, axis=-1)
disp = tf.reshape(disp, output_shape)
else:
loc = params
loc = tf.reshape(loc, output_shape)
loc = mean_activation(loc)
disp = disp_activation(disp)
return tfd.Independent(
NegativeBinomialDisp(loc=loc, disp=disp, validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
name="NegativeBinomialDispLayer_params_size"):
r"""The number of `params` needed to create a single distribution."""
if dispersion == 'full':
return 2 * _event_size(event_shape, name=name)
return _event_size(event_shape, name=name)
# ===========================================================================
# Zero inflated
# ===========================================================================
class ZIPoissonLayer(DistributionLambda):
r"""A Independent zero-inflated Poisson keras layer """
@staticmethod
def new(params,
event_shape=(),
activation=tf.identity,
validate_args=False,
name="ZIPoissonLayer"):
"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat(
[tf.shape(input=params)[:-1], event_shape],
axis=0,
)
(log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
return tfd.Independent(
ZeroInflated(count_distribution=tfd.Poisson(
log_rate=activation(tf.reshape(log_rate_params, output_shape)),
validate_args=validate_args),
logits=tf.reshape(logits_params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(), name="ZeroInflatedPoisson_params_size"):
r"""The number of `params` needed to create a single distribution."""
return 2 * _event_size(event_shape, name=name)
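# Example (sketch): for event_shape=(3,) the layer consumes 6 params per
# sample; the tf.split above sends params[..., :3] to the Poisson log-rates
# and params[..., 3:] to the zero-inflation logits.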
class ZINegativeBinomialLayer(DistributionLambda):
r"""A Independent zero-inflated negative binomial keras layer
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
count_activation: activation function return non-negative floating-point,
i.e. the `total_count` of failures
dispersion, inflation : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single vector of dispersion for all examples, and
'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.exp,
validate_args=False,
name="ZINegativeBinomialLayer",
disp=None,
rate=None):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape), axis=0)
if disp is None: # full dispersion
if rate is None:
total_count, logits, rate = tf.split(params, 3, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
total_count, logits = tf.split(params, 2, axis=-1)
logits = tf.reshape(logits, output_shape)
else: # share dispersion
if rate is None:
total_count, rate = tf.split(params, 2, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
total_count = params
logits = disp
total_count = tf.reshape(total_count, output_shape)
total_count = count_activation(total_count)
nb = tfd.NegativeBinomial(total_count=total_count,
logits=logits,
validate_args=validate_args)
zinb = ZeroInflated(count_distribution=nb,
logits=rate,
validate_args=validate_args)
return tfd.Independent(zinb,
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
inflation='full',
name="ZeroInflatedNegativeBinomial_params_size"):
r"""The number of `params` needed to create a single distribution."""
size = _event_size(event_shape, name=name)
total = 3 * size
if dispersion != 'full':
total -= size
if inflation != 'full':
total -= size
return total
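# Worked example (sketch): with event_shape=(5,), params_size is 15 when
# dispersion and inflation are both 'full', 10 when one of them is shared,
# and 5 when both come from trainable variables instead of the input.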
class ZINegativeBinomialDispLayer(DistributionLambda):
r"""A Independent zero-inflated negative binomial (alternative
parameterization) keras layer.
The order of input parameters are: mean, dispersion, dropout rate
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
mean_activation : activation for the non-negative mean
disp_activation : activation for the non-negative dispersion
dispersion, inflation : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single dispersion vector of `event_shape` for all examples,
and 'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
@staticmethod
def new(params,
event_shape=(),
mean_activation=tf.nn.softplus,
disp_activation=softplus1,
validate_args=False,
name="ZINegativeBinomialDispLayer",
disp=None,
rate=None):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape), axis=0)
### splitting the parameters
if disp is None: # full dispersion
if rate is None:
loc, disp, rate = tf.split(params, 3, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
loc, disp = tf.split(params, 2, axis=-1)
disp = tf.reshape(disp, output_shape)
else: # share dispersion
if rate is None:
loc, rate = tf.split(params, 2, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
loc = params
# as count value, do exp if necessary
loc = tf.reshape(loc, output_shape)
loc = mean_activation(loc)
disp = disp_activation(disp)
# create the distribution
nb = NegativeBinomialDisp(loc=loc, disp=disp, validate_args=validate_args)
zinb = ZeroInflated(count_distribution=nb,
logits=rate,
validate_args=validate_args)
return tfd.Independent(zinb,
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
inflation='full',
name="ZINegativeBinomialDisp_params_size"):
r"""The number of `params` needed to create a single distribution."""
size = _event_size(event_shape, name=name)
total = 3 * size
if dispersion != 'full':
total -= size
if inflation != 'full':
total -= size
return total
# ===========================================================================
# Binomial Multinomial layer
# ===========================================================================
class MultinomialLayer(tfl.DistributionLambda):
r""" Parameterization:
- total_count : `[batch_size, 1]`
- logits : `[batch_size, ndim]`
- sample : `[batch_size, ndim]` with `sum(x, axis=1) = total_count`
"""
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.nn.softplus,
validate_args=False,
name='MultinomialLayer'):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
count_activation = parse_activation(count_activation, 'tf')
total_count = count_activation(params[..., 0])
logits = params[..., 1:]
return tfd.Multinomial(total_count=total_count,
logits=logits,
validate_args=validate_args,
name=name)
@staticmethod
def params_size(event_shape=(), name='MultinomialLayer_params_size'):
r"""The number of `params` needed to create a single distribution."""
return _event_size(event_shape, name=name) + 1.
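# Example (sketch): event_shape=(4,) needs a 5-wide parameter vector;
# params[..., 0] becomes total_count (after the softplus count_activation)
# and params[..., 1:] the 4 Multinomial logits.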
class DirichletMultinomialLayer(tfl.DistributionLambda):
r""" Dirichlet-Multinomial compound distribution.
K=2 equal to Beta-Binomial distribution
"""
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.nn.softplus,
concentration_activation=softplus1,
clip_for_stable=True,
validate_args=False,
name='DirichletMultinomialLayer'):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
count_activation = parse_activation(count_activation, 'tf')
concentration_activation = parse_activation(concentration_activation, 'tf')
total_count = count_activation(params[..., 0])
concentration = concentration_activation(params[..., 1:])
if clip_for_stable:
concentration = tf.clip_by_value(concentration, 1e-3, 1e3)
return tfd.DirichletMultinomial(total_count=total_count,
concentration=concentration,
validate_args=validate_args,
name=name)
@staticmethod
def params_size(event_shape=(), name='DirichletMultinomialLayer_params_size'):
r"""The number of `params` needed to create a single distribution."""
return _event_size(event_shape, name=name) + 1.
class BinomialLayer(tfl.DistributionLambda):
r""" Binomial distribution, each entry is a flipping of the coin K times (
  parameterized by `total_count`). """
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.nn.softplus,
validate_args=False,
name='BinomialLayer'):
r"""Create the distribution instance from a `params` vector."""
count_activation = parse_activation(count_activation, 'tf')
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat((tf.shape(params)[:-1], event_shape), axis=0)
total_count, logits = tf.split(params, 2, axis=-1)
total_count = tf.reshape(total_count, output_shape)
logits = tf.reshape(logits, output_shape)
return tfd.Independent(
tfd.Binomial(total_count=count_activation(total_count),
logits=logits,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(), name='BinomialLayer_params_size'):
r"""The number of `params` needed to create a single distribution."""
return 2 * _event_size(event_shape, name=name)
| 38.858434 | 88 | 0.621037 | from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import layers as tfl
from tensorflow_probability.python.internal import \
distribution_util as dist_util
from tensorflow_probability.python.layers import DistributionLambda
from tensorflow_probability.python.layers.distribution_layer import _event_size
from odin.backend import parse_activation
from odin.backend.maths import softplus1
from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated
__all__ = [
'PoissonLayer',
'NegativeBinomialDispLayer',
'NegativeBinomialLayer',
'ZINegativeBinomialDispLayer',
'ZINegativeBinomialLayer',
'ZIPoissonLayer',
'MultinomialLayer',
'DirichletMultinomialLayer',
'BinomialLayer',
]
PoissonLayer = tfl.IndependentPoisson
# ===========================================================================
# Negative binomial
# ===========================================================================
def _dispersion(disp, event_shape, is_logits, name, n_components=1):
  r"""Return a trainable dispersion variable for the 'share' and 'single'
  modes, or None for 'full' (dispersion then comes from the input params)."""
dispersion = str(disp).lower().strip()
assert dispersion in ('full', 'single', 'share'), \
"Only support three different dispersion value: 'full', 'single' and " + \
"'share', but given: %s" % dispersion
disp = None
if n_components > 1:
shape_single = (n_components, 1)
shape_share = tf.concat(
[[n_components], tf.nest.flatten(event_shape)], axis=0)
else:
shape_single = (1,)
shape_share = tf.nest.flatten(event_shape)
######## logits values
if is_logits:
if dispersion == 'single':
disp = tf.Variable(tf.zeros(shape_single),
trainable=True,
dtype=keras.backend.floatx(),
name=f"{name}_logits")
elif dispersion == 'share':
disp = tf.Variable(tf.zeros(shape_share),
trainable=True,
dtype=keras.backend.floatx(),
name=f"{name}_logits")
######## raw dispersion values
else:
if dispersion == 'single':
disp = tf.Variable(tf.random.normal(shape_single),
trainable=True,
dtype=keras.backend.floatx(),
name=f"{name}_raw")
elif dispersion == 'share':
disp = tf.Variable(tf.random.normal(shape_share),
trainable=True,
dtype=keras.backend.floatx(),
name=f"{name}_raw")
return disp
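# Example (sketch): _dispersion('share', (10,), is_logits=True, name='disp')
# returns a single trainable zero-initialised logits variable of shape (10,)
# reused for every sample; 'single' gives shape (1,) and 'full' returns None.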
class NegativeBinomialLayer(DistributionLambda):
r"""An independent NegativeBinomial Keras layer.
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
count_activation: activation function return non-negative floating-point,
i.e. the `total_count` of failures
dispersion : {'full', 'share', 'single'}
- 'full' creates a dispersion value for each individual data point,
- 'share' creates a single vector of dispersion for all examples, and
- 'single' uses a single value as dispersion for all data points.
Note: the dispersion in this case is the probability of success.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
count_activation='exp',
dispersion='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
**kwargs):
disp = _dispersion(dispersion,
event_shape,
is_logits=True,
name="dispersion")
super(NegativeBinomialLayer, self).__init__(
lambda t: type(self).new(
t,
event_shape,
count_activation=parse_activation(count_activation, self),
validate_args=validate_args,
disp=disp,
), convert_to_tensor_fn, **kwargs)
self.disp = disp
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.exp,
validate_args=False,
name="NegativeBinomialLayer",
disp=None):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat(
[tf.shape(input=params)[:-1], event_shape],
axis=0,
)
if disp is None:
total_count, logits = tf.split(params, 2, axis=-1)
logits = tf.reshape(logits, output_shape)
else:
total_count = params
logits = disp
total_count = tf.reshape(total_count, output_shape)
total_count = count_activation(total_count)
return tfd.Independent(
tfd.NegativeBinomial(total_count=total_count,
logits=logits,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
name="NegativeBinomialLayer_params_size"):
r"""The number of `params` needed to create a single distribution."""
if dispersion == 'full':
return 2 * _event_size(event_shape, name=name)
return _event_size(event_shape, name=name)
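# Usage sketch (names illustrative): a Dense head sized by params_size
# feeding the distribution layer:
#   head = keras.layers.Dense(NegativeBinomialLayer.params_size((10,), 'share'))
#   nb = NegativeBinomialLayer(event_shape=(10,), dispersion='share')
#   dist = nb(head(features))  # an Independent(NegativeBinomial)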
class NegativeBinomialDispLayer(DistributionLambda):
r"""An alternative parameterization of the NegativeBinomial Keras layer.
The order of input parameters are: mean, dispersion
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
mean_activation : activation for the non-negative mean
disp_activation : activation for the non-negative dispersion
dispersion : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single dispersion vector of `event_shape` for all examples,
and 'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
mean_activation='softplus',
disp_activation='softplus1',
dispersion='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
**kwargs):
disp = _dispersion(dispersion,
event_shape,
is_logits=False,
name="dispersion")
super(NegativeBinomialDispLayer, self).__init__(
lambda t: type(self).new(
t,
event_shape,
mean_activation=parse_activation(mean_activation, self),
disp_activation=parse_activation(disp_activation, self),
validate_args=validate_args,
disp=disp,
), convert_to_tensor_fn, **kwargs)
self.disp = disp
@staticmethod
def new(params,
event_shape=(),
mean_activation=tf.nn.softplus,
disp_activation=softplus1,
validate_args=False,
name="NegativeBinomialDispLayer",
disp=None):
r""" Create the distribution instance from a `params` vector. """
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat(
[tf.shape(input=params)[:-1], event_shape],
axis=0,
)
if disp is None:
loc, disp = tf.split(params, 2, axis=-1)
disp = tf.reshape(disp, output_shape)
else:
loc = params
loc = tf.reshape(loc, output_shape)
loc = mean_activation(loc)
disp = disp_activation(disp)
return tfd.Independent(
NegativeBinomialDisp(loc=loc, disp=disp, validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
name="NegativeBinomialDispLayer_params_size"):
r"""The number of `params` needed to create a single distribution."""
if dispersion == 'full':
return 2 * _event_size(event_shape, name=name)
return _event_size(event_shape, name=name)
# ===========================================================================
# Zero inflated
# ===========================================================================
class ZIPoissonLayer(DistributionLambda):
r"""A Independent zero-inflated Poisson keras layer """
def __init__(self,
event_shape=(),
activation='linear',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
**kwargs):
super(ZIPoissonLayer, self).__init__(
lambda t: type(self).new(
t, event_shape, parse_activation(activation, self), validate_args),
convert_to_tensor_fn, **kwargs)
@staticmethod
def new(params,
event_shape=(),
activation=tf.identity,
validate_args=False,
name="ZIPoissonLayer"):
"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat(
[tf.shape(input=params)[:-1], event_shape],
axis=0,
)
(log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
return tfd.Independent(
ZeroInflated(count_distribution=tfd.Poisson(
log_rate=activation(tf.reshape(log_rate_params, output_shape)),
validate_args=validate_args),
logits=tf.reshape(logits_params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(), name="ZeroInflatedPoisson_params_size"):
r"""The number of `params` needed to create a single distribution."""
return 2 * _event_size(event_shape, name=name)
class ZINegativeBinomialLayer(DistributionLambda):
r"""A Independent zero-inflated negative binomial keras layer
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
count_activation: activation function return non-negative floating-point,
i.e. the `total_count` of failures
dispersion, inflation : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single vector of dispersion for all examples, and
'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
count_activation='exp',
dispersion='full',
inflation='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
**kwargs):
disp = _dispersion(dispersion,
event_shape,
is_logits=True,
name="dispersion")
rate = _dispersion(inflation, event_shape, is_logits=True, name="inflation")
super(ZINegativeBinomialLayer, self).__init__(
lambda t: type(self).new(
t,
event_shape,
count_activation=parse_activation(count_activation, self),
validate_args=validate_args,
disp=disp,
rate=rate,
), convert_to_tensor_fn, **kwargs)
self.disp = disp
self.rate = rate
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.exp,
validate_args=False,
name="ZINegativeBinomialLayer",
disp=None,
rate=None):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape), axis=0)
if disp is None: # full dispersion
if rate is None:
total_count, logits, rate = tf.split(params, 3, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
total_count, logits = tf.split(params, 2, axis=-1)
logits = tf.reshape(logits, output_shape)
else: # share dispersion
if rate is None:
total_count, rate = tf.split(params, 2, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
total_count = params
logits = disp
total_count = tf.reshape(total_count, output_shape)
total_count = count_activation(total_count)
nb = tfd.NegativeBinomial(total_count=total_count,
logits=logits,
validate_args=validate_args)
zinb = ZeroInflated(count_distribution=nb,
logits=rate,
validate_args=validate_args)
return tfd.Independent(zinb,
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
inflation='full',
name="ZeroInflatedNegativeBinomial_params_size"):
r"""The number of `params` needed to create a single distribution."""
size = _event_size(event_shape, name=name)
total = 3 * size
if dispersion != 'full':
total -= size
if inflation != 'full':
total -= size
return total
class ZINegativeBinomialDispLayer(DistributionLambda):
r"""A Independent zero-inflated negative binomial (alternative
parameterization) keras layer.
The order of input parameters are: mean, dispersion, dropout rate
Arguments:
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
mean_activation : activation for the non-negative mean
disp_activation : activation for the non-negative dispersion
dispersion, inflation : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single dispersion vector of `event_shape` for all examples,
and 'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
mean_activation='softplus',
disp_activation='softplus1',
dispersion='full',
inflation='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
**kwargs):
disp = _dispersion(dispersion,
event_shape,
is_logits=True,
name="dispersion")
rate = _dispersion(inflation, event_shape, is_logits=True, name="inflation")
super(ZINegativeBinomialDispLayer, self).__init__(
lambda t: type(self).new(
t,
event_shape,
mean_activation=parse_activation(mean_activation, self),
disp_activation=parse_activation(disp_activation, self),
disp=disp,
rate=rate,
validate_args=validate_args,
), convert_to_tensor_fn, **kwargs)
self.disp = disp
self.rate = rate
@staticmethod
def new(params,
event_shape=(),
mean_activation=tf.nn.softplus,
disp_activation=softplus1,
validate_args=False,
name="ZINegativeBinomialDispLayer",
disp=None,
rate=None):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape), axis=0)
### splitting the parameters
if disp is None: # full dispersion
if rate is None:
loc, disp, rate = tf.split(params, 3, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
loc, disp = tf.split(params, 2, axis=-1)
disp = tf.reshape(disp, output_shape)
else: # share dispersion
if rate is None:
loc, rate = tf.split(params, 2, axis=-1)
rate = tf.reshape(rate, output_shape)
else:
loc = params
# as count value, do exp if necessary
loc = tf.reshape(loc, output_shape)
loc = mean_activation(loc)
disp = disp_activation(disp)
# create the distribution
nb = NegativeBinomialDisp(loc=loc, disp=disp, validate_args=validate_args)
zinb = ZeroInflated(count_distribution=nb,
logits=rate,
validate_args=validate_args)
return tfd.Independent(zinb,
reinterpreted_batch_ndims=tf.size(input=event_shape),
name=name)
@staticmethod
def params_size(event_shape=(),
dispersion='full',
inflation='full',
name="ZINegativeBinomialDisp_params_size"):
r"""The number of `params` needed to create a single distribution."""
size = _event_size(event_shape, name=name)
total = 3 * size
if dispersion != 'full':
total -= size
if inflation != 'full':
total -= size
return total
# ===========================================================================
# Binomial Multinomial layer
# ===========================================================================
class MultinomialLayer(tfl.DistributionLambda):
r""" Parameterization:
- total_count : `[batch_size, 1]`
- logits : `[batch_size, ndim]`
- sample : `[batch_size, ndim]` with `sum(x, axis=1) = total_count`
"""
def __init__(self,
event_shape=(),
count_activation='softplus',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
name="MultinomialLayer"):
super().__init__(
lambda t: MultinomialLayer.new(t, event_shape, count_activation,
validate_args, name), convert_to_tensor_fn)
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.nn.softplus,
validate_args=False,
name='MultinomialLayer'):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
count_activation = parse_activation(count_activation, 'tf')
total_count = count_activation(params[..., 0])
logits = params[..., 1:]
return tfd.Multinomial(total_count=total_count,
logits=logits,
validate_args=validate_args,
name=name)
@staticmethod
def params_size(event_shape=(), name='MultinomialLayer_params_size'):
r"""The number of `params` needed to create a single distribution."""
return _event_size(event_shape, name=name) + 1.
class DirichletMultinomialLayer(tfl.DistributionLambda):
r""" Dirichlet-Multinomial compound distribution.
K=2 equal to Beta-Binomial distribution
"""
def __init__(self,
event_shape=(),
count_activation='softplus',
concentration_activation='softplus1',
clip_for_stable=True,
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False):
super().__init__(
lambda t: DirichletMultinomialLayer.
new(t, event_shape, count_activation, concentration_activation, clip_for_stable,
validate_args), convert_to_tensor_fn)
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.nn.softplus,
concentration_activation=softplus1,
clip_for_stable=True,
validate_args=False,
name='DirichletMultinomialLayer'):
r"""Create the distribution instance from a `params` vector."""
params = tf.convert_to_tensor(value=params, name='params')
count_activation = parse_activation(count_activation, 'tf')
concentration_activation = parse_activation(concentration_activation, 'tf')
total_count = count_activation(params[..., 0])
concentration = concentration_activation(params[..., 1:])
if clip_for_stable:
concentration = tf.clip_by_value(concentration, 1e-3, 1e3)
return tfd.DirichletMultinomial(total_count=total_count,
concentration=concentration,
validate_args=validate_args,
name=name)
@staticmethod
def params_size(event_shape=(), name='DirichletMultinomialLayer_params_size'):
r"""The number of `params` needed to create a single distribution."""
return _event_size(event_shape, name=name) + 1.
class BinomialLayer(tfl.DistributionLambda):
r""" Binomial distribution, each entry is a flipping of the coin K times (
  parameterized by `total_count`). """
def __init__(self,
event_shape=(),
count_activation='softplus',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
name="BinomialLayer"):
super().__init__(
lambda t: BinomialLayer.new(t, event_shape, count_activation,
validate_args, name), convert_to_tensor_fn)
@staticmethod
def new(params,
event_shape=(),
count_activation=tf.nn.softplus,
validate_args=False,
name='BinomialLayer'):
r"""Create the distribution instance from a `params` vector."""
count_activation = parse_activation(count_activation, 'tf')
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32),
tensor_name='event_shape',
)
output_shape = tf.concat((tf.shape(params)[:-1], event_shape), axis=0)
total_count, logits = tf.split(params, 2, axis=-1)
total_count = tf.reshape(total_count, output_shape)
logits = tf.reshape(logits, output_shape)
return tfd.Independent(
tfd.Binomial(total_count=count_activation(total_count),
logits=logits,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(event_shape),
name=name,
)
@staticmethod
def params_size(event_shape=(), name='BinomialLayer_params_size'):
r"""The number of `params` needed to create a single distribution."""
return 2 * _event_size(event_shape, name=name)
| 6,480 | 0 | 222 |
d25cb3fd32b7633613323994b081ae1c14b9eeca | 4,934 | py | Python | tools/pe_absorbing_layer.py | qgoestch/sinecity_testcases | ec04ba707ff69b5c1b4b42e56e522855a2f34a65 | [
"BSD-3-Clause"
] | null | null | null | tools/pe_absorbing_layer.py | qgoestch/sinecity_testcases | ec04ba707ff69b5c1b4b42e56e522855a2f34a65 | [
"BSD-3-Clause"
] | null | null | null | tools/pe_absorbing_layer.py | qgoestch/sinecity_testcases | ec04ba707ff69b5c1b4b42e56e522855a2f34a65 | [
"BSD-3-Clause"
] | 1 | 2021-02-18T13:07:10.000Z | 2021-02-18T13:07:10.000Z | # -*- coding: utf-8 -*-
##
# \file pe_absorbing_layer.py
# \title Definition of an absorbing layer for the parabolic equation.
# \author Pierre Chobeau
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 20 Nov.
##
import numpy as np
def abs_lay_top(p_ij, dy, Ny, y_start_abs):
"""
Absorbing layer for the parabolic equation defined following the
vertical direction - i.e. along the y axis.
For more details see **[chevret_phd1994, Eq.(4.38), p.59]**.
:param p_ij: pressure at the discrete location i,j ~ (x, y) (Pa).
:type p_ij: 2D numpy arrays of complexes
:param dy: spatial step for the y directions (m).
:type dy: float
:param Ny: length of the domain in number of nodes following the y dir.
:type Ny: int
:param y_start_abs: y coordinate of the layer starting position (m).
:type y_start_abs: float
    :return: the pressure array including the absorption of the layer (Pa).
:rtype: 2D numpy arrays of complexes
"""
a_empirical = 4.5
b_empirical = int(round(1.4/dy))
abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
for j in range(int(y_start_abs / dy) + 1, Ny + 1):
abs_lay_top[j] = np.exp(-((j - int(y_start_abs / dy)) /
(a_empirical * (Ny + b_empirical - j)))**2)
p_ij[j] = p_ij[j] * abs_lay_top[j]
plot_abs_profil = False
if plot_abs_profil:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(range(int(y_start_abs / dy) + 1, Ny + 1),
abs_lay_top[int(y_start_abs / dy) + 1: Ny + 1])
plt.show()
return p_ij
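# Usage sketch (values illustrative): damp the rows above y = 8 m of a
# (Ny+1, Nx) pressure field computed with dy = 0.1 m:
# p = np.ones((101, 200), dtype=np.complex128)
# p = abs_lay_top(p, dy=0.1, Ny=100, y_start_abs=8.0)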
def abs_lay_bottom_top(p_ij, dy, Ny, y_start_abs):
"""
Absorbing layer for the ground (low part) of the parabolic equation, in
order to simulate free field propagation.
The absorbing layer is defined following the vertical direction -
i.e. along the y axis.
For more details see **[chevret_phd1994, Eq.(4.38), p.59]**.
:param p_ij: pressure at the discrete location i,j ~ (x, y) (Pa).
:type p_ij: 2D numpy arrays of complexes
:param dy: spatial step for the y directions (m).
:type dy: float
:param Ny: length of the domain in number of nodes following the y dir.
:type Ny: int
:param y_start_abs: y coordinate of the layer starting position (m).
:type y_start_abs: float
    :return: the pressure array including the absorption of the layer (Pa).
:rtype: 2D numpy arrays of complexes
"""
a_empirical = 4.5
b_empirical = int(round(1.4/dy))
abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
abs_lay_total = np.ones((Ny + 1), dtype=np.float64)
abs_lay_bottom = np.ones((Ny + 1), dtype=np.float64)
for j in range(int(y_start_abs / dy) + 1, Ny + 1):
abs_lay_top[j] = np.exp(-((j - int(y_start_abs / dy)) /
(a_empirical * (Ny + b_empirical - j)))**2)
abs_lay_bottom[Ny - j - 1] = np.exp(-((j + 1 - int(y_start_abs / dy)) /
(a_empirical *
(- Ny - b_empirical + j))) ** 2)
for j in range(Ny + 1):
abs_lay_total[j] = abs_lay_bottom[j] * abs_lay_top[j]
p_ij[j] = p_ij[j] * abs_lay_total[j]
plot_abs_profil = False
if plot_abs_profil:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(range(int(y_start_abs / dy) + 1, Ny + 1),
abs_lay_top[int(y_start_abs / dy) + 1: Ny + 1])
plt.figure(2)
plt.plot(range(Ny - int(y_start_abs / dy)),
abs_lay_bottom[: Ny - int(y_start_abs / dy)])
plt.show()
return p_ij
def abs_lay_top_1(k, dy, Ny, y_start_abs):
"""
Absorbing layer for the parabolic equation defined following the
vertical direction - It has to be applied on the wavenumber directly.
    :param k: wavenumber at each node along the y axis (rad.m-1).
    :type k: numpy array of complexes
:param dy: spatial step for the y directions (m).
:type dy: float
:param Ny: length of the domain in number of nodes following the y dir.
:type Ny: int
:param y_start_abs: y coordinate of the layer starting position (m).
:type y_start_abs: float
:return: the wavenumber.
:rtype: 2D numpy arrays of complexes
"""
abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
A = np.ones((Ny + 1), dtype=np.float64)
for j in range(int(y_start_abs / dy) + 1, Ny + 1):
A[Ny + 1 - j + int(y_start_abs / dy)] = np.exp(-4. *
((j * dy) - y_start_abs) / ((Ny + 1) * dy - y_start_abs))
abs_lay_top[Ny + 1 - j + int(y_start_abs / dy)] = A[j] * \
((j * dy - y_start_abs) /
((Ny + 1) * dy - y_start_abs)) ** 2
k[j] = k[j] * abs_lay_top[j]
return k
| 39.790323 | 79 | 0.59546 | # -*- coding: utf-8 -*-
##
# \file pe_absorbing_layer.py
# \title Definition of an absorbing layer for the parabolic equation.
# \author Pierre Chobeau
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 20 Nov.
##
import numpy as np
def abs_lay_top(p_ij, dy, Ny, y_start_abs):
"""
Absorbing layer for the parabolic equation defined following the
vertical direction - i.e. along the y axis.
For more details see **[chevret_phd1994, Eq.(4.38), p.59]**.
:param p_ij: pressure at the discrete location i,j ~ (x, y) (Pa).
:type p_ij: 2D numpy arrays of complexes
:param dy: spatial step for the y directions (m).
:type dy: float
:param Ny: length of the domain in number of nodes following the y dir.
:type Ny: int
:param y_start_abs: y coordinate of the layer starting position (m).
:type y_start_abs: float
    :return: the pressure array including the absorption of the layer (Pa).
:rtype: 2D numpy arrays of complexes
"""
a_empirical = 4.5
b_empirical = int(round(1.4/dy))
abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
for j in range(int(y_start_abs / dy) + 1, Ny + 1):
abs_lay_top[j] = np.exp(-((j - int(y_start_abs / dy)) /
(a_empirical * (Ny + b_empirical - j)))**2)
p_ij[j] = p_ij[j] * abs_lay_top[j]
plot_abs_profil = False
if plot_abs_profil:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(range(int(y_start_abs / dy) + 1, Ny + 1),
abs_lay_top[int(y_start_abs / dy) + 1: Ny + 1])
plt.show()
return p_ij
def abs_lay_bottom_top(p_ij, dy, Ny, y_start_abs):
"""
Absorbing layer for the ground (low part) of the parabolic equation, in
order to simulate free field propagation.
The absorbing layer is defined following the vertical direction -
i.e. along the y axis.
For more details see **[chevret_phd1994, Eq.(4.38), p.59]**.
:param p_ij: pressure at the discrete location i,j ~ (x, y) (Pa).
:type p_ij: 2D numpy arrays of complexes
:param dy: spatial step for the y directions (m).
:type dy: float
:param Ny: length of the domain in number of nodes following the y dir.
:type Ny: int
:param y_start_abs: y coordinate of the layer starting position (m).
:type y_start_abs: float
    :return: the pressure array including the absorption of the layer (Pa).
:rtype: 2D numpy arrays of complexes
"""
a_empirical = 4.5
b_empirical = int(round(1.4/dy))
abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
abs_lay_total = np.ones((Ny + 1), dtype=np.float64)
abs_lay_bottom = np.ones((Ny + 1), dtype=np.float64)
for j in range(int(y_start_abs / dy) + 1, Ny + 1):
abs_lay_top[j] = np.exp(-((j - int(y_start_abs / dy)) /
(a_empirical * (Ny + b_empirical - j)))**2)
abs_lay_bottom[Ny - j - 1] = np.exp(-((j + 1 - int(y_start_abs / dy)) /
(a_empirical *
(- Ny - b_empirical + j))) ** 2)
for j in range(Ny + 1):
abs_lay_total[j] = abs_lay_bottom[j] * abs_lay_top[j]
p_ij[j] = p_ij[j] * abs_lay_total[j]
plot_abs_profil = False
if plot_abs_profil:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(range(int(y_start_abs / dy) + 1, Ny + 1),
abs_lay_top[int(y_start_abs / dy) + 1: Ny + 1])
plt.figure(2)
plt.plot(range(Ny - int(y_start_abs / dy)),
abs_lay_bottom[: Ny - int(y_start_abs / dy)])
plt.show()
return p_ij
def abs_lay_top_1(k, dy, Ny, y_start_abs):
"""
Absorbing layer for the parabolic equation defined following the
vertical direction - It has to be applied on the wavenumber directly.
    :param k: wavenumber at each node along the y axis (rad.m-1).
    :type k: numpy array of complexes
:param dy: spatial step for the y directions (m).
:type dy: float
:param Ny: length of the domain in number of nodes following the y dir.
:type Ny: int
:param y_start_abs: y coordinate of the layer starting position (m).
:type y_start_abs: float
:return: the wavenumber.
:rtype: 2D numpy arrays of complexes
"""
abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
A = np.ones((Ny + 1), dtype=np.float64)
for j in range(int(y_start_abs / dy) + 1, Ny + 1):
A[Ny + 1 - j + int(y_start_abs / dy)] = np.exp(-4. *
((j * dy) - y_start_abs) / ((Ny + 1) * dy - y_start_abs))
abs_lay_top[Ny + 1 - j + int(y_start_abs / dy)] = A[j] * \
((j * dy - y_start_abs) /
((Ny + 1) * dy - y_start_abs)) ** 2
k[j] = k[j] * abs_lay_top[j]
return k
| 0 | 0 | 0 |
140f8439b5d15391db4146dff52f3c3f1194c8fe | 38,736 | py | Python | pyinstaller-1.5.1/mf.py | rafidhoda/differentiator_utility | b30ef54abef5bde46fb2d3eac8cb43218a7a4c17 | [
"Xnet",
"X11"
] | null | null | null | pyinstaller-1.5.1/mf.py | rafidhoda/differentiator_utility | b30ef54abef5bde46fb2d3eac8cb43218a7a4c17 | [
"Xnet",
"X11"
] | null | null | null | pyinstaller-1.5.1/mf.py | rafidhoda/differentiator_utility | b30ef54abef5bde46fb2d3eac8cb43218a7a4c17 | [
"Xnet",
"X11"
] | null | null | null | #
# Copyright (C) 2005, Giovanni Bajo
#
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
import sys, string, os, imp, marshal, dircache, glob
try:
# zipimport is supported starting with Python 2.3
import zipimport
except ImportError:
zipimport = None
try:
# if ctypes is present, we can enable specific dependency discovery
import ctypes
from ctypes.util import find_library
except ImportError:
ctypes = None
import suffixes
try:
STRINGTYPE = basestring
except NameError:
STRINGTYPE = type("")
if not os.environ.has_key('PYTHONCASEOK') and sys.version_info >= (2, 1):
    def caseOk(filename):
        """Check that the on-disk case of `filename` matches the import name."""
        files = dircache.listdir(os.path.dirname(filename))
        return os.path.basename(filename) in files
else:
    def caseOk(filename):
        """Case check disabled (PYTHONCASEOK set or Python < 2.1)."""
        return True
def pyco():
"""
Returns correct extension ending: 'c' or 'o'
"""
if __debug__:
return 'c'
else:
return 'o'
#=======================Owners==========================#
# An Owner does imports from a particular piece of turf
# That is, there's an Owner for each thing on sys.path
# There are owners for directories and .pyz files.
# There could be owners for zip files, or even URLs.
# Note that they replace the string in sys.path,
# but str(sys.path[n]) should yield the original string.
ZipOwner = None
if zipimport:
# We cannot use zipimporter here because it has a stupid bug:
#
# >>> z.find_module("setuptools.setuptools.setuptools.setuptools.setuptools") is not None
# True
#
# So mf will go into infinite recursion.
# Instead, we'll reuse the BaseDirOwner logic, simply changing
    # the template methods.
    pass  # placeholder: ZipOwner, a BaseDirOwner subclass wrapping zipimport, goes here
DirOwner = PYZOwner = Owner = None  # stubs keep this table importable without the Owner classes
_globalownertypes = filter(None, [
DirOwner,
ZipOwner,
PYZOwner,
Owner,
])
#===================Import Directors====================================#
# ImportDirectors live on the metapath
# There's one for builtins, one for frozen modules, and one for sys.path
# Windows gets one for modules gotten from the Registry
# There should be one for Frozen modules
# Mac would have them for PY_RESOURCE modules etc.
# A generalization of Owner - their concept of "turf" is broader
# for Windows only
#=================Import Tracker============================#
# This one doesn't really import, just analyzes
# If it *were* importing, it would be the one-and-only ImportManager
# ie, the builtin import
UNTRIED = -1
imptyps = ['top-level', 'conditional', 'delayed', 'delayed, conditional']
import hooks
if __debug__:
    import sys
    import UserDict
    LogDict = dict  # fallback so LogDict exists in both branches
else:
    LogDict = dict
# really the equivalent of builtin import
#====================Modules============================#
# All we're doing here is tracking, not importing
# If we were importing, these would be hooked to the real module objects
#======================== Utility ================================#
# Scan the code object for imports, __all__ and weird stuff
import dis
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
IMPORT_FROM = dis.opname.index('IMPORT_FROM')
try:
IMPORT_STAR = dis.opname.index('IMPORT_STAR')
except:
IMPORT_STAR = 999
STORE_NAME = dis.opname.index('STORE_NAME')
STORE_FAST = dis.opname.index('STORE_FAST')
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
try:
STORE_MAP = dis.opname.index('STORE_MAP')
except:
STORE_MAP = 999
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
LOAD_ATTR = dis.opname.index('LOAD_ATTR')
LOAD_NAME = dis.opname.index('LOAD_NAME')
EXEC_STMT = dis.opname.index('EXEC_STMT')
try:
SET_LINENO = dis.opname.index('SET_LINENO')
except ValueError:
SET_LINENO = 999
BUILD_LIST = dis.opname.index('BUILD_LIST')
LOAD_CONST = dis.opname.index('LOAD_CONST')
if getattr(sys, 'version_info', (0,0,0)) > (2,5,0):
LOAD_CONST_level = LOAD_CONST
else:
LOAD_CONST_level = 999
if getattr(sys, 'version_info', (0,0,0)) >= (2,7,0):
COND_OPS = [dis.opname.index('POP_JUMP_IF_TRUE'),
dis.opname.index('POP_JUMP_IF_FALSE'),
dis.opname.index('JUMP_IF_TRUE_OR_POP'),
dis.opname.index('JUMP_IF_FALSE_OR_POP'),
]
else:
COND_OPS = [dis.opname.index('JUMP_IF_FALSE'),
dis.opname.index('JUMP_IF_TRUE'),
]
JUMP_FORWARD = dis.opname.index('JUMP_FORWARD')
try:
STORE_DEREF = dis.opname.index('STORE_DEREF')
except ValueError:
STORE_DEREF = 999
STORE_OPS = [STORE_NAME, STORE_FAST, STORE_GLOBAL, STORE_DEREF, STORE_MAP]
#IMPORT_STAR -> IMPORT_NAME mod ; IMPORT_STAR
#JUMP_IF_FALSE / JUMP_IF_TRUE / JUMP_FORWARD
def scan_code_for_ctypes(co, instrs, i):
"""Detects ctypes dependencies, using reasonable heuristics that should
cover most common ctypes usages; returns a tuple of two lists, one
containing names of binaries detected as dependencies, the other containing
warnings.
"""
def _libFromConst(i):
"""Extracts library name from an expected LOAD_CONST instruction and
appends it to local binaries list.
"""
op, oparg, conditional, curline = instrs[i]
if op == LOAD_CONST:
soname = co.co_consts[oparg]
b.append(soname)
b = []
op, oparg, conditional, curline = instrs[i]
if op in (LOAD_GLOBAL, LOAD_NAME):
name = co.co_names[oparg]
if name in ("CDLL", "WinDLL"):
# Guesses ctypes imports of this type: CDLL("library.so")
# LOAD_GLOBAL 0 (CDLL) <--- we "are" here right now
# LOAD_CONST 1 ('library.so')
_libFromConst(i+1)
elif name == "ctypes":
# Guesses ctypes imports of this type: ctypes.DLL("library.so")
# LOAD_GLOBAL 0 (ctypes) <--- we "are" here right now
# LOAD_ATTR 1 (CDLL)
# LOAD_CONST 1 ('library.so')
op2, oparg2, conditional2, curline2 = instrs[i+1]
if op2 == LOAD_ATTR:
if co.co_names[oparg2] in ("CDLL", "WinDLL"):
# Fetch next, and finally get the library name
_libFromConst(i+2)
elif name in ("cdll", "windll"):
# Guesses ctypes imports of these types:
# * cdll.library (only valid on Windows)
# LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
# LOAD_ATTR 1 (library)
# * cdll.LoadLibrary("library.so")
# LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
# LOAD_ATTR 1 (LoadLibrary)
# LOAD_CONST 1 ('library.so')
op2, oparg2, conditional2, curline2 = instrs[i+1]
if op2 == LOAD_ATTR:
if co.co_names[oparg2] != "LoadLibrary":
# First type
soname = co.co_names[oparg2] + ".dll"
b.append(soname)
else:
# Second type, needs to fetch one more instruction
_libFromConst(i+2)
# If any of the libraries has been requested with anything different from
# the bare filename, drop that entry and warn the user - pyinstaller would
# need to patch the compiled pyc file to make it work correctly!
w = []
for bin in list(b):
if bin != os.path.basename(bin):
b.remove(bin)
w.append("W: ignoring %s - ctypes imports only supported using bare filenames" % (bin,))
return b, w
def _resolveCtypesImports(cbinaries):
"""Completes ctypes BINARY entries for modules with their full path.
"""
if sys.platform.startswith("linux"):
envvar = "LD_LIBRARY_PATH"
elif sys.platform.startswith("darwin"):
envvar = "DYLD_LIBRARY_PATH"
else:
envvar = "PATH"
ret = []
# Try to locate the shared library on disk. This is done by
    # calling ctypes.util.find_library with ImportTracker's local paths
    # prepended to the library search path, then restoring the original values.
old = _savePaths()
for cbin in cbinaries:
ext = os.path.splitext(cbin)[1]
# On Windows, only .dll files can be loaded.
if os.name == "nt" and ext.lower() in [".so", ".dylib"]:
continue
cpath = find_library(os.path.splitext(cbin)[0])
if sys.platform == "linux2":
# CAVEAT: find_library() is not the correct function. Ctype's
# documentation says that it is meant to resolve only the filename
# (as a *compiler* does) not the full path. Anyway, it works well
# enough on Windows and Mac. On Linux, we need to implement
# more code to find out the full path.
if cpath is None:
cpath = cbin
# "man ld.so" says that we should first search LD_LIBRARY_PATH
# and then the ldcache
for d in os.environ["LD_LIBRARY_PATH"].split(":"):
if os.path.isfile(d + "/" + cpath):
cpath = d + "/" + cpath
break
else:
for L in os.popen("ldconfig -p").read().splitlines():
if cpath in L:
cpath = L.split("=>", 1)[1].strip()
assert os.path.isfile(cpath)
break
else:
cpath = None
if cpath is None:
print "W: library %s required via ctypes not found" % (cbin,)
else:
ret.append((cbin, cpath, "BINARY"))
_restorePaths(old)
return ret
| 34.960289 | 126 | 0.531263 | #
# Copyright (C) 2005, Giovanni Bajo
#
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
import sys, string, os, imp, marshal, dircache, glob
try:
# zipimport is supported starting with Python 2.3
import zipimport
except ImportError:
zipimport = None
try:
# if ctypes is present, we can enable specific dependency discovery
import ctypes
from ctypes.util import find_library
except ImportError:
ctypes = None
import suffixes
try:
STRINGTYPE = basestring
except NameError:
STRINGTYPE = type("")
if not os.environ.has_key('PYTHONCASEOK') and sys.version_info >= (2, 1):
def caseOk(filename):
files = dircache.listdir(os.path.dirname(filename))
return os.path.basename(filename) in files
else:
def caseOk(filename):
return True
def pyco():
"""
Returns correct extension ending: 'c' or 'o'
"""
if __debug__:
return 'c'
else:
return 'o'
#=======================Owners==========================#
# An Owner does imports from a particular piece of turf
# That is, there's an Owner for each thing on sys.path
# There are owners for directories and .pyz files.
# There could be owners for zip files, or even URLs.
# Note that they replace the string in sys.path,
# but str(sys.path[n]) should yield the original string.
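# Sketch of the contract (illustrative, not executed): given
#     owner = DirOwner('/usr/lib/python2.6')
# str(owner) yields the original path string, owner.getmod('os') yields a
# PyModule (or None if this turf doesn't hold it), and the owner can sit
# in a path list in place of the plain string.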
class OwnerError(Exception):
pass
class Owner:
def __init__(self, path, target_platform=None):
self.path = path
self.target_platform = target_platform
def __str__(self):
return self.path
def getmod(self, nm):
return None
class BaseDirOwner(Owner):
def _getsuffixes(self):
return suffixes.get_suffixes(self.target_platform)
def getmod(self, nm, getsuffixes=None, loadco=marshal.loads):
if getsuffixes is None:
getsuffixes = self._getsuffixes
possibles = [(nm, 0, None)]
if self._isdir(nm) and self._caseok(nm):
possibles.insert(0, (os.path.join(nm, '__init__'), 1, nm))
py = pyc = None
for pth, ispkg, pkgpth in possibles:
for ext, mode, typ in getsuffixes():
attempt = pth+ext
modtime = self._modtime(attempt)
if modtime is not None:
# Check case
if not self._caseok(attempt):
continue
if typ == imp.C_EXTENSION:
#print "DirOwner.getmod -> ExtensionModule(%s, %s)" % (nm, attempt)
return ExtensionModule(nm, os.path.join(self.path,attempt))
elif typ == imp.PY_SOURCE:
py = (attempt, modtime)
else:
pyc = (attempt, modtime)
if py or pyc:
break
if py is None and pyc is None:
#print "DirOwner.getmod -> (py == pyc == None)"
return None
while 1:
# If we have no pyc or py is newer
if pyc is None or py and pyc[1] < py[1]:
try:
stuff = self._read(py[0])+'\n'
co = compile(string.replace(stuff, "\r\n", "\n"), py[0], 'exec')
pth = py[0] + pyco()
break
except SyntaxError, e:
print "Syntax error in", py[0]
print e.args
raise
elif pyc:
stuff = self._read(pyc[0])
# If this file was not generated for this version of
# Python, we need to regenerate it.
if stuff[:4] != imp.get_magic():
print "W: wrong version .pyc found (%s), will use .py" % pyc[0]
pyc = None
continue
try:
co = loadco(stuff[8:])
pth = pyc[0]
break
except (ValueError, EOFError):
print "W: bad .pyc found (%s), will use .py" % pyc[0]
pyc = None
else:
#print "DirOwner.getmod while 1 -> None"
return None
pth = os.path.join(self.path, pth)
if not os.path.isabs(pth):
pth = os.path.abspath(pth)
if ispkg:
mod = self._pkgclass()(nm, pth, co)
else:
mod = self._modclass()(nm, pth, co)
#print "DirOwner.getmod -> %s" % mod
return mod
class DirOwner(BaseDirOwner):
def __init__(self, path, target_platform=None):
if path == '':
path = os.getcwd()
if not os.path.isdir(path):
raise OwnerError("%s is not a directory" % repr(path))
Owner.__init__(self, path, target_platform)
def _isdir(self, fn):
return os.path.isdir(os.path.join(self.path, fn))
def _modtime(self, fn):
try:
return os.stat(os.path.join(self.path, fn))[8]
except OSError:
return None
def _read(self, fn):
return open(os.path.join(self.path, fn), 'rb').read()
def _pkgclass(self):
return PkgModule
def _modclass(self):
return PyModule
def _caseok(self, fn):
return caseOk(os.path.join(self.path, fn))
class PYZOwner(Owner):
def __init__(self, path, target_platform=None):
import archive
self.pyz = archive.ZlibArchive(path)
Owner.__init__(self, path, target_platform)
def getmod(self, nm):
rslt = self.pyz.extract(nm)
if not rslt:
return None
ispkg, co = rslt
if ispkg:
return PkgInPYZModule(nm, co, self)
return PyModule(nm, self.path, co)
ZipOwner = None
if zipimport:
# We cannot use zipimporter here because it has a stupid bug:
#
# >>> z.find_module("setuptools.setuptools.setuptools.setuptools.setuptools") is not None
# True
#
# So mf will go into infinite recursion.
# Instead, we'll reuse the BaseDirOwner logic, simply changing
# the template methods.
class ZipOwner(BaseDirOwner):
def __init__(self, path, target_platform=None):
import zipfile
try:
self.zf = zipfile.ZipFile(path, "r")
except IOError:
raise OwnerError("%s is not a zipfile" % path)
Owner.__init__(self, path, target_platform)
def getmod(self, fn):
fn = fn.replace(".", "/")
return BaseDirOwner.getmod(self, fn)
def _modtime(self, fn):
fn = fn.replace("\\","/")
try:
dt = self.zf.getinfo(fn).date_time
return dt
except KeyError:
return None
def _isdir(self, fn):
# No way to find out if "fn" is a directory
# so just always look into it in case it is.
return True
def _caseok(self, fn):
# zipfile is always case-sensitive, so surely
# there is no case mismatch.
return True
def _read(self, fn):
# zipfiles always use forward slashes
fn = fn.replace("\\","/")
return self.zf.read(fn)
def _pkgclass(self):
return lambda *args: PkgInZipModule(self, *args)
def _modclass(self):
return lambda *args: PyInZipModule(self, *args)
_globalownertypes = filter(None, [
DirOwner,
ZipOwner,
PYZOwner,
Owner,
])
#===================Import Directors====================================#
# ImportDirectors live on the metapath
# There's one for builtins, one for frozen modules, and one for sys.path
# Windows gets one for modules gotten from the Registry
# There should be one for Frozen modules
# Mac would have them for PY_RESOURCE modules etc.
# A generalization of Owner - their concept of "turf" is broader
class ImportDirector(Owner):
pass
class BuiltinImportDirector(ImportDirector):
def __init__(self):
self.path = 'Builtins'
def getmod(self, nm, isbuiltin=imp.is_builtin):
if isbuiltin(nm):
return BuiltinModule(nm)
return None
class FrozenImportDirector(ImportDirector):
def __init__(self):
self.path = 'FrozenModules'
def getmod(self, nm, isfrozen=imp.is_frozen):
if isfrozen(nm):
return FrozenModule(nm)
return None
class RegistryImportDirector(ImportDirector):
# for Windows only
def __init__(self):
self.path = "WindowsRegistry"
self.map = {}
try:
import win32api
import win32con
except ImportError:
pass
else:
subkey = r"Software\Python\PythonCore\%s\Modules" % sys.winver
for root in (win32con.HKEY_CURRENT_USER, win32con.HKEY_LOCAL_MACHINE):
try:
#hkey = win32api.RegOpenKeyEx(root, subkey, 0, win32con.KEY_ALL_ACCESS)
hkey = win32api.RegOpenKeyEx(root, subkey, 0, win32con.KEY_READ)
except Exception, e:
#print "RegistryImportDirector", e
pass
else:
numsubkeys, numvalues, lastmodified = win32api.RegQueryInfoKey(hkey)
for i in range(numsubkeys):
subkeyname = win32api.RegEnumKey(hkey, i)
#hskey = win32api.RegOpenKeyEx(hkey, subkeyname, 0, win32con.KEY_ALL_ACCESS)
hskey = win32api.RegOpenKeyEx(hkey, subkeyname, 0, win32con.KEY_READ)
val = win32api.RegQueryValueEx(hskey, '')
desc = getDescr(val[0])
#print " RegistryImportDirector got %s %s" % (val[0], desc) #XXX
self.map[subkeyname] = (val[0], desc)
hskey.Close()
hkey.Close()
break
def getmod(self, nm):
stuff = self.map.get(nm)
if stuff:
fnm, (suffix, mode, typ) = stuff
if typ == imp.C_EXTENSION:
return ExtensionModule(nm, fnm)
elif typ == imp.PY_SOURCE:
try:
stuff = open(fnm, 'r').read()+'\n'
co = compile(string.replace(stuff, "\r\n", "\n"), fnm, 'exec')
except SyntaxError, e:
print "Invalid syntax in %s" % py[0]
print e.args
raise
else:
stuff = open(fnm, 'rb').read()
                co = marshal.loads(stuff[8:])
return PyModule(nm, fnm, co)
return None
class PathImportDirector(ImportDirector):
def __init__(self, pathlist=None, importers=None, ownertypes=None,
target_platform=None):
if pathlist is None:
self.path = sys.path
else:
self.path = pathlist
if ownertypes == None:
self.ownertypes = _globalownertypes
else:
self.ownertypes = ownertypes
if importers:
self.shadowpath = importers
else:
self.shadowpath = {}
self.inMakeOwner = 0
self.building = {}
self.target_platform = target_platform
def __str__(self):
return str(self.path)
def getmod(self, nm):
mod = None
for thing in self.path:
if isinstance(thing, STRINGTYPE):
owner = self.shadowpath.get(thing, -1)
if owner == -1:
owner = self.shadowpath[thing] = self.makeOwner(thing)
if owner:
mod = owner.getmod(nm)
else:
mod = thing.getmod(nm)
if mod:
break
return mod
def makeOwner(self, path):
if self.building.get(path):
return None
self.building[path] = 1
owner = None
for klass in self.ownertypes:
try:
# this may cause an import, which may cause recursion
# hence the protection
owner = klass(path, self.target_platform)
except OwnerError:
pass
except Exception, e:
#print "FIXME: Wrong exception", e
pass
else:
break
del self.building[path]
return owner
def getDescr(fnm):
ext = os.path.splitext(fnm)[1]
for (suffix, mode, typ) in imp.get_suffixes():
if suffix == ext:
return (suffix, mode, typ)
#=================Import Tracker============================#
# This one doesn't really import, just analyzes
# If it *were* importing, it would be the one-and-only ImportManager
# i.e., the builtin import
UNTRIED = -1
imptyps = ['top-level', 'conditional', 'delayed', 'delayed, conditional']
import hooks
if __debug__:
import sys
import UserDict
class LogDict(UserDict.UserDict):
count = 0
def __init__(self, *args):
UserDict.UserDict.__init__(self, *args)
LogDict.count += 1
self.logfile = open("logdict%s-%d.log" % (".".join(map(str, sys.version_info)),
LogDict.count), "w")
def __setitem__(self, key, value):
self.logfile.write("%s: %s -> %s\n" % (key, self.data.get(key), value))
UserDict.UserDict.__setitem__(self, key, value)
def __delitem__(self, key):
self.logfile.write(" DEL %s\n" % key)
UserDict.UserDict.__delitem__(self, key)
else:
LogDict = dict
class ImportTracker:
# really the equivalent of builtin import
def __init__(self, xpath=None, hookspath=None, excludes=None,
target_platform=None):
self.path = []
self.warnings = {}
if xpath:
self.path = xpath
self.path.extend(sys.path)
self.modules = LogDict()
self.metapath = [
BuiltinImportDirector(),
FrozenImportDirector(),
RegistryImportDirector(),
PathImportDirector(self.path, target_platform=target_platform)
]
if hookspath:
hooks.__path__.extend(hookspath)
self.excludes = excludes
if excludes is None:
self.excludes = []
self.target_platform = target_platform
def analyze_r(self, nm, importernm=None):
importer = importernm
if importer is None:
importer = '__main__'
seen = {}
nms = self.analyze_one(nm, importernm)
nms = map(None, nms, [importer]*len(nms))
i = 0
while i < len(nms):
nm, importer = nms[i]
if seen.get(nm,0):
del nms[i]
mod = self.modules[nm]
if mod:
mod.xref(importer)
else:
i = i + 1
seen[nm] = 1
j = i
mod = self.modules[nm]
if mod:
mod.xref(importer)
for name, isdelayed, isconditional, level in mod.imports:
imptyp = isdelayed * 2 + isconditional
newnms = self.analyze_one(name, nm, imptyp, level)
newnms = map(None, newnms, [nm]*len(newnms))
nms[j:j] = newnms
j = j + len(newnms)
return map(lambda a: a[0], nms)
def analyze_one(self, nm, importernm=None, imptyp=0, level=-1):
#print '## analyze_one', nm, importernm, imptyp, level
# break the name being imported up so we get:
# a.b.c -> [a, b, c] ; ..z -> ['', '', z]
if not nm:
nm = importernm
importernm = None
level = 0
nmparts = string.split(nm, '.')
if level < 0:
# behaviour up to Python 2.4 (and default in Python 2.5)
# first see if we could be importing a relative name
contexts = [None]
if importernm:
if self.ispackage(importernm):
contexts.insert(0, importernm)
else:
pkgnm = string.join(string.split(importernm, '.')[:-1], '.')
if pkgnm:
contexts.insert(0, pkgnm)
elif level == 0:
# absolute import, do not try relative
importernm = None
contexts = [None]
elif level > 0:
# relative import, do not try absolute
if self.ispackage(importernm):
level -= 1
if level > 0:
importernm = string.join(string.split(importernm, '.')[:-level], ".")
contexts = [importernm, None]
importernm = None
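            # Worked example (illustrative): module 'a.b.c' doing
            # "from .. import d" arrives with level=2; 'a.b.c' is not a
            # package, so both trailing parts are stripped and contexts
            # becomes ['a', None]. Had the importer been the package
            # 'a.b.c' itself, one level is consumed first, giving 'a.b'.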
_all = None
assert contexts
# so contexts is [pkgnm, None] or just [None]
if nmparts[-1] == '*':
del nmparts[-1]
_all = []
nms = []
for context in contexts:
ctx = context
for i in range(len(nmparts)):
nm = nmparts[i]
if ctx:
fqname = ctx + '.' + nm
else:
fqname = nm
mod = self.modules.get(fqname, UNTRIED)
if mod is UNTRIED:
mod = self.doimport(nm, ctx, fqname)
if mod:
nms.append(mod.__name__)
ctx = fqname
else:
break
else:
# no break, point i beyond end
i = i + 1
if i:
break
# now nms is the list of modules that went into sys.modules
# just as result of the structure of the name being imported
# however, each mod has been scanned and that list is in mod.imports
if i<len(nmparts):
if ctx:
if hasattr(self.modules[ctx], nmparts[i]):
return nms
if not self.ispackage(ctx):
return nms
self.warnings["W: no module named %s (%s import by %s)" % (fqname, imptyps[imptyp], importernm or "__main__")] = 1
if self.modules.has_key(fqname):
del self.modules[fqname]
return nms
if _all is None:
return nms
bottommod = self.modules[ctx]
if bottommod.ispackage():
for nm in bottommod._all:
if not hasattr(bottommod, nm):
mod = self.doimport(nm, ctx, ctx+'.'+nm)
if mod:
nms.append(mod.__name__)
else:
bottommod.warnings.append("W: name %s not found" % nm)
return nms
def analyze_script(self, fnm):
try:
stuff = open(fnm, 'r').read()+'\n'
co = compile(string.replace(stuff, "\r\n", "\n"), fnm, 'exec')
except SyntaxError, e:
print "Invalid syntax in %s" % fnm
print e.args
raise
mod = PyScript(fnm, co)
self.modules['__main__'] = mod
return self.analyze_r('__main__')
def ispackage(self, nm):
return self.modules[nm].ispackage()
def doimport(self, nm, ctx, fqname):
#print "doimport", nm, ctx, fqname
        # Note that nm is NEVER a dotted name at this point
assert ("." not in nm), nm
if fqname in self.excludes:
return None
if ctx:
parent = self.modules[ctx]
if parent.ispackage():
mod = parent.doimport(nm)
if mod:
# insert the new module in the parent package
# FIXME why?
setattr(parent, nm, mod)
else:
# if parent is not a package, there is nothing more to do
return None
else:
# now we're dealing with an absolute import
# try to import nm using available directors
for director in self.metapath:
mod = director.getmod(nm)
if mod:
break
# here we have `mod` from:
# mod = parent.doimport(nm)
# or
# mod = director.getmod(nm)
if mod:
mod.__name__ = fqname
self.modules[fqname] = mod
# now look for hooks
# this (and scan_code) are instead of doing "exec co in mod.__dict__"
try:
hookmodnm = 'hook-'+fqname
hooks = __import__('hooks', globals(), locals(), [hookmodnm])
hook = getattr(hooks, hookmodnm)
except AttributeError:
pass
else:
# rearranged so that hook() has a chance to mess with hiddenimports & attrs
if hasattr(hook, 'hook'):
mod = hook.hook(mod)
if hasattr(hook, 'hiddenimports'):
for impnm in hook.hiddenimports:
mod.imports.append((impnm, 0, 0, -1))
if hasattr(hook, 'attrs'):
for attr, val in hook.attrs:
setattr(mod, attr, val)
if hasattr(hook, 'datas'):
# hook.datas is a list of globs of files or directories to bundle
# as datafiles. For each glob, a destination directory is specified.
for g,dest_dir in hook.datas:
if dest_dir: dest_dir += "/"
for fn in glob.glob(g):
if os.path.isfile(fn):
mod.datas.append((dest_dir + os.path.basename(fn), fn, 'DATA'))
else:
def visit((base,dest_dir,datas), dirname, names):
for fn in names:
fn = os.path.join(dirname, fn)
if os.path.isfile(fn):
datas.append((dest_dir + fn[len(base)+1:], fn, 'DATA'))
os.path.walk(fn, visit, (os.path.dirname(fn),dest_dir,mod.datas))
if fqname != mod.__name__:
print "W: %s is changing it's name to %s" % (fqname, mod.__name__)
self.modules[mod.__name__] = mod
else:
assert (mod == None), mod
self.modules[fqname] = None
# should be equivalent using only one
# self.modules[fqname] = mod
# here
return mod
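    # Shape of a hook module consumed above (illustrative names):
    #   # hooks/hook-mypkg.py
    #   hiddenimports = ['mypkg._speedups']   # appended as top-level imports
    #   attrs = [('__version__', '1.0')]      # setattr'd onto the Module
    #   datas = [('mypkg/data/*.dat', 'mypkg_data')]
    #   def hook(mod):                        # may rewrite the Module itself
    #       return mod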
def getwarnings(self):
warnings = self.warnings.keys()
for nm,mod in self.modules.items():
if mod:
for w in mod.warnings:
warnings.append(w+' - %s (%s)' % (mod.__name__, mod.__file__))
return warnings
def getxref(self):
mods = self.modules.items() # (nm, mod)
mods.sort()
rslt = []
for nm, mod in mods:
if mod:
importers = mod._xref.keys()
importers.sort()
rslt.append((nm, importers))
return rslt
#====================Modules============================#
# All we're doing here is tracking, not importing
# If we were importing, these would be hooked to the real module objects
class Module:
_ispkg = 0
typ = 'UNKNOWN'
def __init__(self, nm):
self.__name__ = nm
self.__file__ = None
self._all = []
self.imports = []
self.warnings = []
self.binaries = []
self.datas = []
self._xref = {}
def ispackage(self):
return self._ispkg
def doimport(self, nm):
pass
def xref(self, nm):
self._xref[nm] = 1
def __str__(self):
return "<Module %s %s imports=%s binaries=%s datas=%s>" % \
(self.__name__, self.__file__, self.imports, self.binaries, self.datas)
class BuiltinModule(Module):
typ = 'BUILTIN'
def __init__(self, nm):
Module.__init__(self, nm)
class ExtensionModule(Module):
typ = 'EXTENSION'
def __init__(self, nm, pth):
Module.__init__(self, nm)
self.__file__ = pth
class PyModule(Module):
typ = 'PYMODULE'
def __init__(self, nm, pth, co):
Module.__init__(self, nm)
self.co = co
self.__file__ = pth
if os.path.splitext(self.__file__)[1] == '.py':
self.__file__ = self.__file__ + pyco()
self.scancode()
def scancode(self):
self.imports, self.warnings, self.binaries, allnms = scan_code(self.co)
if allnms:
self._all = allnms
if ctypes and self.binaries:
self.binaries = _resolveCtypesImports(self.binaries)
class PyScript(PyModule):
typ = 'PYSOURCE'
def __init__(self, pth, co):
Module.__init__(self, '__main__')
self.co = co
self.__file__ = pth
self.scancode()
class PkgModule(PyModule):
typ = 'PYMODULE'
def __init__(self, nm, pth, co):
PyModule.__init__(self, nm, pth, co)
self._ispkg = 1
pth = os.path.dirname(pth)
self.__path__ = [ pth ]
self._update_director(force=True)
def _update_director(self, force=False):
if force or self.subimporter.path != self.__path__:
self.subimporter = PathImportDirector(self.__path__)
def doimport(self, nm):
self._update_director()
mod = self.subimporter.getmod(nm)
if mod:
mod.__name__ = self.__name__ + '.' + mod.__name__
return mod
class PkgInPYZModule(PyModule):
def __init__(self, nm, co, pyzowner):
PyModule.__init__(self, nm, co.co_filename, co)
self._ispkg = 1
self.__path__ = [ str(pyzowner) ]
self.owner = pyzowner
def doimport(self, nm):
mod = self.owner.getmod(self.__name__ + '.' + nm)
return mod
class PyInZipModule(PyModule):
typ = 'ZIPFILE'
def __init__(self, zipowner, nm, pth, co):
PyModule.__init__(self, nm, co.co_filename, co)
self.owner = zipowner
class PkgInZipModule(PyModule):
typ = 'ZIPFILE'
def __init__(self, zipowner, nm, pth, co):
PyModule.__init__(self, nm, co.co_filename, co)
self._ispkg = 1
self.__path__ = [ str(zipowner) ]
self.owner = zipowner
def doimport(self, nm):
mod = self.owner.getmod(self.__name__ + '.' + nm)
return mod
#======================== Utility ================================#
# Scan the code object for imports, __all__ and weird stuff
import dis
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
IMPORT_FROM = dis.opname.index('IMPORT_FROM')
try:
IMPORT_STAR = dis.opname.index('IMPORT_STAR')
except:
IMPORT_STAR = 999
STORE_NAME = dis.opname.index('STORE_NAME')
STORE_FAST = dis.opname.index('STORE_FAST')
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
try:
STORE_MAP = dis.opname.index('STORE_MAP')
except:
STORE_MAP = 999
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
LOAD_ATTR = dis.opname.index('LOAD_ATTR')
LOAD_NAME = dis.opname.index('LOAD_NAME')
EXEC_STMT = dis.opname.index('EXEC_STMT')
try:
SET_LINENO = dis.opname.index('SET_LINENO')
except ValueError:
SET_LINENO = 999
BUILD_LIST = dis.opname.index('BUILD_LIST')
LOAD_CONST = dis.opname.index('LOAD_CONST')
if getattr(sys, 'version_info', (0,0,0)) > (2,5,0):
LOAD_CONST_level = LOAD_CONST
else:
LOAD_CONST_level = 999
if getattr(sys, 'version_info', (0,0,0)) >= (2,7,0):
COND_OPS = [dis.opname.index('POP_JUMP_IF_TRUE'),
dis.opname.index('POP_JUMP_IF_FALSE'),
dis.opname.index('JUMP_IF_TRUE_OR_POP'),
dis.opname.index('JUMP_IF_FALSE_OR_POP'),
]
else:
COND_OPS = [dis.opname.index('JUMP_IF_FALSE'),
dis.opname.index('JUMP_IF_TRUE'),
]
JUMP_FORWARD = dis.opname.index('JUMP_FORWARD')
try:
STORE_DEREF = dis.opname.index('STORE_DEREF')
except ValueError:
STORE_DEREF = 999
STORE_OPS = [STORE_NAME, STORE_FAST, STORE_GLOBAL, STORE_DEREF, STORE_MAP]
#IMPORT_STAR -> IMPORT_NAME mod ; IMPORT_STAR
#JUMP_IF_FALSE / JUMP_IF_TRUE / JUMP_FORWARD
def pass1(code):
instrs = []
i = 0
n = len(code)
curline = 0
incondition = 0
out = 0
while i < n:
if i >= out:
incondition = 0
c = code[i]
i = i+1
op = ord(c)
if op >= dis.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
else:
oparg = None
if not incondition and op in COND_OPS:
incondition = 1
out = oparg
if op in dis.hasjrel:
out += i
elif incondition and op == JUMP_FORWARD:
out = max(out, i + oparg)
if op == SET_LINENO:
curline = oparg
else:
instrs.append((op, oparg, incondition, curline))
return instrs
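# Example (illustrative) of what the conditional tracking buys us: in
#     if sys.platform == 'win32':
#         import msvcrt
# the jump target of the platform test covers the import, so pass1 emits the
# IMPORT_NAME with incondition=1 and the module is reported as 'conditional'.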
def scan_code(co, m=None, w=None, b=None, nested=0):
instrs = pass1(co.co_code)
if m is None:
m = []
if w is None:
w = []
if b is None:
b = []
all = None
lastname = None
level = -1 # import-level, same behaviour as up to Python 2.4
for i in range(len(instrs)):
op, oparg, conditional, curline = instrs[i]
if op == IMPORT_NAME:
            name = lastname = co.co_names[oparg]
#print 'import_name', name, `lastname`, level
m.append((name, nested, conditional, level))
elif op == IMPORT_FROM:
name = co.co_names[oparg]
#print 'import_from', name, `lastname`, level,
if level > 0 and (not lastname or lastname[-1:] == '.'):
name = lastname + name
else:
name = lastname + '.' + name
#print name
m.append((name, nested, conditional, level))
assert lastname is not None
elif op == IMPORT_STAR:
assert lastname is not None
m.append((lastname+'.*', nested, conditional, level))
elif op == STORE_NAME:
if co.co_names[oparg] == "__all__":
j = i - 1
pop, poparg, pcondtl, pline = instrs[j]
if pop != BUILD_LIST:
w.append("W: __all__ is built strangely at line %s" % pline)
else:
all = []
while j > 0:
j = j - 1
pop, poparg, pcondtl, pline = instrs[j]
if pop == LOAD_CONST:
all.append(co.co_consts[poparg])
else:
break
elif op in STORE_OPS:
pass
elif op == LOAD_CONST_level:
            # starting with Python 2.5, _each_ import is preceded with a
# LOAD_CONST to indicate the relative level.
if isinstance(co.co_consts[oparg], (int, long)):
level = co.co_consts[oparg]
elif op == LOAD_GLOBAL:
name = co.co_names[oparg]
cndtl = ['', 'conditional'][conditional]
lvl = ['top-level', 'delayed'][nested]
if name == "__import__":
w.append("W: %s %s __import__ hack detected at line %s" % (lvl, cndtl, curline))
elif name == "eval":
w.append("W: %s %s eval hack detected at line %s" % (lvl, cndtl, curline))
elif op == EXEC_STMT:
cndtl = ['', 'conditional'][conditional]
lvl = ['top-level', 'delayed'][nested]
w.append("W: %s %s exec statement detected at line %s" % (lvl, cndtl, curline))
else:
lastname = None
if ctypes:
# ctypes scanning requires a scope wider than one bytecode instruction,
# so the code resides in a separate function for clarity.
ctypesb, ctypesw = scan_code_for_ctypes(co, instrs, i)
b.extend(ctypesb)
w.extend(ctypesw)
for c in co.co_consts:
if isinstance(c, type(co)):
# FIXME: "all" was not updated here nor returned. Was it the desired
# behaviour?
_, _, _, all_nested = scan_code(c, m, w, b, 1)
if all_nested:
all.extend(all_nested)
return m, w, b, all
def scan_code_for_ctypes(co, instrs, i):
"""Detects ctypes dependencies, using reasonable heuristics that should
cover most common ctypes usages; returns a tuple of two lists, one
containing names of binaries detected as dependencies, the other containing
warnings.
"""
def _libFromConst(i):
"""Extracts library name from an expected LOAD_CONST instruction and
        appends it to the local binaries list.
"""
op, oparg, conditional, curline = instrs[i]
if op == LOAD_CONST:
soname = co.co_consts[oparg]
b.append(soname)
b = []
op, oparg, conditional, curline = instrs[i]
if op in (LOAD_GLOBAL, LOAD_NAME):
name = co.co_names[oparg]
if name in ("CDLL", "WinDLL"):
# Guesses ctypes imports of this type: CDLL("library.so")
# LOAD_GLOBAL 0 (CDLL) <--- we "are" here right now
# LOAD_CONST 1 ('library.so')
_libFromConst(i+1)
elif name == "ctypes":
# Guesses ctypes imports of this type: ctypes.DLL("library.so")
# LOAD_GLOBAL 0 (ctypes) <--- we "are" here right now
# LOAD_ATTR 1 (CDLL)
# LOAD_CONST 1 ('library.so')
op2, oparg2, conditional2, curline2 = instrs[i+1]
if op2 == LOAD_ATTR:
if co.co_names[oparg2] in ("CDLL", "WinDLL"):
# Fetch next, and finally get the library name
_libFromConst(i+2)
elif name in ("cdll", "windll"):
# Guesses ctypes imports of these types:
# * cdll.library (only valid on Windows)
# LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
# LOAD_ATTR 1 (library)
# * cdll.LoadLibrary("library.so")
# LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
# LOAD_ATTR 1 (LoadLibrary)
# LOAD_CONST 1 ('library.so')
op2, oparg2, conditional2, curline2 = instrs[i+1]
if op2 == LOAD_ATTR:
if co.co_names[oparg2] != "LoadLibrary":
# First type
soname = co.co_names[oparg2] + ".dll"
b.append(soname)
else:
# Second type, needs to fetch one more instruction
_libFromConst(i+2)
# If any of the libraries has been requested with anything different from
# the bare filename, drop that entry and warn the user - pyinstaller would
# need to patch the compiled pyc file to make it work correctly!
w = []
for bin in list(b):
if bin != os.path.basename(bin):
b.remove(bin)
w.append("W: ignoring %s - ctypes imports only supported using bare filenames" % (bin,))
return b, w
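# Example (illustrative): a module containing
#     lib = CDLL("library.so")
# is reported with b == ['library.so'], while CDLL("/opt/lib/library.so")
# is dropped from b and produces a bare-filename warning in w instead.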
def _resolveCtypesImports(cbinaries):
"""Completes ctypes BINARY entries for modules with their full path.
"""
if sys.platform.startswith("linux"):
envvar = "LD_LIBRARY_PATH"
elif sys.platform.startswith("darwin"):
envvar = "DYLD_LIBRARY_PATH"
else:
envvar = "PATH"
def _savePaths():
old = os.environ.get(envvar, None)
os.environ[envvar] = os.pathsep.join(getattr(sys, "pathex", []))
if old is not None:
os.environ[envvar] = os.pathsep.join([os.environ[envvar], old])
return old
def _restorePaths(old):
del os.environ[envvar]
if old is not None:
os.environ[envvar] = old
ret = []
# Try to locate the shared library on disk. This is done by
    # calling ctypes.util.find_library with ImportTracker's local paths
    # prepended to the library search path, then restoring the original values.
old = _savePaths()
for cbin in cbinaries:
ext = os.path.splitext(cbin)[1]
# On Windows, only .dll files can be loaded.
if os.name == "nt" and ext.lower() in [".so", ".dylib"]:
continue
cpath = find_library(os.path.splitext(cbin)[0])
if sys.platform == "linux2":
# CAVEAT: find_library() is not the correct function. Ctype's
# documentation says that it is meant to resolve only the filename
# (as a *compiler* does) not the full path. Anyway, it works well
# enough on Windows and Mac. On Linux, we need to implement
# more code to find out the full path.
if cpath is None:
cpath = cbin
# "man ld.so" says that we should first search LD_LIBRARY_PATH
# and then the ldcache
for d in os.environ["LD_LIBRARY_PATH"].split(":"):
if os.path.isfile(d + "/" + cpath):
cpath = d + "/" + cpath
break
else:
for L in os.popen("ldconfig -p").read().splitlines():
if cpath in L:
cpath = L.split("=>", 1)[1].strip()
assert os.path.isfile(cpath)
break
else:
cpath = None
if cpath is None:
print "W: library %s required via ctypes not found" % (cbin,)
else:
ret.append((cbin, cpath, "BINARY"))
_restorePaths(old)
return ret
| 25,945 | 911 | 1,829 |
e64e863a0b58c3f15bec05073314440019656578 | 1,201 | py | Python | examples/precession/run.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
] | 10 | 2018-03-24T07:43:17.000Z | 2022-03-26T10:42:27.000Z | examples/precession/run.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
] | 21 | 2018-03-26T15:08:53.000Z | 2021-07-10T16:11:14.000Z | examples/precession/run.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
] | 7 | 2018-04-09T11:50:48.000Z | 2021-06-10T09:23:25.000Z | import os
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.energies import Demag, Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE = os.path.join(MODULE_DIR, 'precession.png')
ts = np.linspace(0, 3e-10)
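# Relax the magnetisation of a 30x30x100 nm cuboid for 0.3 ns, recording the
# average unit magnetisation; the precession term can be switched off.
def run_simulation(do_precession):
    Ms = 0.86e6
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(30e-9, 30e-9, 100e-9), 6, 6, 20)
    sim = Simulation(mesh, Ms)
    sim.set_m((1, 0, 1))
    sim.do_precession = do_precession
    sim.add(Demag())
    sim.add(Exchange(13.0e-12))
    averages = []
    for t in ts:
        sim.run_until(t)
        averages.append(sim.m_average)
    return np.array(averages)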
subfigures = ("without precession", "with precession")
figure, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
for i, subfigure_name in enumerate(subfigures):
    m = zip(*run_simulation(bool(i)))  # transpose into per-component (mx, my, mz) series
for dim in xrange(3):
axes[i].plot(ts, m[dim], label="m{}".format(chr(120+dim)))
axes[i].legend()
axes[i].set_title(subfigure_name)
axes[i].set_xlabel("time (s)")
axes[i].set_ylabel("unit magnetisation")
axes[i].set_ylim([-0.1, 1.0])
figure.savefig(IMAGE)
| 29.292683 | 82 | 0.666112 | import os
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.energies import Demag, Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE = os.path.join(MODULE_DIR, 'precession.png')
ts = np.linspace(0, 3e-10)
def run_simulation(do_precession):
Ms = 0.86e6
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(30e-9, 30e-9, 100e-9), 6, 6, 20)
sim = Simulation(mesh, Ms)
sim.set_m((1, 0, 1))
sim.do_precession = do_precession
sim.add(Demag())
sim.add(Exchange(13.0e-12))
averages = []
for t in ts:
sim.run_until(t)
averages.append(sim.m_average)
return np.array(averages)
subfigures = ("without precession", "with precession")
figure, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
for i, subfigure_name in enumerate(subfigures):
    m = zip(*run_simulation(bool(i)))  # transpose into per-component (mx, my, mz) series
for dim in xrange(3):
axes[i].plot(ts, m[dim], label="m{}".format(chr(120+dim)))
axes[i].legend()
axes[i].set_title(subfigure_name)
axes[i].set_xlabel("time (s)")
axes[i].set_ylabel("unit magnetisation")
axes[i].set_ylim([-0.1, 1.0])
figure.savefig(IMAGE)
| 390 | 0 | 23 |
40225abe760d693f16341c302f8c573ee9c5473a | 816 | py | Python | examples/frequency_analysis_histogram.py | gregariouspanda/typex | 750008dcdfe40b185ae8b3737807d0e639d0cc68 | [
"MIT"
] | null | null | null | examples/frequency_analysis_histogram.py | gregariouspanda/typex | 750008dcdfe40b185ae8b3737807d0e639d0cc68 | [
"MIT"
] | null | null | null | examples/frequency_analysis_histogram.py | gregariouspanda/typex | 750008dcdfe40b185ae8b3737807d0e639d0cc68 | [
"MIT"
] | 2 | 2017-12-11T04:22:55.000Z | 2017-12-11T19:50:13.000Z | #!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import numpy as np
from typex.encryptor import Encryptor
plt.rcdefaults()
input_text = sys.stdin.read()
letter_appearances = {}
for char in Encryptor.ALPHABET:
letter_appearances[char] = 0
for char in input_text:
if char.upper() in Encryptor.ALPHABET:
letter_appearances[char.upper()] += 1
sorted_letters = sorted(
letter_appearances.keys(),
key=lambda letter: letter_appearances[letter], reverse=True)
sorted_appearances = sorted(letter_appearances.values(), reverse=True)
y_pos = np.arange(len(sorted_appearances))
plt.bar(y_pos, sorted_appearances, align='center', alpha=1)
plt.xticks(y_pos, sorted_letters)
plt.ylabel('Number of occurrences')
plt.xlabel('Letter')
plt.title('Letter Frequency')
plt.show()
| 26.322581 | 70 | 0.747549 | #!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import numpy as np
from typex.encryptor import Encryptor
plt.rcdefaults()
input_text = sys.stdin.read()
letter_appearances = {}
for char in Encryptor.ALPHABET:
letter_appearances[char] = 0
for char in input_text:
if char.upper() in Encryptor.ALPHABET:
letter_appearances[char.upper()] += 1
sorted_letters = sorted(
letter_appearances.keys(),
key=lambda letter: letter_appearances[letter], reverse=True)
sorted_appearances = sorted(letter_appearances.values(), reverse=True)
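# For a reasonably long English plaintext (or a monoalphabetic cipher of one)
# the left-most bars should roughly follow ETAOIN SHRDLU; a flatter histogram
# hints at a polyalphabetic machine cipher such as Typex.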
y_pos = np.arange(len(sorted_appearances))
plt.bar(y_pos, sorted_appearances, align='center', alpha=1)
plt.xticks(y_pos, sorted_letters)
plt.ylabel('Number of occurrences')
plt.xlabel('Letter')
plt.title('Letter Frequency')
plt.show()
| 0 | 0 | 0 |
74b752dff6dd5efadda095f5b6bf56e760c586a4 | 7,851 | py | Python | inventory/inventory/inventory/api/controllers/v1/__init__.py | xe1gyq/metal | 25a21d840d4b846c5aacd054b266cdcb6db799e5 | [
"Apache-2.0"
] | null | null | null | inventory/inventory/inventory/api/controllers/v1/__init__.py | xe1gyq/metal | 25a21d840d4b846c5aacd054b266cdcb6db799e5 | [
"Apache-2.0"
] | null | null | null | inventory/inventory/inventory/api/controllers/v1/__init__.py | xe1gyq/metal | 25a21d840d4b846c5aacd054b266cdcb6db799e5 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import cpu
from inventory.api.controllers.v1 import ethernet_port
from inventory.api.controllers.v1 import host
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_agent
from inventory.api.controllers.v1 import lldp_neighbour
from inventory.api.controllers.v1 import memory
from inventory.api.controllers.v1 import node
from inventory.api.controllers.v1 import pci_device
from inventory.api.controllers.v1 import port
from inventory.api.controllers.v1 import sensor
from inventory.api.controllers.v1 import sensorgroup
from inventory.api.controllers.v1 import system
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
class MediaType(base.APIBase):
"""A media type representation."""
base = wtypes.text
type = wtypes.text
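    def __init__(self, base, type):
        # Plain value object: keep the generic and vendor-specific MIME names.
        self.base = base
        self.type = type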
class V1(base.APIBase):
"""The representation of the version 1 of the API."""
id = wtypes.text
"The ID of the version, also acts as the release number"
media_types = [MediaType]
"An array of supported media types for this version"
links = [link.Link]
"Links that point to a specific URL for this version and documentation"
systems = [link.Link]
"Links to the system resource"
hosts = [link.Link]
"Links to the host resource"
lldp_agents = [link.Link]
"Links to the lldp agents resource"
lldp_neighbours = [link.Link]
"Links to the lldp neighbours resource"
@classmethod
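    def convert(self):
        # Minimal sketch of the version factory; the full implementation
        # also attaches self/bookmark links for every resource collection
        # (hosts, nodes, cpus, memory, ports, lldp, sensors, ...).
        v1 = V1()
        v1.id = "v1"
        v1.links = [link.Link.make_link('self', pecan.request.host_url,
                                        'v1', '', bookmark=True)]
        v1.media_types = [MediaType('application/json',
                          'application/vnd.openstack.inventory.v1+json')]
        v1.systems = [link.Link.make_link('self', pecan.request.host_url,
                                          'systems', '')]
        return v1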
class Controller(rest.RestController):
"""Version 1 API controller root."""
systems = system.SystemController()
hosts = host.HostController()
nodes = node.NodeController()
cpus = cpu.CPUController()
memorys = memory.MemoryController()
ports = port.PortController()
ethernet_ports = ethernet_port.EthernetPortController()
lldp_agents = lldp_agent.LLDPAgentController()
lldp_neighbours = lldp_neighbour.LLDPNeighbourController()
pci_devices = pci_device.PCIDeviceController()
sensors = sensor.SensorController()
sensorgroups = sensorgroup.SensorGroupController()
@wsme_pecan.wsexpose(V1)
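    def get(self):
        # Root resource: report the one and only API version.
        return V1.convert()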
__all__ = ('Controller',)
| 39.452261 | 75 | 0.440708 | #
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import cpu
from inventory.api.controllers.v1 import ethernet_port
from inventory.api.controllers.v1 import host
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_agent
from inventory.api.controllers.v1 import lldp_neighbour
from inventory.api.controllers.v1 import memory
from inventory.api.controllers.v1 import node
from inventory.api.controllers.v1 import pci_device
from inventory.api.controllers.v1 import port
from inventory.api.controllers.v1 import sensor
from inventory.api.controllers.v1 import sensorgroup
from inventory.api.controllers.v1 import system
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
class MediaType(base.APIBase):
"""A media type representation."""
base = wtypes.text
type = wtypes.text
def __init__(self, base, type):
self.base = base
self.type = type
class V1(base.APIBase):
"""The representation of the version 1 of the API."""
id = wtypes.text
"The ID of the version, also acts as the release number"
media_types = [MediaType]
"An array of supported media types for this version"
links = [link.Link]
"Links that point to a specific URL for this version and documentation"
systems = [link.Link]
"Links to the system resource"
hosts = [link.Link]
"Links to the host resource"
lldp_agents = [link.Link]
"Links to the lldp agents resource"
lldp_neighbours = [link.Link]
"Links to the lldp neighbours resource"
@classmethod
def convert(self):
v1 = V1()
v1.id = "v1"
v1.links = [link.Link.make_link('self', pecan.request.host_url,
'v1', '', bookmark=True),
link.Link.make_link('describedby',
'http://www.starlingx.io/',
'developer/inventory/dev',
'api-spec-v1.html',
bookmark=True, type='text/html')
]
v1.media_types = [MediaType('application/json',
'application/vnd.openstack.inventory.v1+json')]
v1.systems = [link.Link.make_link('self', pecan.request.host_url,
'systems', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'systems', '',
bookmark=True)
]
v1.hosts = [link.Link.make_link('self', pecan.request.host_url,
'hosts', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'hosts', '',
bookmark=True)
]
v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
'nodes', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'nodes', '',
bookmark=True)
]
v1.cpus = [link.Link.make_link('self', pecan.request.host_url,
'cpus', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'cpus', '',
bookmark=True)
]
v1.memory = [link.Link.make_link('self', pecan.request.host_url,
'memory', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'memory', '',
bookmark=True)
]
v1.ports = [link.Link.make_link('self',
pecan.request.host_url,
'ports', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ports', '',
bookmark=True)
]
v1.ethernet_ports = [link.Link.make_link('self',
pecan.request.host_url,
'ethernet_ports', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ethernet_ports', '',
bookmark=True)
]
v1.lldp_agents = [link.Link.make_link('self',
pecan.request.host_url,
'lldp_agents', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_agents', '',
bookmark=True)
]
v1.lldp_neighbours = [link.Link.make_link('self',
pecan.request.host_url,
'lldp_neighbours', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_neighbours', '',
bookmark=True)
]
v1.sensors = [link.Link.make_link('self',
pecan.request.host_url,
'sensors', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'sensors', '',
bookmark=True)
]
v1.sensorgroups = [link.Link.make_link('self',
pecan.request.host_url,
'sensorgroups', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'sensorgroups', '',
bookmark=True)
]
return v1
class Controller(rest.RestController):
"""Version 1 API controller root."""
systems = system.SystemController()
hosts = host.HostController()
nodes = node.NodeController()
cpus = cpu.CPUController()
memorys = memory.MemoryController()
ports = port.PortController()
ethernet_ports = ethernet_port.EthernetPortController()
lldp_agents = lldp_agent.LLDPAgentController()
lldp_neighbours = lldp_neighbour.LLDPNeighbourController()
pci_devices = pci_device.PCIDeviceController()
sensors = sensor.SensorController()
sensorgroups = sensorgroup.SensorGroupController()
@wsme_pecan.wsexpose(V1)
def get(self):
return V1.convert()
__all__ = ('Controller',)
| 5,450 | 0 | 79 |
583941451aed5816930ab28bef8e0198021e08fd | 475 | py | Python | thumbor_extras/detectors/tests/dnn_face_detector_test.py | imaus10/thumbor_extras | f58180c20b158944c428287bdc36715454ac88ea | [
"MIT"
] | null | null | null | thumbor_extras/detectors/tests/dnn_face_detector_test.py | imaus10/thumbor_extras | f58180c20b158944c428287bdc36715454ac88ea | [
"MIT"
] | 1 | 2022-01-24T01:58:52.000Z | 2022-01-24T01:58:52.000Z | thumbor_extras/detectors/tests/dnn_face_detector_test.py | imaus10/thumbor_extras | f58180c20b158944c428287bdc36715454ac88ea | [
"MIT"
] | null | null | null | import pytest
from thumbor_extras.detectors.dnn_face_detector import Detector
@pytest.mark.parametrize('image_context_arg', [
'face_image_context', 'gray_face_image_context', 'cmyk_face_image_context'
])
| 39.583333 | 78 | 0.804211 | import pytest
from thumbor_extras.detectors.dnn_face_detector import Detector
@pytest.mark.parametrize('image_context_arg', [
'face_image_context', 'gray_face_image_context', 'cmyk_face_image_context'
])
def test_should_detect_one_face(image_context_arg, request):
image_context = request.getfixturevalue(image_context_arg)
Detector(image_context, 0, None).detect(lambda: None)
detections = image_context.request.focal_points
assert len(detections) == 1
| 244 | 0 | 22 |
e4e4e7645e91953a23f21606ced25be76fbd2205 | 746 | py | Python | src/__main__.py | Ewpratten/Longboard-HUD | 973745f895c8c0a71e6684c853799a9b35160c67 | [
"MIT"
] | null | null | null | src/__main__.py | Ewpratten/Longboard-HUD | 973745f895c8c0a71e6684c853799a9b35160c67 | [
"MIT"
] | null | null | null | src/__main__.py | Ewpratten/Longboard-HUD | 973745f895c8c0a71e6684c853799a9b35160c67 | [
"MIT"
] | null | null | null | import time
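# Display stub and hall-effect sensor model driven by the loop below;
# getDiff() returns the time since the previous read (presumably one
# wheel revolution on the longboard).
def printToDisplay(text):
    print("Display: " + str(text))
class Sensor(object):
    def __init__(self, id):
        self.id = id
        self.enabled = False
        self.safety = True  # Must disable to use, just a safety feature
        self.lasttime = time.clock()
    def enable(self):
        self.safety = False
    def disable(self):
        self.safety = True
    def read(self):
        value = True
        self.enabled = value
        return self.enabled
    def getDiff(self):
        if self.read():
            diff = time.clock() - self.lasttime
            self.lasttime = time.clock()
        else:
            diff = None
        return diff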
hall1 = Sensor(1)
hall1.enable()
# set start times to show display is working
t1 = 999
while True:
td1 = hall1.getDiff()
    if td1 is not None:
t1 = td1
printToDisplay(t1)
| 16.577778 | 65 | 0.663539 | import time
def printToDisplay(text):
print("Display: " + str(text))
class Sensor(object):
def __init__(self, id):
self.id = id
self.enabled = False
        self.safety = True  # Must disable to use, just a safety feature
        self.lasttime = time.clock()
    def enable(self):
        self.safety = False
    def disable(self):
        self.safety = True
def read(self):
value = True
self.enabled = value
return self.enabled
def getDiff(self):
if self.read():
diff = time.clock() - self.lasttime
self.lasttime = time.clock()
else:
diff = None
return diff
hall1 = Sensor(1)
hall1.enable()
# set start times to show display is working
t1 = 999
while True:
td1 = hall1.getDiff()
    if td1 is not None:
t1 = td1
printToDisplay(t1)
| 389 | 0 | 169 |
68078f27b729fd91dbdb5c54a57c4c3759703cec | 15,955 | py | Python | jc_curve25519.py | david-oswald/jc_curve25519 | fa65318cb37507bdc2e5dab6433fd9518991953f | [
"MIT"
] | 23 | 2016-02-07T16:17:50.000Z | 2021-04-24T13:58:25.000Z | jc_curve25519.py | petrs/jc_curve25519 | d1b13138fe80145200c42eba713480525c50ec19 | [
"MIT"
] | 5 | 2019-04-12T14:33:08.000Z | 2021-05-08T07:31:58.000Z | jc_curve25519.py | petrs/jc_curve25519 | d1b13138fe80145200c42eba713480525c50ec19 | [
"MIT"
] | 5 | 2018-08-20T21:00:42.000Z | 2022-03-24T06:29:38.000Z | """
By David Oswald, d.f.oswald@cs.bham.ac.uk
26 August 2015
Some of this code is based on information or code from
- Sam Kerr: http://samuelkerr.com/?p=431
- Eli Bendersky: http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
- http://cr.yp.to/highspeed/naclcrypto-20090310.pdf, page 7
The code of Eli is in the public domain:
"Some of the blog posts contain code; unless otherwise stated, all of it is
in the public domain"
=======================================================================
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
=======================================================================
If this software is useful to you, I'd appreciate an attribution,
contribution (e.g. bug fixes, improvements, ...), or a beer.
"""
from smartcard.Exceptions import NoCardException
from smartcard.System import *
from smartcard.util import toHexString
from struct import *
from timeit import default_timer as timer
if __name__ == '__main__':
main()
| 30.741811 | 120 | 0.558195 | """
By David Oswald, d.f.oswald@cs.bham.ac.uk
26 August 2015
Some of this code is based on information or code from
- Sam Kerr: http://samuelkerr.com/?p=431
- Eli Bendersky: http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
- http://cr.yp.to/highspeed/naclcrypto-20090310.pdf, page 7
The code of Eli is in the public domain:
"Some of the blog posts contain code; unless otherwise stated, all of it is
in the public domain"
=======================================================================
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
=======================================================================
If this software is useful to you, I'd appreciate an attribution,
contribution (e.g. bug fixes, improvements, ...), or a beer.
"""
from smartcard.Exceptions import NoCardException
from smartcard.System import *
from smartcard.util import toHexString
from struct import *
from timeit import default_timer as timer
class JCCurve25519:
# Montgomery parameters of Curve25519
p = pow(2, 255) - 19
a_m = 486662
b_m = 1
r = pow(2, 252) + 27742317777372353535851937790883648493
    # Precomputed Weierstrass parameters of Curve25519
a_w = 0x2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa984914a144
b_w = 0x7b425ed097b425ed097b425ed097b425ed097b425ed097b4260b5e9c7710c864
Gx_w = 0x2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad245a
Gy_w = 0x20ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9
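    # Sketch of where these constants come from (for b_m = 1): completing the
    # cube on y^2 = x^3 + a_m*x^2 + x via x -> x - a_m/3 gives
    #   a_w = (1 - a_m**2 * inv(3)) % p
    #   b_w = (2 * a_m**3 * inv(27) - a_m * inv(3)) % p
    # and the Montgomery base point u = 9 maps to Gx_w = (9 + a_m * inv(3)) % p.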
@staticmethod
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
        0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if JCCurve25519.legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return p
elif p % 4 == 3:
return pow(a, (p + 1) // 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s //= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while JCCurve25519.legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
        # with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) // 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in range(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
@staticmethod
def legendre_symbol(a, q):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (q - 1) // 2, q)
return -1 if ls == q - 1 else ls
@staticmethod
def weierstrass_to_montgomery(xW):
xM = (((JCCurve25519.b_m * xW) % JCCurve25519.p) - JCCurve25519.a_m * JCCurve25519.inv(3)) % JCCurve25519.p
return xM
@staticmethod
def montgomery_to_weierstrass(xp):
xp = (xp + JCCurve25519.a_m * JCCurve25519.inv(3)) % JCCurve25519.p
yp2 = (((pow(xp,
3) % JCCurve25519.p) + JCCurve25519.a_w * xp) % JCCurve25519.p + JCCurve25519.b_w) % JCCurve25519.p
yp = JCCurve25519.modular_sqrt(yp2, JCCurve25519.p)
return [xp, yp]
@staticmethod
def unpack_le(s):
if len(s) != 32:
raise Exception("Length != 32")
return sum((s[i]) << (8 * i) for i in range(32))
@staticmethod
def pack_le(n):
r = []
for i in range(32):
r.append(int((n >> (8 * i)) & 0xff))
return r
@staticmethod
def unpack_be(s):
if len(s) != 32:
raise Exception("Length != 32")
return sum((s[i]) << (8 * (31 - i)) for i in range(32))
@staticmethod
def pack_be(n):
r = []
for i in range(32):
r.append(int((n >> (8 * (31 - i))) & 0xff))
return r
    # The following code is based on
# http://cr.yp.to/highspeed/naclcrypto-20090310.pdf, page 7
@staticmethod
def clamp(n):
n &= ~7
n &= ~(128 << 8 * 31)
n |= 64 << 8 * 31
return n
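    # (Clearing the three low bits makes the scalar a multiple of 8, which
    # kills the small cofactor subgroup; clearing bit 255 and setting bit 254
    # pins it into [2^254, 2^255) - the standard X25519 clamping rule.)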
@staticmethod
def expmod(b, e, m):
if e == 0:
return 1
t = JCCurve25519.expmod(b, e // 2, m) ** 2 % m
if e & 1:
t = (t * b) % m
return t
@staticmethod
def inv(x):
return JCCurve25519.expmod(x, JCCurve25519.p - 2, JCCurve25519.p)
# Addition and doubling formulas taken
# from Appendix D of "Curve25519:
# new Diffie-Hellman speed records".
@staticmethod
# def add((xn,zn), (xm,zm), (xd,zd)):
def add(n, m, z):
(xn, zn), (xm, zm), (xd, zd) = n, m, z
x = 4 * (xm * xn - zm * zn) ** 2 * zd
z = 4 * (xm * zn - zm * xn) ** 2 * xd
return (x % JCCurve25519.p, z % JCCurve25519.p)
@staticmethod
def double(n):
(xn, zn) = n
x = (xn ** 2 - zn ** 2) ** 2
z = 4 * xn * zn * (xn ** 2 + JCCurve25519.a_m * xn * zn + zn ** 2)
return (x % JCCurve25519.p, z % JCCurve25519.p)
@staticmethod
def smul(s, base):
one = (base, 1)
two = JCCurve25519.double(one)
# f(m) evaluates to a tuple
# containing the mth multiple and the
# (m+1)th multiple of base.
def f(m):
if m == 1:
return (one, two)
(pm, pm1) = f(m // 2)
if m & 1:
return JCCurve25519.add(pm, pm1, one), JCCurve25519.double(pm1)
return JCCurve25519.double(pm), JCCurve25519.add(pm, pm1, one)
((x, z), _) = f(s)
return (x * JCCurve25519.inv(z)) % JCCurve25519.p
def __init__(self):
self.connected = False
def isConnected(self):
return self.connected
def transmitReceive(self, apdu):
response, sw1, sw2 = self.c.transmit(apdu)
if sw1 == 0x61:
GET_RESPONSE = [0x00, 0xC0, 0x00, 0x00]
apdu = GET_RESPONSE + [sw2]
response, sw1, sw2 = self.c.transmit(apdu)
if sw1 == 0x6c:
apdu[4] = sw2
response, sw1, sw2 = self.c.transmit(apdu)
return response, sw1, sw2
def connect(self):
print("== Available readers:")
self.connected = False
rl = smartcard.System.readers()
i = 0
for r in rl:
print(str(i) + ") " + r.name)
i = i + 1
if len(rl) == 0:
raise Exception("No readers available")
print(" Connecting to a first reader with a card ... ")
usable_card_found = False
for r in rl:
try:
self.c = r.createConnection()
self.c.connect() # try to connect
print(" ATR: " + toHexString(self.c.getATR()))
usable_card_found = True
break # we found it, stop searching
            # if no card is found, NoCardException is emitted, but we catch
            # broadly to cover other reader-related errors as well
except Exception:
continue # try next reader
if not usable_card_found:
raise Exception("No reader with card was found")
# select app
# SELECT = [0x00, 0xA4, 0x04, 0x00, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]
SELECT = [0x00, 0xA4, 0x04, 0x00, 0x08, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8]
response, sw1, sw2 = self.transmitReceive(SELECT)
if sw1 == 0x90 and sw2 == 0x00:
print(" App selected")
self.connected = True
else:
raise Exception("App select failed")
def generateKeypair(self):
""" Generates a key pair on card for debug purposes, will
return public and private key
This method handles the conversion to Montgomery coordinates etc.
"""
        if not self.connected:
raise Exception("Not connected")
# Generate key APDU
GENKEY = [0x00, 0x01, 0x0, 0x00, 0x00]
b = timer()
response, sw1, sw2 = self.transmitReceive(GENKEY)
e = timer()
print("Execution time: " + str((e - b) * 1000) + ' ms')
if sw1 != 0x90 or sw2 != 0x00:
raise Exception("Card error")
return False
if len(response) != 64:
raise Exception("Response is " + str(len(response)) + " byte")
# Unpack and convert internally
skW = JCCurve25519.unpack_be(response[0:32])
pkW = JCCurve25519.unpack_be(response[32:64])
# print "skW = " + hex(skW)
# print "pkW = " + hex(pkW)
# convert to Curve25519 standards
sk = skW << 3
pk = JCCurve25519.weierstrass_to_montgomery(pkW)
# Multiply PK by 8 (three doublings)
pk = JCCurve25519.smul(8, pk)
return sk, pk
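    # Note on the factor 8 above: the card returns skW*G on the Weierstrass
    # form, the host uses sk = 8*skW as the Curve25519 secret, and
    # 8*(skW*G) == sk*G, so the pair stays consistent; clamped X25519
    # scalars are multiples of 8 anyway, matching clamp() above.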
def setPrivateKey(self, sk):
""" Sets a private key and returns the public key
This method handles the conversion to Montgomery coordinates etc.
"""
if not self.connected:
raise Exception("Not connected")
# swap endianness
sk = JCCurve25519.pack_be(sk)
# Set key APDU
SETKEY = [0x00, 0x02, 0x0, 0x00, 0x20] + sk
b = timer()
response, sw1, sw2 = self.transmitReceive(SETKEY)
e = timer()
print("Execution time: " + str((e - b) * 1000) + ' ms')
if sw1 != 0x90 or sw2 != 0x00:
raise Exception("Card error")
if len(response) != 32:
raise Exception("Response is " + str(len(response)) + " byte")
# Unpack and convert internally
pkW = JCCurve25519.unpack_be(response)
# convert to Curve25519 standards
pk = JCCurve25519.weierstrass_to_montgomery(pkW)
# Multiply PK by 8 (three doublings)
pk = JCCurve25519.smul(8, pk)
return pk
def generateSharedSecret(self, pk):
""" Generates a shared secret from the internal private key and the
passed public key
This method handles the conversion to Montgomery coordinates etc.
"""
if not self.connected:
raise Exception("Not connected")
# convert the peer public key to the card's Weierstrass representation
pkW = JCCurve25519.montgomery_to_weierstrass(pk)
# send to card MSByte first
pkCard = JCCurve25519.pack_be(pkW[0]) + JCCurve25519.pack_be(pkW[1])
GENSECRET = [0x00, 0x03, 0x0, 0x00, 0x40] + pkCard
b = timer()
response, sw1, sw2 = self.transmitReceive(GENSECRET)
e = timer()
print("Execution time: " + str((e - b) * 1000) + ' ms')
if sw1 != 0x90 or sw2 != 0x00:
raise Exception("Card error")
if len(response) != 32:
raise Exception("Response is " + str(len(response)) + " byte")
# Unpack and convert internally
sharedSecretW = JCCurve25519.unpack_be(response)
# convert to Curve25519 standards
sharedSecret = JCCurve25519.weierstrass_to_montgomery(sharedSecretW)
# Multiply secret by 8 (three doublings)
sharedSecret = JCCurve25519.smul(8, sharedSecret)
return sharedSecret
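# Note on the post-processing in the three methods above: the card works
# on an equivalent short Weierstrass curve, so every returned point is
# mapped back to Montgomery form and multiplied by the cofactor 8 (with
# the secret scalar shifted left by 3 to match), presumably to keep all
# results in the prime-order subgroup that X25519 expects.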
def main():
# X25519 test vectors: Alice's key pair from RFC 7748, section 6.1
skTV = [0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, 0xdf, 0x4c,
0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a]
pkTV = [0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, 0x0d, 0xbf,
0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a]
pkN = JCCurve25519.unpack_le(pkTV)
skN = JCCurve25519.unpack_le(skTV)
skN = JCCurve25519.clamp(skN)
pkTest = JCCurve25519.smul(skN, 9)
print('\n')
print("== Testing against test vector == ")
print("pkRef = " + hex(pkN))
print("pkTest = " + hex(pkTest))
print("diff = " + hex(pkTest - pkN))
print('\n')
if (pkTest - pkN) != 0:
return
# Operations with Javacard
curve = JCCurve25519()
curve.connect()
print('\n')
print("== Testing on-card key generation")
sk, pk = curve.generateKeypair()
# Compute reference
pkRef = JCCurve25519.smul(sk, 9)
diff = pkRef - pk
print("pkRef = " + hex(pkRef))
print("pkTest = " + hex(pk))
print("diff = " + hex(diff))
print('\n')
if diff != 0:
return
print("== Testing setting the private key")
pkGen = curve.setPrivateKey(skN)
diff = pkN - pkGen
print("pkRef = " + hex(pkN))
print("pkTest = " + hex(pkGen))
print("diff = " + hex(diff))
print('\n')
if diff != 0:
return
print("== Testing generating shared secret")
# Bob's public key and the expected shared secret, also from RFC 7748, section 6.1
pkBob = [0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, 0x3f, 0x83,
0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f]
sharedSecret = [0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42]
pkBobN = JCCurve25519.unpack_le(pkBob)
sharedSecretN = JCCurve25519.unpack_le(sharedSecret)
ssGen = curve.generateSharedSecret(pkBobN)
diff = sharedSecretN - ssGen
print("secretRef = " + hex(sharedSecretN))
print("secretTest = " + hex(ssGen))
print("diff = " + hex(diff))
print('\n')
if diff != 0:
return
if __name__ == '__main__':
main()
| 6,376 | 7,813 | 46 |
36aeae624d800b1126072615eb1b4ee584a4eae3 | 1,126 | py | Python | 027_solution.py | ed-cetera/project-euler-python | 59359991ba9bbd7d419e2c7e133d67a6992695b1 | [
"MIT"
] | null | null | null | 027_solution.py | ed-cetera/project-euler-python | 59359991ba9bbd7d419e2c7e133d67a6992695b1 | [
"MIT"
] | null | null | null | 027_solution.py | ed-cetera/project-euler-python | 59359991ba9bbd7d419e2c7e133d67a6992695b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
import time
if __name__ == "__main__":
start = time.time()
main()
end = time.time()
print("Duration: {0:0.6f}s".format(end - start))
| 24.478261 | 62 | 0.550622 | #!/usr/bin/env python3
import math
import time
def is_prime(number):
if number < 2:
return False
for divisor in range(2, int(math.sqrt(number)) + 1):
if number % divisor == 0:
return False
return True
def main():
upper_limit_b = 1000
noninclusive_absolute_limit_a = 1000
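# For n = 0 the quadratic n**2 + a*n + b evaluates to b itself, so b must
# be prime for the series to start at all; it is therefore enough to
# enumerate prime values of b, which the loop below collects.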
b_primes = [2]
counter = 1
while counter < upper_limit_b:
counter += 2
for prime in b_primes[1:]:
if counter % prime == 0:
break
else:
b_primes.append(counter)
longest_prime_series = 0
for b in b_primes:
for a in range(-1 * noninclusive_absolute_limit_a + 1,
noninclusive_absolute_limit_a):
n = 0
while is_prime(n**2 + a * n + b):
n += 1
if n > longest_prime_series:
longest_prime_series = n
coefficient_product = a * b
print("Solution:", coefficient_product)
if __name__ == "__main__":
start = time.time()
main()
end = time.time()
print("Duration: {0:0.6f}s".format(end - start))
| 891 | 0 | 46 |
5eea5e403210f775ae6dcdbeaa2ec5a1460617ab | 4,687 | py | Python | Scripts/unitydownloadurl.py | litefeel/Unity-CrossPlatformAPI | 93ed5f643b681ad318b871ea27cbf3cdb9b27e43 | [
"Apache-2.0"
] | 15 | 2018-03-07T07:09:09.000Z | 2021-03-11T01:27:30.000Z | Scripts/unitydownloadurl.py | litefeel/Unity-CrossPlatformAPI | 93ed5f643b681ad318b871ea27cbf3cdb9b27e43 | [
"Apache-2.0"
] | null | null | null | Scripts/unitydownloadurl.py | litefeel/Unity-CrossPlatformAPI | 93ed5f643b681ad318b871ea27cbf3cdb9b27e43 | [
"Apache-2.0"
] | 5 | 2018-03-16T03:51:20.000Z | 2021-11-18T17:20:30.000Z | #!/bin/python
# coding=utf-8
from optparse import OptionParser
import urllib
import re
osmap = {
'mac' : 'Mac',
'osx' : 'Mac',
'win' : 'Windows',
'win64' : 'Windows64',
'win32' : 'Windows32',
}
# -------------- main --------------
if __name__ == '__main__':
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option(
'-v', '--version', dest='version',
help='filter unity version, beta last latest 5 5.3[+|-] 5.3.5')
parser.add_option(
'-o', '--os', dest='os',
help='filter os, win[64|32] or osx or mac')
parser.add_option(
'-l', '--list', dest='list', action='store_true', default=False,
help='show a list')
(opts, args) = parser.parse_args()
if opts.version == 'beta':
urlTuples = getBetaUrlTuples()
else:
urlTuples = getUrlTuples()
urlTuples = filterUrlTuples(urlTuples, opts.os, opts.version)
if opts.list:
for urlTuple in urlTuples:
print urlTuple[0]
elif len(urlTuples) > 0:
print urlTuples[0][0]
| 36.333333 | 157 | 0.610412 | #!/bin/python
# coding=utf-8
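# NOTE: this is a Python 2 script (bare print statements, urllib.urlopen).
# Hypothetical invocations, using the options defined in __main__ below:
#   python unitydownloadurl.py --os mac --version latest
#   python unitydownloadurl.py -v 5.3+ -o win64 --list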
from optparse import OptionParser
import urllib
import re
def getRealBetaUrlTuples(pageUrl):
f = urllib.urlopen(pageUrl)
data = f.read()
# <a href="http://beta.unity3d.com/download/0df597686c75/Windows64EditorInstaller/UnitySetup64-5.4.0b19.exe">Unity Editor 64-bit (Win)</a>
# <a href="http://beta.unity3d.com/download/0df597686c75/MacEditorInstaller/Unity-5.4.0b19.pkg">Unity Editor (Mac)</a>
urlTuples = re.findall(r'<a href="(https?://[^"]*?(UnityDownloadAssistant|EditorInstaller/Unity.*?)-([^"]*?)\.(pkg|dmg|exe))">Unity Editor.*?</a>', data)
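# each tuple carries the four regex capture groups:
# (full_url, installer_kind, version_string, file_extension)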
return urlTuples
def getBetaUrlTuples():
f = urllib.urlopen("https://unity3d.com/unity/beta/archive")
data = f.read()
# <a href="/unity/beta/unity5.4.0b19">Download</a>
urlTuples = re.findall(r'<a href="([^"]*?/unity/beta/[^"]*?)">Download</a>', data)
if len(urlTuples) > 0:
url = urlTuples[0]
if url.startswith("/"):
url = "https://unity3d.com" + url
return getRealBetaUrlTuples(url)
return [] # empty list rather than None, so len() in filterUrlTuples() still works
def getUrlTuples():
f = urllib.urlopen("https://unity3d.com/get-unity/download/archive")
data = f.read()
# <a href="http://netstorage.unity3d.com/unity/960ebf59018a/Windows64EditorInstaller/UnitySetup64-5.3.5f1.exe">Unity Editor 64-bit</a>
# <a href="http://download.unity3d.com/download_unity/0b02744d4013/MacEditorInstaller/Unity-5.0.2f1.pkg">Unity Editor</a>
# <a href="http://download.unity3d.com/download_unity/a6d8d714de6f/UnityDownloadAssistant-5.4.0f3.dmg">Unity编辑器</a>
# urlTuples = re.findall(r'<a href="(https?://[^"]*?/(Mac|Windows32|Windows64)EditorInstaller/Unity-(5\..*?)\.(pkg|exe))">Unity Editor</a>', data)
urlTuples = re.findall(r'<a href="(https?://[^"]*?(UnityDownloadAssistant|EditorInstaller/Unity.*?)-(.*?)\.(pkg|dmg|exe))">Unity Editor.*?</a>', data)
return urlTuples
osmap = {
'mac' : 'Mac',
'osx' : 'Mac',
'win' : 'Windows',
'win64' : 'Windows64',
'win32' : 'Windows32',
}
def splitByOS(urlTuples):
macs, win64s, win32s = [], [], []
for urlTuple in urlTuples:
if urlTuple[0].find('Mac') >= 0 or urlTuple[0].find('UnityDownloadAssistant') >= 0:
macs.append(urlTuple)
elif urlTuple[0].find('Windows64') >= 0:
win64s.append(urlTuple)
elif urlTuple[0].find('Windows32') >= 0:
win32s.append(urlTuple)
return macs, win64s, win32s
def filterByVersion(urlTuples, version):
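# version may end in '+' (keep only the newest match) or '-' (keep only
# the oldest). The 'last'/'latest' branch below keeps element 0, so the
# scraped lists are assumed to be ordered newest-first.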
if version is None:
return urlTuples
if version == 'last' or version == 'latest' or version == 'beta':
del urlTuples[1:]
return urlTuples
isNewst = version.endswith('+')
isOldest = version.endswith('-')
if isNewst or isOldest:
version = version[0:-1]
urlTuples = filter(lambda urlTuple: urlTuple[2].find(version) >= 0, urlTuples)
if isNewst:
del urlTuples[1:]
elif isOldest:
del urlTuples[-1:]
return urlTuples
def filterUrlTuples(urlTuples, os, version):
if len(urlTuples) == 0 :
return urlTuples
macs, win64s, win32s = splitByOS(urlTuples)
if os is not None:
os = osmap[os.lower()]
if os == 'Mac':
win64s, win32s = [], []
elif os == 'Windows64':
macs, win32s = [], []
elif os == 'Windows32':
macs, win64s = [], []
elif os == 'Windows':
macs = []
if version is not None:
macs = filterByVersion(macs, version)
win64s = filterByVersion(win64s, version)
win32s = filterByVersion(win32s, version)
urlTuples = macs
urlTuples[len(urlTuples):len(urlTuples)+len(win64s)] = win64s
urlTuples[len(urlTuples):len(urlTuples)+len(win32s)] = win32s
return urlTuples
# -------------- main --------------
if __name__ == '__main__':
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option(
'-v', '--version', dest='version',
help='filter unity version, beta last latest 5 5.3[+|-] 5.3.5')
parser.add_option(
'-o', '--os', dest='os',
help='filter os, win[64|32] or osx or mac')
parser.add_option(
'-l', '--list', dest='list', action='store_true', default=False,
help='show a list')
(opts, args) = parser.parse_args()
if opts.version == 'beta':
urlTuples = getBetaUrlTuples()
else:
urlTuples = getUrlTuples()
urlTuples = filterUrlTuples(urlTuples, opts.os, opts.version)
if opts.list:
for urlTuple in urlTuples:
print urlTuple[0]
elif len(urlTuples) > 0:
print urlTuples[0][0]
| 3,453 | 0 | 138 |
1c9cb6f9f6c27e46815710c307595f67692f16b9 | 10,220 | py | Python | esp8266/clock/dictionary.py | JiangYangJie/Embedded | 70dba3a1e5c1fb7b9a7d8b633a5fc05138894456 | [
"MIT"
] | 1 | 2019-07-23T07:14:07.000Z | 2019-07-23T07:14:07.000Z | esp8266/clock/dictionary.py | JiangYangJie/Embedded | 70dba3a1e5c1fb7b9a7d8b633a5fc05138894456 | [
"MIT"
] | null | null | null | esp8266/clock/dictionary.py | JiangYangJie/Embedded | 70dba3a1e5c1fb7b9a7d8b633a5fc05138894456 | [
"MIT"
] | 2 | 2019-07-22T11:42:55.000Z | 2019-12-15T01:43:19.000Z | dicts={
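# Font data for 30x32 pixel glyphs: each value is 4 rows of 30 column
# bytes (30 columns x 4 vertical 8-bit pages), presumably the page layout
# of an SSD1306-style OLED driver. Keys are either the UTF-8 byte value
# of a CJK character as an int (0xe88f9c encodes "菜") or the hex ASCII
# code of a character as a string ('30'..'39' for digits, '20' space,
# '3a' colon).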
0xe88f9c:
[0x00,0x00,0x00,0x00,0x00,0x3F,0x00,0x00,0x00,0x00,0x07,0x00,0x02,0x01,0x00,0x00,0x00,0x00,0x7F,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x0C,0x30,0x00,0x00,
0x00,0x00,0x1C,0x18,0x18,0xFF,0x18,0x18,0x10,0x00,0xFF,0x02,0x01,0x81,0xC0,0xC0,0x41,0x01,0xFF,0x07,0x0D,0x19,0x31,0x61,0xC1,0x01,0x01,0x01,0x01,0x00,
0x00,0x00,0x38,0x30,0x30,0xFF,0x30,0x30,0x07,0xFF,0x00,0x01,0x81,0xC3,0xC2,0x86,0xCC,0x88,0xFF,0xA0,0xA0,0x90,0x8C,0x86,0x83,0x81,0x80,0x80,0x00,0x00,
0x00,0x00,0x00,0x00,0x38,0xFC,0x00,0x80,0xC0,0xC0,0x00,0x80,0xC0,0x00,0x00,0x00,0x00,0x18,0xFC,0x00,0x00,0x00,0x00,0x00,0x80,0xF0,0x7C,0x10,0x00,0x00],#/*"菜",0*/
0xe58d95:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x3F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x40,0x20,0x38,0x18,0x0C,0x08,0xFF,0x01,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x00,
0x00,0x00,0x0C,0x0C,0x18,0x10,0x20,0xFF,0x80,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,0x38,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"单",1*/
0xe697b6:
[0x00,0x00,0x00,0x00,0x00,0x10,0x1F,0x18,0x18,0x18,0x18,0x18,0x18,0x1F,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1F,0x18,0x18,0x10,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x30,0xF0,0x30,0x30,0x3F,0x30,0x30,0x30,0xF3,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0xF0,0x30,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0xFF,0x03,0x03,0x03,0x03,0x83,0xC3,0xE3,0x63,0x43,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x3F,0x06,0x04,0x00,
0x00,0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x18,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"时",0*/
0xe9929f:
[0x00,0x00,0x03,0x03,0x03,0x06,0x06,0x05,0x0C,0x08,0x08,0x1F,0x13,0x23,0x43,0x03,0x03,0x3F,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x18,0xE0,0x01,0x01,0x11,0xF9,0x01,0x01,0x01,0x01,0x19,0xE1,0x01,0x00,0x00,0x08,0x10,0x60,0xC0,0x80,0x00,0x00,0x00,0x00,
0x00,0x00,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0xFF,0x86,0x86,0x86,0x86,0x86,0x86,0xFF,0x86,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0xF8,0x10,0x10,0x10,0x10,0x10,0x10,0xF0,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"钟",1*/
0xe997b9:
[0x00,0x00,0x01,0x00,0x00,0x00,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x08,0x00,
0x00,0x00,0x00,0xC0,0xE7,0x60,0x42,0x03,0x01,0x00,0xFF,0x01,0x01,0x01,0x7F,0x61,0x61,0x61,0x61,0x61,0x61,0x61,0x61,0x01,0x01,0x01,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x80,0x80,0x82,0xFF,0x80,0x80,0x80,0xFE,0x86,0x86,0x86,0x86,0x86,0x86,0xBC,0x8C,0x80,0x80,0x80,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x30,0xF0,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0xE0,0xE0,0x00,0x00],#/*"闹",0*/
'30':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x0F,0x0F,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x07,0x7C,0xF0,0xC0,0x80,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0xC0,0xE0,0x7C,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x1E,0x07,0x03,0x01,0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x03,0x07,0x1E,0xE0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xE0,0xF0,0xF0,0xF8,0xF8,0xF8,0xF8,0xF8,0xF0,0xF0,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"0",0*/
'31':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xFF,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x07,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x40,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xF0,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x00,0x00,0x00,0x00],#/*"1",1*/
'32':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x0F,0x0F,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x1F,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x80,0x00,0x00,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x1C,0x70,0xC0,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0x1F,0x03,0x01,0x01,0x01,0x01,0x03,0x07,0x0F,0x1C,0x70,0xC0,0x00,0x00,0x00,0x00,0x00,0x01,0xFF,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xE0,0xE0,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x30,0x60,0xE0,0xE0,0xE0,0x00,0x00,0x00,0x00],#/*"2",2*/
'33':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x07,0x07,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x0F,0x0F,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1F,0xE0,0x80,0x80,0x80,0x80,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0x80,0x80,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x3E,0x0F,0x07,0x03,0x03,0x03,0x07,0x1E,0xE0,0x7C,0x07,0x03,0x01,0x01,0x01,0x01,0x01,0x03,0x1E,0xE0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xC0,0xC0,0xC0,0x80,0x00,0x00,0x00,0x80,0xE0,0xE0,0xF0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"3",3*/
'34':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x18,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x18,0x70,0xC0,0x80,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0E,0x1E,0x7E,0xDE,0x9E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0xFF,0x1E,0x1E,0x1E,0x1E,0x3F,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00],#/*"4",4*/
'35':
[0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00,0x0F,0x0F,0x0F,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x3F,0xE0,0x80,0x00,0x00,0x00,0x00,0x80,0x80,0x00,0x00,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0xFE,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x01,0x01,0x07,0x1F,0xF0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xE0,0xE0,0xF0,0xF0,0xF0,0xF0,0xE0,0xE0,0x80,0x00,0x00,0x00,0x00,0x00,0x00],#/*"5",5*/
'36':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x0F,0x07,0x07,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x01,0x3E,0xE0,0xC0,0x80,0x80,0x00,0x00,0x0F,0x78,0xC0,0x80,0x00,0x00,0x00,0x00,0x80,0xC0,0xE0,0x78,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFC,0x07,0x07,0x07,0x00,0x00,0x00,0x00,0xFF,0x07,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0xF8,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0xF8,0xF8,0xF8,0xF8,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"6",6*/
'37':
[0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x07,0x06,0x0C,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x01,0x03,0x06,0x0C,0x18,0x30,0x70,0xE0,0xC0,0xC0,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"7",7*/
'38':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0F,0x0E,0x0F,0x0F,0x07,0x01,0x00,0x00,0x03,0x07,0x0E,0x1C,0x1C,0x1C,0x0E,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x80,0x00,0x00,0x00,0x80,0xE0,0xFC,0x3F,0x77,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0x0F,0x01,0x00,0x00,0x00,0x00,0x01,0x07,0xF8,0xFC,0x3F,0x07,0x01,0x00,0x00,0x00,0x00,0x01,0x0F,0xF0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x80,0xC0,0xE0,0xF0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"8",8*/
'39':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0F,0x1E,0x1E,0x1E,0x1E,0x1E,0x1F,0x0F,0x03,0x00,0x00,0x00,0x00,0x00,0x07,0x07,0x03,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1F,0xE0,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xE0,0xFF,0x00,0x00,0x00,0x00,0xC0,0xC0,0xE0,0x7F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x1E,0x07,0x03,0x01,0x00,0x00,0x00,0x01,0x03,0x06,0x38,0xE1,0x01,0x01,0x03,0x03,0x07,0x1E,0x78,0x80,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xE0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xE0,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"9",9*/
'20':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*" ",1*/
'3a':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x40,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x40,0x40,0x00,0x00,0x00,0x00]#/*":",1*/
} | 116.136364 | 162 | 0.773092 | dicts={
0xe88f9c:
[0x00,0x00,0x00,0x00,0x00,0x3F,0x00,0x00,0x00,0x00,0x07,0x00,0x02,0x01,0x00,0x00,0x00,0x00,0x7F,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x0C,0x30,0x00,0x00,
0x00,0x00,0x1C,0x18,0x18,0xFF,0x18,0x18,0x10,0x00,0xFF,0x02,0x01,0x81,0xC0,0xC0,0x41,0x01,0xFF,0x07,0x0D,0x19,0x31,0x61,0xC1,0x01,0x01,0x01,0x01,0x00,
0x00,0x00,0x38,0x30,0x30,0xFF,0x30,0x30,0x07,0xFF,0x00,0x01,0x81,0xC3,0xC2,0x86,0xCC,0x88,0xFF,0xA0,0xA0,0x90,0x8C,0x86,0x83,0x81,0x80,0x80,0x00,0x00,
0x00,0x00,0x00,0x00,0x38,0xFC,0x00,0x80,0xC0,0xC0,0x00,0x80,0xC0,0x00,0x00,0x00,0x00,0x18,0xFC,0x00,0x00,0x00,0x00,0x00,0x80,0xF0,0x7C,0x10,0x00,0x00],#/*"菜",0*/
0xe58d95:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x3F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x40,0x20,0x38,0x18,0x0C,0x08,0xFF,0x01,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x00,
0x00,0x00,0x0C,0x0C,0x18,0x10,0x20,0xFF,0x80,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,0x38,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"单",1*/
0xe697b6:
[0x00,0x00,0x00,0x00,0x00,0x10,0x1F,0x18,0x18,0x18,0x18,0x18,0x18,0x1F,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1F,0x18,0x18,0x10,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x30,0xF0,0x30,0x30,0x3F,0x30,0x30,0x30,0xF3,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0xF0,0x30,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0xFF,0x03,0x03,0x03,0x03,0x83,0xC3,0xE3,0x63,0x43,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x3F,0x06,0x04,0x00,
0x00,0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x18,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"时",0*/
0xe9929f:
[0x00,0x00,0x03,0x03,0x03,0x06,0x06,0x05,0x0C,0x08,0x08,0x1F,0x13,0x23,0x43,0x03,0x03,0x3F,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x18,0xE0,0x01,0x01,0x11,0xF9,0x01,0x01,0x01,0x01,0x19,0xE1,0x01,0x00,0x00,0x08,0x10,0x60,0xC0,0x80,0x00,0x00,0x00,0x00,
0x00,0x00,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0xFF,0x86,0x86,0x86,0x86,0x86,0x86,0xFF,0x86,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0xF8,0x10,0x10,0x10,0x10,0x10,0x10,0xF0,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"钟",1*/
0xe997b9:
[0x00,0x00,0x01,0x00,0x00,0x00,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x08,0x00,
0x00,0x00,0x00,0xC0,0xE7,0x60,0x42,0x03,0x01,0x00,0xFF,0x01,0x01,0x01,0x7F,0x61,0x61,0x61,0x61,0x61,0x61,0x61,0x61,0x01,0x01,0x01,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x80,0x80,0x82,0xFF,0x80,0x80,0x80,0xFE,0x86,0x86,0x86,0x86,0x86,0x86,0xBC,0x8C,0x80,0x80,0x80,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x30,0xF0,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0xE0,0xE0,0x00,0x00],#/*"闹",0*/
'30':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x0F,0x0F,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x07,0x7C,0xF0,0xC0,0x80,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0xC0,0xE0,0x7C,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x1E,0x07,0x03,0x01,0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x03,0x07,0x1E,0xE0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xE0,0xF0,0xF0,0xF8,0xF8,0xF8,0xF8,0xF8,0xF0,0xF0,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"0",0*/
'31':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xFF,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x07,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x40,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xF0,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x00,0x00,0x00,0x00],#/*"1",1*/
'32':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x0F,0x0F,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x1F,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x80,0x00,0x00,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x1C,0x70,0xC0,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0x1F,0x03,0x01,0x01,0x01,0x01,0x03,0x07,0x0F,0x1C,0x70,0xC0,0x00,0x00,0x00,0x00,0x00,0x01,0xFF,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xE0,0xE0,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x30,0x60,0xE0,0xE0,0xE0,0x00,0x00,0x00,0x00],#/*"2",2*/
'33':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x07,0x07,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x0F,0x0F,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1F,0xE0,0x80,0x80,0x80,0x80,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0x80,0x80,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x3E,0x0F,0x07,0x03,0x03,0x03,0x07,0x1E,0xE0,0x7C,0x07,0x03,0x01,0x01,0x01,0x01,0x01,0x03,0x1E,0xE0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xC0,0xC0,0xC0,0x80,0x00,0x00,0x00,0x80,0xE0,0xE0,0xF0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"3",3*/
'34':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x18,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x18,0x70,0xC0,0x80,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0E,0x1E,0x7E,0xDE,0x9E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0xFF,0x1E,0x1E,0x1E,0x1E,0x3F,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00],#/*"4",4*/
'35':
[0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00,0x0F,0x0F,0x0F,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x3F,0xE0,0x80,0x00,0x00,0x00,0x00,0x80,0x80,0x00,0x00,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0xFE,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x01,0x01,0x07,0x1F,0xF0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xE0,0xE0,0xF0,0xF0,0xF0,0xF0,0xE0,0xE0,0x80,0x00,0x00,0x00,0x00,0x00,0x00],#/*"5",5*/
'36':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x0F,0x07,0x07,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x01,0x3E,0xE0,0xC0,0x80,0x80,0x00,0x00,0x0F,0x78,0xC0,0x80,0x00,0x00,0x00,0x00,0x80,0xC0,0xE0,0x78,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFC,0x07,0x07,0x07,0x00,0x00,0x00,0x00,0xFF,0x07,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0xF8,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0xF8,0xF8,0xF8,0xF8,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"6",6*/
'37':
[0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x07,0x06,0x0C,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x01,0x03,0x06,0x0C,0x18,0x30,0x70,0xE0,0xC0,0xC0,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"7",7*/
'38':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0F,0x0E,0x0F,0x0F,0x07,0x01,0x00,0x00,0x03,0x07,0x0E,0x1C,0x1C,0x1C,0x0E,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x80,0x00,0x00,0x00,0x80,0xE0,0xFC,0x3F,0x77,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0x0F,0x01,0x00,0x00,0x00,0x00,0x01,0x07,0xF8,0xFC,0x3F,0x07,0x01,0x00,0x00,0x00,0x00,0x01,0x0F,0xF0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x80,0xC0,0xE0,0xF0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"8",8*/
'39':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0F,0x1E,0x1E,0x1E,0x1E,0x1E,0x1F,0x0F,0x03,0x00,0x00,0x00,0x00,0x00,0x07,0x07,0x03,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1F,0xE0,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xE0,0xFF,0x00,0x00,0x00,0x00,0xC0,0xC0,0xE0,0x7F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x1E,0x07,0x03,0x01,0x00,0x00,0x00,0x01,0x03,0x06,0x38,0xE1,0x01,0x01,0x03,0x03,0x07,0x1E,0x78,0x80,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xE0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xE0,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"9",9*/
'20':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*" ",1*/
'3a':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x40,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x40,0x40,0x00,0x00,0x00,0x00]#/*":",1*/
} | 0 | 0 | 0 |
7d4a312a91cae98ca206b778df797f5e09d5ada2 | 3,468 | py | Python | models/losses.py | jlfilho/sr-tf2 | 5309c69d252aad7a8e9260106353fd8acca29c6a | [
"MIT"
] | null | null | null | models/losses.py | jlfilho/sr-tf2 | 5309c69d252aad7a8e9260106353fd8acca29c6a | [
"MIT"
] | null | null | null | models/losses.py | jlfilho/sr-tf2 | 5309c69d252aad7a8e9260106353fd8acca29c6a | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
# computes VGG loss or content loss
| 45.631579 | 233 | 0.674164 | import tensorflow as tf
import numpy as np
class VGGLossNoActivation(object):
def __init__(self, image_shape,loss_fn):
self.model = self.create_model(image_shape)
self.loss_fn = loss_fn
def create_model(self,image_shape):
vgg19 = tf.keras.applications.vgg19.VGG19(include_top=False, weights='imagenet', input_shape=image_shape)
# Replace VGG19's block5_conv4 (whose ReLU is baked into the Keras layer)
# with a fresh linear convolution, so the extracted features carry no
# activation -- hence "NoActivation". Note this new layer is randomly
# initialised; it does not reuse the pretrained block5_conv4 weights.
x = tf.keras.layers.Conv2D(512, (3, 3),padding='same',
name='block5_conv4')(vgg19.get_layer('block5_conv3').output)
model = tf.keras.Model(inputs=vgg19.input, outputs=x)
model.trainable = False
return model
def preprocess_vgg(self, x):
if isinstance(x, np.ndarray):
return tf.keras.applications.vgg19.preprocess_input((x))
else:
return tf.keras.layers.Lambda(lambda x: tf.keras.applications.vgg19.preprocess_input((x)))(x)
# computes VGG loss or content loss
def perceptual_loss(self, y_true, y_pred):
return tf.math.reduce_mean(tf.math.square(self.model(self.preprocess_vgg(y_true)) - self.model(self.preprocess_vgg(y_pred))),None)
def custom_perceptual_loss(self, y_true, y_pred):
# tile single-channel images to the three channels VGG expects
y_true = tf.keras.layers.Concatenate()([y_true, y_true, y_true])
y_pred = tf.keras.layers.Concatenate()([y_pred, y_pred, y_pred])
return self.loss_fn(self.model(self.preprocess_vgg(y_true)),self.model(self.preprocess_vgg(y_pred)))
def euclidean_content_loss(self, y_true, y_pred):
return tf.math.sqrt(tf.math.reduce_sum(tf.math.square(self.model(self.preprocess_vgg(y_true)) - self.model(self.preprocess_vgg(y_pred))), axis=None))
def compoundLoss(self, y_true, y_pred,alfa=10e-2,beta=10e0):
# weighted sum of the perceptual term (mean-squared VGG feature distance)
# and a Euclidean pixel-distance content term
return (alfa * tf.math.reduce_mean(tf.math.square(self.model(self.preprocess_vgg(y_true)) - self.model(self.preprocess_vgg(y_pred))),None) + beta * tf.math.sqrt(tf.math.reduce_sum(tf.math.square(y_pred - y_true), axis=None)))
class GANLoss(object):
def __init__(self, loss_pix, loss_fea, loss_dis, adv_loss, alfa, eta, lbd, mu):
self.loss_pix=loss_pix
self.loss_fea=loss_fea
self.loss_dis=loss_dis
self.adv_loss=adv_loss
self.alfa=alfa
self.eta=eta
self.lbd=lbd
self.mu=mu
def discriminator_loss(self,real_output, fake_output):
# soften the hard 0/1 targets with small uniform noise (noisy labels),
# a common trick to stabilise GAN discriminator training
noise = 0.05 * tf.random.uniform(tf.shape(real_output))
real_loss = self.adv_loss(tf.ones_like(real_output)-noise, real_output)
fake_loss = self.adv_loss(tf.zeros_like(fake_output)+noise, fake_output)
total_loss = 0.5 * (real_loss + fake_loss)
return total_loss
def generative_loss(self, real_output, fake_output, img_hr,img_sr, teacher_img_sr):
loss_dis=self.loss_dis(teacher_img_sr,img_sr)
loss_adv = self.adv_loss(real_output, fake_output)
loss_pix = self.loss_pix(img_hr, img_sr)
#img_hr = tf.keras.layers.Concatenate()([img_hr, img_hr, img_hr])
#img_sr = tf.keras.layers.Concatenate()([img_sr, img_sr, img_sr])
loss_fea = self.loss_fea(img_hr,img_sr)
total_loss = self.alfa * loss_pix + self.eta * loss_fea + self.lbd * loss_dis
return total_loss, self.alfa * loss_pix , self.eta * loss_fea, self.lbd * loss_dis, self.mu*loss_adv
def charbonnier_loss(y_true, y_pred):
return tf.reduce_mean(tf.sqrt(tf.square(y_true - y_pred) + tf.square(1e-3))) | 2,996 | 14 | 360 |
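# Charbonnier penalty: mean(sqrt((y_true - y_pred)^2 + eps^2)) with
# eps = 1e-3, a smooth differentiable approximation of the L1 loss.
# Hypothetical usage sketch (names are illustrative, not part of this module):
#   vgg_loss = VGGLossNoActivation((96, 96, 3), charbonnier_loss)
#   model.compile(optimizer='adam', loss=vgg_loss.perceptual_loss)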
5e138e09c8b64263e8eeb8357515801717bba6fb | 738 | py | Python | pytglib/api/functions/get_suitable_discussion_chats.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/functions/get_suitable_discussion_chats.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/functions/get_suitable_discussion_chats.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class GetSuitableDiscussionChats(Object):
"""
Returns a list of basic group and supergroup chats, which can be used as a discussion group for a channel. Basic group chats need to be first upgraded to supergroups before they can be set as a discussion group
Attributes:
ID (:obj:`str`): ``GetSuitableDiscussionChats``
No parameters required.
Returns:
Chats
Raises:
:class:`telegram.Error`
"""
ID = "getSuitableDiscussionChats"
@staticmethod
| 23.806452 | 214 | 0.663957 |
from ..utils import Object
class GetSuitableDiscussionChats(Object):
"""
Returns a list of basic group and supergroup chats, which can be used as a discussion group for a channel. Basic group chats need to be first upgraded to supergroups before they can be set as a discussion group
Attributes:
ID (:obj:`str`): ``GetSuitableDiscussionChats``
No parameters required.
Returns:
Chats
Raises:
:class:`telegram.Error`
"""
ID = "getSuitableDiscussionChats"
def __init__(self, extra=None, **kwargs):
self.extra = extra
@staticmethod
def read(q: dict, *args) -> "GetSuitableDiscussionChats":
# the request carries no parameters, so nothing needs to be parsed from q
return GetSuitableDiscussionChats()
| 149 | 0 | 53 |