hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
725c4ff3713f6ec63c248cb9d509fa4ef3f2fc51 | 5,418 | py | Python | switch_model/hawaii/fuel_markets_expansion.py | ashutosh-pande/switch3 | 769d25a42c8323f24740567aa15c980f905a03e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | switch_model/hawaii/fuel_markets_expansion.py | ashutosh-pande/switch3 | 769d25a42c8323f24740567aa15c980f905a03e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | switch_model/hawaii/fuel_markets_expansion.py | ashutosh-pande/switch3 | 769d25a42c8323f24740567aa15c980f905a03e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # For large systems, each fuel market tier is a category of capacity expansion, and
# it can be built fractionally. For small systems, each fuel market tier is one
# capacity-expansion project, and it must be fully built and/or activated each period.
# To do this, we add binary variables and confine additions and activations to match them.
# Each tier has a capital cost and duration (locked in if it is developed)
# and a fixed and variable cost. Variable costs are already shown in fuel_markets.py,
# and this module adds fixed costs (some economies of scale, but assuming 100% salvage
# value at all times, i.e., projects can be deactivated without losing any capital cost.)
# Later we may add a more complete capital cost system.
import os
from pyomo.environ import *
inf = float('inf')
| 51.6 | 138 | 0.684755 | # For large systems, each fuel market tier is a category of capacity expansion, and
# it can be built fractionally. For small systems, each fuel market tier is one
# capacity-expansion project, and it must be fully built and/or activated each period.
# To do this, we add binary variables and confine additions and activations to match them.
# Each tier has a capital cost and duration (locked in if it is developed)
# and a fixed and variable cost. Variable costs are already shown in fuel_markets.py,
# and this module adds fixed costs (some economies of scale, but assuming 100% salvage
# value at all times, i.e., projects can be deactivated without losing any capital cost.)
# Later we may add a more complete capital cost system.
import os
from pyomo.environ import *
inf = float('inf')
def define_components(m):
    """Add all-or-nothing fuel supply tier expansion decisions to model `m`.

    Each regional fuel market (RFM) supply tier gets a binary build decision
    (RFMBuildSupplyTier) and an activation level (RFMSupplyTierActivate);
    active tiers incur a fixed cost proportional to their capacity limit,
    which is appended to the model's per-period cost components.
    """
    # eventually this should be extended to include capital costs and fixed lifetimes
    # for fuel supply infrastructure, but then it gets fairly complicated (equivalent
    # to the project build / activate / operate system)
    # Maybe we can setup some sort of inheritance system for different types of object
    # -- base capital assets, which could then be power production projects (of which some
    # are generators (fuel-based or intermittent), and some are storage), fuel-supply projects,
    # transmission lines, etc.

    # fixed cost (per mmBtu/year of capacity) of having each tier in service during each period
    # note: this must be zero if a tier has unlimited capacity, to avoid having infinite cost
    m.rfm_supply_tier_fixed_cost = Param(m.RFM_SUPPLY_TIERS, default=0.0,
        validate=lambda m, v, r, p, st: v == 0.0 or m.rfm_supply_tier_limit[r, p, st] < inf)

    # lifetime for each tier, once it is placed in service
    # (default is one period)
    m.rfm_supply_tier_max_age = Param(m.RFM_SUPPLY_TIERS, default=lambda m, r, p, st: m.period_length_years[p])

    # Note: in large regions, a tier represents a block of expandable capacity,
    # so this could be continuous, but then you could just lump the fixed cost
    # into the variable cost and not use this module.
    m.RFMBuildSupplyTier = Var(m.RFM_SUPPLY_TIERS, within=Binary)

    # will the tier be active during each period?
    m.RFMSupplyTierActivate = Var(m.RFM_SUPPLY_TIERS, within=PercentFraction)

    # force activation to match build decision: a tier is active in period p
    # exactly when some vintage built earlier is still within its max age
    m.RFM_Build_Activate_Consistency = Constraint(m.RFM_SUPPLY_TIERS, rule=lambda m, r, p, st:
        m.RFMSupplyTierActivate[r, p, st]
        ==
        sum(
            m.RFMBuildSupplyTier[r, vintage, st]
            for vintage in m.PERIODS
            if vintage < m.period_start[p] + m.period_length_years[p] # starts before end of current period
            and vintage + m.rfm_supply_tier_max_age[r, vintage, st] > m.period_start[p] # ends after start of current period
        )
    )

    # force all unlimited tiers to be activated (since they must have no cost,
    # and to avoid a limit of 0.0 * inf in the constraint below)
    m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint(m.RFM_SUPPLY_TIERS,
        rule=lambda m, r, p, st:
            (m.RFMSupplyTierActivate[r, p, st] == 1) if (m.rfm_supply_tier_limit[r, p, st] == inf)
            else Constraint.Skip
    )

    # only allow delivery from activated tiers
    # (and skip unlimited tiers to avoid a complaint by glpk about these)
    # note: this could be merged with the previous constraint, since they are complementary
    m.Enforce_RFM_Supply_Tier_Activated = Constraint(
        m.RFM_SUPPLY_TIERS,
        rule=lambda m, r, p, st:
            (
                m.ConsumeFuelTier[r, p, st]
                <=
                m.RFMSupplyTierActivate[r, p, st] * m.rfm_supply_tier_limit[r, p, st]
            ) if m.rfm_supply_tier_limit[r, p, st] < inf else Constraint.Skip
    )

    # Eventually, when we add capital costs for capacity expansion, we will need a
    # variable showing how much of each tier to build each period (and then the upper
    # limit could be a lifetime limit rather than a limit on what can be added each
    # period). Then we may want to make the expansion variable Binary for small systems
    # and continuous for large systems. That could be done by building a shadow list
    # of binary variables and constraining the actual decisions to match the binary
    # version if some flag is set in the data.

    # annual fixed cost = fixed cost rate * activation fraction * tier capacity limit
    m.RFM_Fixed_Costs_Annual = Expression(
        m.PERIODS,
        rule=lambda m, p: sum(
            (
                # note: we dance around projects with unlimited supply and 0.0 fixed cost
                0.0 if m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0
                else m.rfm_supply_tier_fixed_cost[rfm_st]
                    * m.RFMSupplyTierActivate[rfm_st] * m.rfm_supply_tier_limit[rfm_st]
            )
            for r in m.REGIONAL_FUEL_MARKETS
            for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[r, p]))
    m.Cost_Components_Per_Period.append('RFM_Fixed_Costs_Annual')
def load_inputs(m, switch_data, inputs_dir):
    """Load optional fixed-cost / max-age columns for fuel supply tiers.

    Reads fuel_supply_curves.tab from `inputs_dir` (if present) and fills
    the rfm_supply_tier_fixed_cost and rfm_supply_tier_max_age params.
    """
    data_file = os.path.join(inputs_dir, 'fuel_supply_curves.tab')
    switch_data.load_aug(
        filename=data_file,
        optional=True,
        param=(m.rfm_supply_tier_fixed_cost, m.rfm_supply_tier_max_age),
        select=('regional_fuel_market', 'period', 'tier', 'fixed_cost', 'max_age'),
    )
| 4,576 | 0 | 46 |
35b986ad00605141edcdf85fe76d44b9e67a9062 | 4,384 | py | Python | pyntpg/dataset_tabs/file_picker.py | 5tefan/py-netcdf-timeseries-gui | 47a46c576eef51e7b840b1cd48f58fc508b6beda | [
"MIT"
] | null | null | null | pyntpg/dataset_tabs/file_picker.py | 5tefan/py-netcdf-timeseries-gui | 47a46c576eef51e7b840b1cd48f58fc508b6beda | [
"MIT"
] | 5 | 2019-03-14T20:22:45.000Z | 2021-03-11T20:14:52.000Z | pyntpg/dataset_tabs/file_picker.py | 5tefan/py-netcdf-timeseries-gui | 47a46c576eef51e7b840b1cd48f58fc508b6beda | [
"MIT"
] | null | null | null | from PyQt5.QtCore import pyqtSignal, Qt, QSize
from PyQt5.QtWidgets import QLabel, QListWidget, QListWidgetItem, QFileDialog
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QStyle, QSizePolicy, QPushButton, QAbstractItemView
# For testing individual widget
if __name__ == "__main__":
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
main = FilePicker()
main.show()
exit(app.exec_())
| 44.282828 | 115 | 0.683622 | from PyQt5.QtCore import pyqtSignal, Qt, QSize
from PyQt5.QtWidgets import QLabel, QListWidget, QListWidgetItem, QFileDialog
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QStyle, QSizePolicy, QPushButton, QAbstractItemView
class FilePicker(QWidget):
    """Widget showing a list of chosen files with Add/Remove buttons.

    Emits `selected_files` (list of path strings) after every change.
    """
    # Signal carrying the full, current list of file paths.
    selected_files = pyqtSignal(list)

    def __init__(self):
        super(FilePicker, self).__init__()
        self.layout = QVBoxLayout()
        self.layout.setSpacing(0)
        self.setLayout(self.layout)
        # Add buttons for to add and remove files from the list
        buttons = QWidget()
        self.buttons_layout = QHBoxLayout()
        buttons.setLayout(self.buttons_layout)
        file_icon = QLabel()
        file_icon.setPixmap(self.style().standardIcon(QStyle.SP_DialogOpenButton).pixmap(10))
        self.buttons_layout.addWidget(file_icon)
        add_file = QPushButton("Add Files")
        add_file.setIcon(add_file.style().standardIcon(QStyle.SP_DialogOpenButton))
        add_file.setIconSize(QSize(10, 10))
        add_file.clicked.connect(self.add_file_clicked)
        self.buttons_layout.addWidget(add_file)
        #self.buttons_layout.addStretch()
        self.remove_file = QPushButton("Remove File")
        self.remove_file.setIcon(self.remove_file.style().standardIcon(QStyle.SP_DialogCloseButton))
        self.remove_file.setIconSize(QSize(10, 10))
        self.remove_file.clicked.connect(self.remove_file_clicked)
        # Hidden until there is at least one file to remove.
        self.remove_file.setVisible(False)
        self.buttons_layout.addWidget(self.remove_file)
        buttons.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
        self.layout.addWidget(buttons)
        # Create the actual file listing
        self.filelist = QListWidget()
        self.filelist.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)
        # self.filelist.setMaximumHeight(300)
        # self.filelist.setMinimumHeight(300)
        self.filelist.setGridSize(QSize(100, 20))
        self.filelist.setTextElideMode(Qt.ElideMiddle)
        self.filelist.setSelectionMode(QAbstractItemView.MultiSelection)
        self.filelist.itemClicked.connect(self.file_item_clicked)
        self.filelist.itemSelectionChanged.connect(self.file_item_clicked)
        self.layout.addWidget(self.filelist)

    def add_file_clicked(self):
        """Open a file dialog and append any newly chosen files (no duplicates)."""
        # Add files to the list of selected files
        file_list = [i.text() for i in self.filelist.findItems("", Qt.MatchContains)]
        file_names = QFileDialog.getOpenFileNames(None, "Open files", "~")[0]  # [0] is the list of files selected
        if len(file_names) > 0:  # file_names will be empty if cancel pressed
            self.remove_file.setVisible(True)
            file_names = [str(name) for name in file_names]  # Convert to Python string
            for file_name in file_names:
                if file_name not in file_list:
                    item = QListWidgetItem(file_name)
                    item.setTextAlignment(Qt.AlignLeft)
                    self.filelist.addItem(item)
                    self.filelist.setCurrentIndex(self.filelist.indexFromItem(item))
            self.file_item_clicked()
            self.emit_file_list()

    def remove_file_clicked(self):
        """Remove the currently selected items from the list."""
        # Remove a file from the list of selected files
        for item in self.filelist.selectedItems():
            self.filelist.takeItem(self.filelist.row(item))
        if len(self.filelist) == 0:
            # Nothing left to remove, so hide the button again.
            self.remove_file.setHidden(True)
        else:
            self.filelist.setCurrentIndex(self.filelist.currentIndex())
        self.emit_file_list()

    def file_item_clicked(self):
        """Keep the remove button's label plural in sync with the selection size."""
        # Depending on the number of items selected, make sure the
        # remove files button has the correct plural
        if len(self.filelist.selectedItems()) > 1:
            self.remove_file.setText("Remove Files")
        else:
            self.remove_file.setText("Remove File")

    def emit_file_list(self):
        """Emit `selected_files` with every path currently in the widget."""
        # updated_file_list = [i.text() for i in self.filelist.findItems("", Qt.MatchContains)]
        updated_file_list = [self.filelist.item(i).text() for i in range(self.filelist.count())]
        self.selected_files.emit(updated_file_list)  # empty list is ok
# For testing individual widget
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication

    # Minimal harness: show the FilePicker alone inside its own event loop.
    app = QApplication(sys.argv)
    main = FilePicker()
    main.show()
    # exec_() blocks until the window closes; propagate its exit status.
    exit(app.exec_())
| 3,724 | 178 | 23 |
72de97fcc221369113f3d1d8e8942f5c5862693c | 977 | py | Python | utils/callbacks.py | lukasgd/cosmoflow-benchmark | 2823a308c80827b900214acfe48275858ea4d2c4 | [
"BSD-3-Clause-LBNL"
] | null | null | null | utils/callbacks.py | lukasgd/cosmoflow-benchmark | 2823a308c80827b900214acfe48275858ea4d2c4 | [
"BSD-3-Clause-LBNL"
] | null | null | null | utils/callbacks.py | lukasgd/cosmoflow-benchmark | 2823a308c80827b900214acfe48275858ea4d2c4 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-12-15T10:24:13.000Z | 2020-12-15T10:24:13.000Z | """
This module contains some utility callbacks for Keras training.
"""
# System
from time import time
# Externals
import tensorflow as tf
class TimingCallback(tf.keras.callbacks.Callback):
"""A Keras Callback which records the time of each epoch"""
#class LearningRateScheduleCallback(tf.keras.callbacks.Callback):
# def __init__(self, multiplier,
# start_epoch=0, end_epoch=None,
# momentum_correction=True):
# super().__init__()
# self.start_epoch = start_epoch
# self.end_epoch = end_epoch
# self.momentum_correction = momentum_correction
# self.initial_lr = None
# self.restore_momentum = None
| 28.735294 | 65 | 0.653019 | """
This module contains some utility callbacks for Keras training.
"""
# System
from time import time
# Externals
import tensorflow as tf
class TimingCallback(tf.keras.callbacks.Callback):
"""A Keras Callback which records the time of each epoch"""
def __init__(self):
self.times = []
def on_epoch_begin(self, epoch, logs={}):
self.starttime = time()
def on_epoch_end(self, epoch, logs={}):
epoch_time = time() - self.starttime
self.times.append(epoch_time)
logs['time'] = epoch_time
#class LearningRateScheduleCallback(tf.keras.callbacks.Callback):
# def __init__(self, multiplier,
# start_epoch=0, end_epoch=None,
# momentum_correction=True):
# super().__init__()
# self.start_epoch = start_epoch
# self.end_epoch = end_epoch
# self.momentum_correction = momentum_correction
# self.initial_lr = None
# self.restore_momentum = None
| 209 | 0 | 84 |
2734a96bcfa2700d55f2008b598a89c3b8bbd4d5 | 127 | py | Python | market/apps/orders/apps.py | QuiZ-Dev/django_web | eba14589e959d35ff5bd799bb6075a3b5bb91638 | [
"Unlicense"
] | null | null | null | market/apps/orders/apps.py | QuiZ-Dev/django_web | eba14589e959d35ff5bd799bb6075a3b5bb91638 | [
"Unlicense"
] | null | null | null | market/apps/orders/apps.py | QuiZ-Dev/django_web | eba14589e959d35ff5bd799bb6075a3b5bb91638 | [
"Unlicense"
] | null | null | null | from django.apps import AppConfig
| 18.142857 | 33 | 0.732283 | from django.apps import AppConfig
class OrdersConfig(AppConfig):
    """Django application config for the orders app."""
    # Full dotted path to the application package.
    name = 'market.apps.orders'
    # Human-readable name shown in the Django admin ("Orders", in Russian).
    verbose_name = 'Заказы'
| 0 | 75 | 23 |
809b104103fa2d4ee3d5206465c5502fd81164e3 | 285 | py | Python | twisted/plugins/otter_metrics.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 20 | 2015-02-11T16:32:07.000Z | 2019-11-12T03:27:54.000Z | twisted/plugins/otter_metrics.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 1,145 | 2015-01-01T00:00:47.000Z | 2022-02-11T03:40:39.000Z | twisted/plugins/otter_metrics.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 29 | 2015-01-08T15:00:11.000Z | 2021-02-16T16:33:53.000Z | """
Otter twisted application plugins for the various services.
"""
from twisted.application.service import ServiceMaker
OtterMetrics = ServiceMaker(
"Otter Metrics Collector",
"otter.metrics",
"Collects metrics for a region on an interval basis",
"otter-metrics"
)
| 20.357143 | 59 | 0.733333 | """
Otter twisted application plugins for the various services.
"""
from twisted.application.service import ServiceMaker
OtterMetrics = ServiceMaker(
"Otter Metrics Collector",
"otter.metrics",
"Collects metrics for a region on an interval basis",
"otter-metrics"
)
| 0 | 0 | 0 |
168565f7227e392a29b126d620a0f1f7a7f5ffaa | 983 | py | Python | jobs/common.py | nSimonFR/spoken_language_dataset | 07c018f28be72cec3ba5e9ec07608f79a6d32031 | [
"MIT"
] | 23 | 2018-06-25T10:22:57.000Z | 2021-07-09T09:53:47.000Z | jobs/common.py | nSimonFR/spoken_language_dataset | 07c018f28be72cec3ba5e9ec07608f79a6d32031 | [
"MIT"
] | 3 | 2018-07-19T18:47:07.000Z | 2021-06-01T22:11:53.000Z | jobs/common.py | nSimonFR/spoken_language_dataset | 07c018f28be72cec3ba5e9ec07608f79a6d32031 | [
"MIT"
] | 6 | 2018-07-14T17:48:51.000Z | 2020-12-24T01:31:41.000Z | import os
import shutil
import requests
| 19.27451 | 53 | 0.704985 | import os
import shutil
import requests
def remove_extension(file):
    """Return *file* with its final extension stripped (directory part kept)."""
    root, _ext = os.path.splitext(file)
    return root
def get_filename(file):
    """Return the base name of *file*, without directory or extension."""
    base = os.path.basename(file)
    return os.path.splitext(base)[0]
def get_dirname(file):
    """Return the directory component of *file* ('' when there is none)."""
    head, _tail = os.path.split(file)
    return head
def append_suffix_to_filename(file, suffix):
    """Insert *suffix* between the root of *file* and its extension."""
    root, ext = os.path.splitext(file)
    return '{0}{1}{2}'.format(root, suffix, ext)
def change_extension(file, new_extension):
    """Return *file* with its extension replaced by *new_extension* (no leading dot)."""
    root = os.path.splitext(file)[0]
    return '.'.join((root, new_extension))
def remove_file(file):
    """Delete the file at *file* (raises OSError if it does not exist)."""
    os.unlink(file)
def rename_file(src, dst):
    """Rename/move *src* to *dst* via os.rename.

    NOTE(review): os.rename can fail across filesystems on some platforms;
    callers appear to use it within one directory tree.
    """
    os.rename(src, dst)
def remove_directory(path):
    """Recursively delete the directory *path* and everything under it."""
    shutil.rmtree(path)
def create_directory(path):
    """Create directory *path* unless a directory already exists there."""
    if os.path.isdir(path):
        return
    os.mkdir(path)
def fetch(url, output_file):
    """Download *url* to *output_file*, streaming the body to disk.

    NOTE(review): no status check is performed — a 404/500 response body
    would be written to output_file as if it were the real payload.
    """
    print("Downloading {0}".format(url))
    response = requests.get(url, stream=True)
    with open(output_file, 'wb') as file:
        # Stream raw bytes in chunks instead of loading the body in memory.
        shutil.copyfileobj(response.raw, file)
    # Drop the reference so the connection can be released promptly.
    del response
| 703 | 0 | 230 |
e0cc409326c892b874fc79ca394dfd56968e25c0 | 4,065 | py | Python | anpcp/utils.py | netotz/alpha-neighbor-p-center-problem | 9ebe2718b749dcfc07884063bc118734457e36a9 | [
"MIT"
] | 2 | 2021-06-26T22:16:37.000Z | 2022-02-04T16:44:38.000Z | anpcp/utils.py | netotz/alpha-neighbor-p-center-problem | 9ebe2718b749dcfc07884063bc118734457e36a9 | [
"MIT"
] | 11 | 2021-08-31T15:09:28.000Z | 2022-03-26T06:55:25.000Z | anpcp/utils.py | netotz/alpha-neighbor-p-center-problem | 9ebe2718b749dcfc07884063bc118734457e36a9 | [
"MIT"
] | null | null | null | import timeit
from typing import Any, Dict, Iterable, Mapping, Callable, Optional, Set
from models.solver import Solver
import pandas as pd
def get_stats_df(
solvers: Iterable[Solver],
constructive: Optional[Callable[..., Set[int]]],
local_search: Callable[..., Set[int]],
args: Mapping[str, Any]
) -> pd.DataFrame:
'''
TODO: Refactor to include Solution class.
Formats the statistics of each solver as a DataFrame.
Both `constructive` and `local_search` need to be a method of `Solver` class,
e.g. `constructive=Solver.pdp_based`.
`args` is a dictionary of custom arguments for local search methods.
'''
data = list()
for solver in solvers:
start = timeit.default_timer()
if constructive:
constructive(solver)
constructive_name = constructive.__name__
else:
solver.set_random_solution()
constructive_time = 0
constructive_name = "random"
constructive_time = timeit.default_timer() - start
constructive_of = solver.objective_function
start = timeit.default_timer()
if local_search:
local_search(solver, **args)
local_search_time = timeit.default_timer() - start
strategy = 'first' if args['is_first'] else 'best'
local_search_name = f"{local_search.__name__}_{strategy}_{args['k']}"
data.append((
solver.instance.n,
solver.p,
solver.alpha,
constructive_name,
constructive_of,
constructive_time,
local_search_name,
solver.objective_function,
local_search_time
))
common_cols = ('heuristic', 'OF', 'seconds')
df = pd.DataFrame(
data,
columns=(
'n', 'p', 'alpha',
*common_cols * 2
)
)
params = df.loc[:, ('n', 'p', 'alpha')]
# create column multiindex
params = pd.concat({'instance': params}, axis=1)
stats = (
# create column multiindexes
pd.concat(
{
'constructive': df.iloc[:, [3, 4, 5]],
'local search': df.iloc[:, [6, 7, 8]]
},
axis=1
)
.join(params)
# reorder multiindexes and columns
.loc[:, (
('instance', 'constructive', 'local search'),
('n', 'p', 'alpha', *common_cols)
)]
)
stats['improvement', 'absolute'] = stats['constructive', 'OF'] - stats['local search', 'OF']
stats['improvement', 'relative %'] = (stats['improvement', 'absolute'] / stats['constructive', 'OF']) * 100
return stats
def add_improvement_stats(dataframe: pd.DataFrame) -> pd.DataFrame:
'''
Adds how many improvements were made and the average of results.
`dataframe` needs to be the return value of `get_stats_df`,
but filtered by instance paramaters.
'''
stats = dataframe.copy()
improved = [
'', '', '', '', '', '', '',
stats[stats['improvement', 'absolute'] > 0].count()[0],
'', '', ''
]
average = [
stats[top, sub].mean()
if sub in {'OF', 'seconds', 'absolute', 'relative %'}
else ''
for top, sub in stats.columns
]
stats.loc['number improved'] = improved
stats.loc['average'] = average
return stats
| 29.671533 | 111 | 0.534563 | import timeit
from typing import Any, Dict, Iterable, Mapping, Callable, Optional, Set
from models.solver import Solver
import pandas as pd
def get_stats_df(
        solvers: Iterable[Solver],
        constructive: Optional[Callable[..., Set[int]]],
        local_search: Callable[..., Set[int]],
        args: Mapping[str, Any]
) -> pd.DataFrame:
    '''
    TODO: Refactor to include Solution class.

    Formats the statistics of each solver as a DataFrame.

    Both `constructive` and `local_search` need to be a method of `Solver` class,
    e.g. `constructive=Solver.pdp_based`.

    `args` is a dictionary of custom arguments for local search methods
    (must contain at least 'is_first' and 'k').

    The result has MultiIndex columns grouped as 'instance', 'constructive',
    'local search' and 'improvement'.
    '''
    data = list()
    for solver in solvers:
        start = timeit.default_timer()
        if constructive:
            constructive(solver)
            constructive_name = constructive.__name__
        else:
            solver.set_random_solution()
            # NOTE(review): this 0 is immediately overwritten by the
            # timer difference below — likely a latent bug to revisit.
            constructive_time = 0
            constructive_name = "random"
        constructive_time = timeit.default_timer() - start
        constructive_of = solver.objective_function

        start = timeit.default_timer()
        if local_search:
            local_search(solver, **args)
        local_search_time = timeit.default_timer() - start
        strategy = 'first' if args['is_first'] else 'best'
        local_search_name = f"{local_search.__name__}_{strategy}_{args['k']}"

        # one row per solver: instance params, constructive stats, LS stats
        data.append((
            solver.instance.n,
            solver.p,
            solver.alpha,
            constructive_name,
            constructive_of,
            constructive_time,
            local_search_name,
            solver.objective_function,
            local_search_time
        ))

    common_cols = ('heuristic', 'OF', 'seconds')
    df = pd.DataFrame(
        data,
        columns=(
            'n', 'p', 'alpha',
            *common_cols * 2
        )
    )
    params = df.loc[:, ('n', 'p', 'alpha')]
    # create column multiindex
    params = pd.concat({'instance': params}, axis=1)

    stats = (
        # create column multiindexes
        pd.concat(
            {
                # columns 3-5 are the constructive triple, 6-8 the LS triple
                'constructive': df.iloc[:, [3, 4, 5]],
                'local search': df.iloc[:, [6, 7, 8]]
            },
            axis=1
        )
        .join(params)
        # reorder multiindexes and columns
        .loc[:, (
            ('instance', 'constructive', 'local search'),
            ('n', 'p', 'alpha', *common_cols)
        )]
    )
    stats['improvement', 'absolute'] = stats['constructive', 'OF'] - stats['local search', 'OF']
    stats['improvement', 'relative %'] = (stats['improvement', 'absolute'] / stats['constructive', 'OF']) * 100

    return stats
def add_improvement_stats(dataframe: pd.DataFrame) -> pd.DataFrame:
    '''
    Adds how many improvements were made and the average of results.

    `dataframe` needs to be the return value of `get_stats_df`,
    but filtered by instance paramaters.

    Appends two summary rows: 'number improved' (count of rows with a
    positive absolute improvement) and 'average' (mean of each numeric
    column).
    '''
    stats = dataframe.copy()
    # Row aligned positionally with the 11 stats columns; only the
    # 8th slot ('improvement', 'absolute' count) carries a value.
    improved = [
        '', '', '', '', '', '', '',
        stats[stats['improvement', 'absolute'] > 0].count()[0],
        '', '', ''
    ]
    # Mean only for numeric subcolumns; blanks elsewhere.
    average = [
        stats[top, sub].mean()
        if sub in {'OF', 'seconds', 'absolute', 'relative %'}
        else ''
        for top, sub in stats.columns
    ]
    stats.loc['number improved'] = improved
    stats.loc['average'] = average
    return stats
def filter_dataframe(dataframe: pd.DataFrame) -> Dict[int, Any]:
    '''
    Splits the stats dataframe by instance parameters into nested dicts
    keyed n -> p -> alpha, each leaf summarized via `add_improvement_stats`.
    '''
    return {
        n: {
            p: {
                alpha: add_improvement_stats(
                    dataframe[
                        (dataframe['instance', 'n'] == n) &
                        (dataframe['instance', 'p'] == p) &
                        (dataframe['instance', 'alpha'] == alpha)
                    ]
                )
                for alpha in dataframe['instance', 'alpha'].unique()
            }
            for p in dataframe['instance', 'p'].unique()
        }
        for n in dataframe['instance', 'n'].unique()
    }
d9419561cd99d1600af603d6ded1ba6a38df456a | 934 | py | Python | server/app/utils/jwt.py | SheetWithoutShit/sws-server | 16b7154188f08b33f84d88caea217673cf989b2b | [
"MIT"
] | null | null | null | server/app/utils/jwt.py | SheetWithoutShit/sws-server | 16b7154188f08b33f84d88caea217673cf989b2b | [
"MIT"
] | 57 | 2020-08-01T14:58:59.000Z | 2020-09-20T13:27:16.000Z | server/app/utils/jwt.py | SpentlessInc/spentless-server | 16b7154188f08b33f84d88caea217673cf989b2b | [
"MIT"
] | null | null | null | """This module provides helper functionality with JWT."""
from datetime import datetime
import jwt
from app.utils.errors import TokenError
def generate_token(secret_key, private_claims=None, exp_days=None):
"""Return encoded json web token."""
token_exp = None
now = int(datetime.now().timestamp())
payload = {"iat": now}
if exp_days is not None:
token_exp = now + (exp_days * 60 * 60 * 24)
payload.update({"exp": token_exp})
if private_claims:
payload.update(private_claims)
token = jwt.encode(payload, secret_key).decode("UTF-8")
return token, token_exp
def decode_token(token, secret_key):
"""Return decoded payload from json web token."""
try:
return jwt.decode(token, secret_key)
except jwt.DecodeError:
raise TokenError("The token is invalid.")
except jwt.ExpiredSignatureError:
raise TokenError("The token has expired.")
| 26.685714 | 67 | 0.679872 | """This module provides helper functionality with JWT."""
from datetime import datetime
import jwt
from app.utils.errors import TokenError
def generate_token(secret_key, private_claims=None, exp_days=None):
    """Build a signed JWT; return (token string, expiration timestamp or None)."""
    issued_at = int(datetime.now().timestamp())
    expires_at = None

    payload = {"iat": issued_at}
    if exp_days is not None:
        expires_at = issued_at + exp_days * 24 * 60 * 60
        payload["exp"] = expires_at
    if private_claims:
        payload.update(private_claims)

    encoded = jwt.encode(payload, secret_key).decode("UTF-8")
    return encoded, expires_at
def decode_token(token, secret_key):
    """Decode *token* and return its payload; raise TokenError if invalid/expired."""
    try:
        payload = jwt.decode(token, secret_key)
    except jwt.DecodeError:
        raise TokenError("The token is invalid.")
    except jwt.ExpiredSignatureError:
        raise TokenError("The token has expired.")
    return payload
| 0 | 0 | 0 |
77f18ed4256f973128a7f7ad9247e1d005ef8599 | 8,409 | py | Python | lib/redfin/api.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 206 | 2015-10-15T07:05:08.000Z | 2021-02-19T11:48:36.000Z | lib/redfin/api.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 8 | 2017-10-16T10:18:31.000Z | 2022-03-09T14:24:27.000Z | lib/redfin/api.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 61 | 2015-10-15T08:12:44.000Z | 2022-03-10T12:25:06.000Z | # Python Standard Library Imports
import json
# Third Party (PyPI) Imports
import requests
import rollbar
# HTK Imports
from htk.lib.redfin.constants import *
from htk.utils.urls import build_url_with_query_params
| 43.123077 | 974 | 0.609704 | # Python Standard Library Imports
import json
# Third Party (PyPI) Imports
import requests
import rollbar
# HTK Imports
from htk.lib.redfin.constants import *
from htk.utils.urls import build_url_with_query_params
class RedfinAPI(object):
    """Thin client for Redfin's unofficial "stingray" JSON endpoints.

    Responses are JSON prefixed with an anti-XSSI guard (`{}&&`), which is
    why the parsers below strip the first 4 characters before json.loads.
    """

    def __init__(self):
        pass

    def _get_api_request(self, endpoint_name, params):
        """Issue a GET to the named endpoint with browser-like headers.

        `endpoint_name` is resolved via REDFIN_API_ENDPOINTS; the Referer is
        set to the home-worth page so the request resembles in-site traffic.
        Returns the raw requests.Response (3 s timeout).
        """
        url = '%s%s' % (REDFIN_API_BASE_URL, self._get_api_endpoint_url(endpoint_name),)
        referrer_url = self.get_home_worth_url(params.get('propertyId'), params.get('listingId'))
        headers = {
            'Accept' : '*/*',
            #'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding' : 'gzip, deflate, br',
            'Accept-Language' : 'en-US,en;q=0.9',
            'Cache-Control' : 'no-cache',
            'Connection' : 'keep-alive',
            'Content-Type' : 'application/json',
            'Host' : 'www.redfin.com',
            'Pragma' : 'no-cache',
            'Upgrade-Insecure-Requests' : '1',
            'Referer' : referrer_url,
            'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
        }
        # NOTE(review): auth cookies are sent empty — presumably anonymous
        # access suffices for these endpoints; confirm if auth is required.
        cookies = {
            'RF_BROWSER_ID' : '',
            'RF_ACCESS_LEVEL' : '',
            'RF_AUTH' : '',
            'RF_LAST_ACCESS' : '',
            'RF_SECURE_AUTH' : '',
            'RF_W_AUTH' : '',
        }
        response = requests.get(url, params=params, headers=headers, cookies=cookies, timeout=3)
        #print(response.text)
        return response

    def _get_api_endpoint_url(self, endpoint_name):
        """Map an endpoint name to its URL path (None if unknown)."""
        url = REDFIN_API_ENDPOINTS.get(endpoint_name)
        return url

    def get_home_worth_url(self, property_id, listing_id=None):
        """https://www.redfin.com/what-is-my-home-worth?propertyId={property_id}&listingId={listing_id}

        Used both as a shareable URL and as the Referer for API calls.
        """
        base_url = 'https://www.redfin.com/what-is-my-home-worth'
        params = {
            'propertyId' : property_id,
            'listingId' : listing_id,
        }
        # NOTE(review): `url` below is unused — only the PreparedRequest
        # result is returned; one of the two mechanisms could be removed.
        url = build_url_with_query_params(base_url, params)
        r = requests.PreparedRequest()
        r.prepare_url(base_url, params)
        return r.url

    def get_property_listing_id(self, property_id):
        """Get property listing id

        https://www.redfin.com/stingray/do/api-get-property-listing-id?propertyId={property_id}

        Response:
        {}&&{"resultCode":0,"errorMessage":"Success","version":156,"payload":{"listingId":"123456789"}}

        Returns the listing id string, or None on a non-200 response or a
        payload without a listingId.
        """
        params = {
            'propertyId' : property_id,
        }
        response = self._get_api_request('get_property_listing_id', params)
        if response.status_code == 200:
            # strip the `{}&&` anti-XSSI prefix before parsing
            response_json = json.loads(response.text[4:])
            listing_id = response_json.get('payload', {}).get('listingId', None)
        else:
            listing_id = None
        return listing_id

    def get_avm(self, property_id, listing_id=None):
        """Get AVM for `property_id`

        AVM = Automated Valuation Model

        https://www.redfin.com/stingray/api/home/details/avm?propertyId={property_id}&listingId={listing_id}&accessLevel=3

        A successful response looks like
        `{}&&{"errorMessage":"Success","resultCode":0,"payload":{...}}`
        where `payload` carries `predictedValue`, `predictedValueHistorical`,
        `lastSoldPrice`, `latLong`, `streetAddress`, `comparables`, etc.

        Always returns a RedfinAVM (built from an empty payload on failure);
        missing payloads are reported to Rollbar.
        """
        if listing_id is None:
            listing_id = self.get_property_listing_id(property_id)
        params = {
            'propertyId' : property_id,
            'listingId' : listing_id,
            'accessLevel' : 1,
        }
        response = self._get_api_request('get_avm', params)
        if response.status_code == 200:
            response_json = json.loads(response.text[4:])
            avm_data = response_json.get('payload', None)
            if avm_data is None:
                extra_data = {
                    'property_id' : property_id,
                    'listing_id' : listing_id,
                }
                rollbar.report_message('Redfin API Missing AVM Payload', 'info', extra_data=extra_data)
                avm_data = {}
        else:
            avm_data = {}
        home_worth_url = self.get_home_worth_url(property_id, listing_id=listing_id)
        avm = RedfinAVM(property_id, home_worth_url, avm_data)
        return avm

    def get_property_parcel_info(self, property_id, listing_id=None):
        """Get Property Parcel Info

        https://www.redfin.com/stingray/api/home/details/propertyParcelInfo?propertyId={property_id}&listingId={listing_id}&accessLevel=3

        A successful response looks like
        `{}&&{"errorMessage":"Success","resultCode":0,"payload":{...}}`
        where `payload` carries `staticMapUrl`, `bounds`, etc.

        Returns the parsed response dict, or None on a non-200 response.
        """
        if listing_id is None:
            listing_id = self.get_property_listing_id(property_id)
        params = {
            'propertyId' : property_id,
            'listingId' : listing_id,
            'accessLevel' : 1,
        }
        response = self._get_api_request('get_property_parcel_info', params)
        if response.status_code == 200:
            response_json = json.loads(response.text[4:])
            data = response_json
        else:
            data = None
        return data
class RedfinAVM(object):
    """Wrapper around a Redfin AVM (Automated Valuation Model) payload.

    Parses the raw payload dict into convenient attributes and exposes a
    JSON-encodable summary via :meth:`to_json`.
    """

    def __init__(self, property_id, home_worth_url, raw_data):
        """
        :param property_id: Redfin property identifier
        :param home_worth_url: Redfin "what is my home worth" URL
        :param raw_data: AVM payload dict as returned by the Redfin API
            (may be empty when the API call failed)
        """
        self.property_id = property_id
        self.home_worth_url = home_worth_url
        self.raw_data = raw_data

        self.property_value = raw_data.get('predictedValue', 0)
        self.property_value_historical = raw_data.get('predictedValueHistorical', 0)
        if self.property_value_historical != 0:
            self.property_value_change = self.property_value - self.property_value_historical
        else:
            self.property_value_change = self.property_value

        # Safe defaults so to_json() never raises AttributeError when the
        # payload is missing location/address information (the original
        # implementation only set these inside the branches below).
        self.latitude = None
        self.longitude = None
        self.street_address = None
        self.city = 'Unknown City'
        self.state = 'Unknown State'
        self.zipcode = ''
        self.address = ''

        lat_long = raw_data.get('latLong', {})
        if lat_long:
            self.latitude = lat_long.get('latitude')
            self.longitude = lat_long.get('longitude')

        # build the address
        street_address = raw_data.get('streetAddress', {}).get('assembledAddress', None)
        if street_address:
            self.street_address = street_address
            # full address is not directly returned
            # assume the first comparable is in same city, state, zip
            # alternatively, could reverse geocode the lat-lng
            comparables = raw_data.get('comparables', [])
            comparable = comparables[0] if len(comparables) > 0 else {}
            self.city = comparable.get('city', 'Unknown City')
            self.state = comparable.get('state', 'Unknown State')
            self.zipcode = comparable.get('zip', '')
            self.address = '%s, %s, %s %s' % (
                street_address,
                self.city,
                self.state,
                self.zipcode,
            )

    def to_json(self):
        """Return a JSON-encodable dict summarizing the AVM for
        ``self.property_id``.
        """
        data = {
            'property_id' : self.property_id,
            'property_value' : self.property_value,
            'city' : self.city,
            'state' : self.state,
            'zipcode' : self.zipcode,
            'address' : self.address,
        }
        # BUG FIX: the original returned the `json` module object here.
        return data
| 2,987 | 5,159 | 46 |
4301c7dd2ad14bd659c8a7faeaaff565fc156f66 | 461 | py | Python | crop_test.py | DevinAhmad/EMDP_House_Counting_MRCNN | 6c14e0a1eb0c40e30e7eed3edfc56d6b510fde5e | [
"MIT"
] | null | null | null | crop_test.py | DevinAhmad/EMDP_House_Counting_MRCNN | 6c14e0a1eb0c40e30e7eed3edfc56d6b510fde5e | [
"MIT"
] | null | null | null | crop_test.py | DevinAhmad/EMDP_House_Counting_MRCNN | 6c14e0a1eb0c40e30e7eed3edfc56d6b510fde5e | [
"MIT"
] | null | null | null | import numpy as np
import imutils
import cv2
import argparse
| 15.896552 | 49 | 0.672451 | import numpy as np
import imutils
import cv2
import argparse
def crop(image, size=300):
    """Split the image stored at path ``image`` into square tiles.

    :param image: path to an image readable by ``cv2.imread`` (must have a
        channel dimension, i.e. a color image)
    :param size: tile edge length in pixels (default 300); edge tiles are
        smaller when the image dimensions are not multiples of ``size``
    :return: list of tiles (numpy arrays) in row-major order
        (top-to-bottom, left-to-right)
    """
    img = cv2.imread(image)
    height, width, channels = img.shape
    tile_size = int(size)
    # Renamed from `crop` — the original shadowed the function's own name.
    tiles = []
    # numpy slicing clamps out-of-range stops, so edge tiles simply come
    # out smaller; no bounds handling is needed.
    for y0 in range(0, height, tile_size):
        for x0 in range(0, width, tile_size):
            tiles.append(img[y0:y0 + tile_size, x0:x0 + tile_size])
            # cv2.imwrite("crop/%d%d.jpg" % (y0, x0), tiles[-1])
    return tiles
bbda6963b4d42582470130a18342ad45b6a14d72 | 3,048 | py | Python | gen_fp.py | PatWalters/faiss_kmeans | 07cd8d1d0745e4232758c613fc8900c08b4f1ae2 | [
"MIT"
] | 4 | 2020-02-03T03:13:05.000Z | 2020-12-05T14:27:24.000Z | gen_fp.py | PatWalters/faiss_kmeans | 07cd8d1d0745e4232758c613fc8900c08b4f1ae2 | [
"MIT"
] | 2 | 2019-10-03T23:26:20.000Z | 2020-02-25T03:29:31.000Z | gen_fp.py | PatWalters/faiss_kmeans | 07cd8d1d0745e4232758c613fc8900c08b4f1ae2 | [
"MIT"
] | 4 | 2020-05-01T13:57:04.000Z | 2021-09-09T08:41:27.000Z | #!/usr/bin/env python
import sys
import h5py
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors as rdmd
from tqdm import tqdm
from functools import wraps
from time import time
def timing(f):
"""
Decorator to measure execution time, adapted from
# https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d
# https://codereview.stackexchange.com/questions/169870/decorator-to-measure-execution-time-of-a-function
"""
@wraps(f)
return wrapper
@timing
def make_np_array(lst, dtype=np.float32):
"""
Convert a list to a numpy array
:param lst: input list
:param dtype: data type
:return: output array
"""
return np.array(lst, dtype=dtype)
@timing
def save_data(fp_array, smiles_list, name_list, outfile_name):
"""
Write the fingerprints to an hdf5 file
:param fp_array: numpy array with fingerprints
:param smiles_list: list of SMILES
:param name_list: list of molecule names
:param outfile_name: output file name
:return: None
"""
h5f = h5py.File(outfile_name, 'w')
dt = h5py.special_dtype(vlen=bytes)
h5f.create_dataset('fp_list', data=fp_array)
h5f.create_dataset('smiles_list', (len(smiles_list), 1), dt, smiles_list)
h5f.create_dataset('name_list', (len(name_list), 1), dt, name_list)
h5f.close()
@timing
def generate_fingerprints(infile_name):
"""
Generate fingerprints from an input file, currently generates a 256 bit morgan fingerprint
:param infile_name: input file name
:return: lists with fingerprints, SMILES, and molecule names
"""
ifs = open(infile_name)
fp_list = []
smiles_list = []
name_list = []
for line in tqdm(ifs):
toks = line.strip().split(" ", 1)
if len(toks) >= 2:
smiles, name = toks
mol = Chem.MolFromSmiles(smiles)
if mol:
fp = rdmd.GetMorganFingerprintAsBitVect(mol, 2, 256)
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
fp_list.append(arr)
smiles_list.append(smiles.encode("ascii", "ignore"))
name_list.append(name.encode("ascii", "ignore"))
return fp_list, smiles_list, name_list
@timing
def main(input_smiles_file, output_fp_file):
"""
Generate fingerprints and write to an hdf5 file
:return:
"""
fp_list, smiles_list, name_list = generate_fingerprints(input_smiles_file)
outfile_name = output_fp_file
fp_array = make_np_array(fp_list)
save_data(fp_array, smiles_list, name_list, outfile_name)
if __name__ == "__main__":
if len(sys.argv) != 3:
print(f"usage: {sys.argv[0]} infile.smi outfile.h5")
sys.exit(1)
main(sys.argv[1], sys.argv[2])
| 29.307692 | 109 | 0.656496 | #!/usr/bin/env python
import sys
import h5py
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors as rdmd
from tqdm import tqdm
from functools import wraps
from time import time
def timing(f):
    """
    Decorator to measure execution time, adapted from
    # https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d
    # https://codereview.stackexchange.com/questions/169870/decorator-to-measure-execution-time-of-a-function
    """
    @wraps(f)
    def timed(*args, **kwargs):
        # Time the wrapped call and report elapsed seconds on stdout.
        begin = time()
        value = f(*args, **kwargs)
        elapsed = time() - begin
        print(f.__name__, f"Elapsed time: {elapsed:.2f} sec")
        return value
    return timed
@timing
def make_np_array(lst, dtype=np.float32):
    """
    Build a numpy array from a list.
    :param lst: input list
    :param dtype: numpy dtype for the result (default float32)
    :return: numpy array with the requested dtype
    """
    converted = np.array(lst, dtype=dtype)
    return converted
@timing
def save_data(fp_array, smiles_list, name_list, outfile_name):
    """
    Write fingerprints, SMILES, and names to an hdf5 file.
    :param fp_array: numpy array with fingerprints
    :param smiles_list: list of SMILES (ascii-encoded bytes)
    :param name_list: list of molecule names (ascii-encoded bytes)
    :param outfile_name: output file name
    :return: None
    """
    # Variable-length byte-string dtype for the text datasets.
    str_dtype = h5py.special_dtype(vlen=bytes)
    out = h5py.File(outfile_name, 'w')
    out.create_dataset('fp_list', data=fp_array)
    out.create_dataset('smiles_list', (len(smiles_list), 1), str_dtype, smiles_list)
    out.create_dataset('name_list', (len(name_list), 1), str_dtype, name_list)
    out.close()
@timing
def generate_fingerprints(infile_name):
    """
    Generate fingerprints from an input file, currently generates a 256 bit morgan fingerprint
    :param infile_name: input file name; lines of the form "SMILES NAME"
        (lines without a name or with unparseable SMILES are skipped)
    :return: lists with fingerprints, SMILES, and molecule names
    """
    fp_list = []
    smiles_list = []
    name_list = []
    # BUG FIX: the original opened the file and never closed it; the
    # context manager guarantees the handle is released.
    with open(infile_name) as ifs:
        for line in tqdm(ifs):
            toks = line.strip().split(" ", 1)
            if len(toks) >= 2:
                smiles, name = toks
                mol = Chem.MolFromSmiles(smiles)
                if mol:
                    fp = rdmd.GetMorganFingerprintAsBitVect(mol, 2, 256)
                    arr = np.zeros((1,))
                    DataStructs.ConvertToNumpyArray(fp, arr)
                    fp_list.append(arr)
                    smiles_list.append(smiles.encode("ascii", "ignore"))
                    name_list.append(name.encode("ascii", "ignore"))
    return fp_list, smiles_list, name_list
@timing
def main(input_smiles_file, output_fp_file):
    """
    Generate fingerprints from input_smiles_file and write them to the
    hdf5 file output_fp_file.
    :return: None
    """
    fps, smiles, names = generate_fingerprints(input_smiles_file)
    save_data(make_np_array(fps), smiles, names, output_fp_file)
# Command-line entry point: gen_fp.py <input SMILES file> <output hdf5 file>
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print(f"usage: {sys.argv[0]} infile.smi outfile.h5")
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
| 176 | 0 | 26 |
6616d131b8bf5736ae5fe2bc0242fa879111be9c | 4,064 | py | Python | odps/df/tools/lib/hll.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | [
"Apache-2.0"
] | 412 | 2015-11-01T09:27:52.000Z | 2022-03-26T05:04:03.000Z | odps/df/tools/lib/hll.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | [
"Apache-2.0"
] | 168 | 2015-11-16T09:46:39.000Z | 2022-03-17T06:35:26.000Z | odps/df/tools/lib/hll.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | [
"Apache-2.0"
] | 103 | 2015-12-01T08:10:09.000Z | 2022-02-21T12:46:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from math import ceil, log, pow
if sys.version < '3':
PY3 = False
else:
PY3 = True
def get_SHA1_bin(word):
"""
Return SHA1 hash of any string
:param word:
:return:
"""
from hashlib import sha1
if PY3 and isinstance(word, str):
word = word.encode('utf-8')
hash_s = sha1()
hash_s.update(word)
return bin(int(hash_s.hexdigest(), 16))[2:].zfill(160)
def get_index(binstr, end_index=160):
"""
Return the position of the first 1 bit
from the left in the word until end_index
:param binstr:
:param end_index:
:return:
"""
res = -1
try:
res = binstr.index('1') + 1
except ValueError:
res = end_index
return res
class HyperLogLog(object):
"""
Implements a HyperLogLog
"""
__ALPHA16 = 0.673
__ALPHA32 = 0.697
__ALPHA64 = 0.709
def __call__(self, buffer, item):
"""
Add the items to the HyperLogLog.
:param buffer:
:param item:
:return:
"""
items = [str(item), ]
if self._splitter is not None:
items = str(item).split(self._splitter)
for item in items:
binword = get_SHA1_bin(item)
pos = int(binword[:self._k], 2)
# retrieve the position of leftmost 1
aux = get_index(binword[self._k:], 160 - self._k)
# set its own register value to maximum value seen so far
buffer[pos] = max(aux, buffer[pos])
def _estimate(self, buffer):
"""
Return the estimate of the cardinality
:return: esitimate of the cardinality
"""
m = self._bucket_number
raw_e = self._alpha * pow(m, 2) / sum([pow(2, -x) for x in buffer])
if raw_e <= 5 / 2.0 * m:
v = buffer.count(0)
if v != 0:
return m * log(m / float(v), 2)
else:
return raw_e
elif raw_e <= 1 / 30.0 * 2 ** 160:
return raw_e
else:
return -2 ** 160 * log(1 - raw_e / 2.0 ** 160, 2)
def merge(self, buffer, other_hyper_log_log):
"""
Merge the HyperLogLog
:param other_hyper_log_log:
:return:
"""
for i in range(len(buffer)):
buffer[i] = max(buffer[i], other_hyper_log_log[i])
def _word_size_calculator(self, n_max):
"""
Estimate the size of the memory units, using the maximum cardinality as an argument
:param n_max: maximum cardinality
:return: size of the memory units
"""
return int(ceil(log(log(n_max, 2), 2)))
| 26.38961 | 91 | 0.579724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from math import ceil, log, pow
# True when running under Python 3 or later. The original test compared
# version *strings* (sys.version < '3'), which misclassifies any future
# major version >= 10; version_info compares numerically.
if sys.version_info[0] >= 3:
    PY3 = True
else:
    PY3 = False


def get_SHA1_bin(word):
    """
    Return SHA1 hash of any string as a 160-character binary string
    :param word: text to hash (str is utf-8 encoded on Python 3)
    :return: zero-padded 160-bit binary string, e.g. '0101...'
    """
    from hashlib import sha1
    if PY3 and isinstance(word, str):
        word = word.encode('utf-8')
    hash_s = sha1()
    hash_s.update(word)
    # int -> bin drops leading zero bits; zfill restores the fixed width.
    return bin(int(hash_s.hexdigest(), 16))[2:].zfill(160)
def get_index(binstr, end_index=160):
    """
    Return the 1-based position of the leftmost '1' bit in ``binstr``,
    or ``end_index`` when the string contains no '1'.
    :param binstr: string of '0'/'1' characters
    :param end_index: value returned when no '1' is present
    :return: int position
    """
    pos = binstr.find('1')
    if pos == -1:
        # No '1' anywhere (also covers the empty string).
        return end_index
    return pos + 1
class HyperLogLog(object):
    """
    Implements a HyperLogLog cardinality estimator.

    Register storage is external: call :meth:`buffer` for a fresh register
    list, feed items via ``__call__``, and read the estimate with
    :meth:`getvalue`.
    """
    # Bias-correction constants for small register counts.
    __ALPHA16 = 0.673
    __ALPHA32 = 0.697
    __ALPHA64 = 0.709

    def __init__(self, error_rate, splitter=None):
        if not (0 < error_rate < 1):
            raise ValueError('error_rate must be between 0 and 1.')
        # k index bits give 2**k registers; derived from the target error.
        self._k = int(ceil(log(pow(1.04 / error_rate, 2), 2)))
        self._bucket_number = 1 << self._k
        self._alpha = self.__get_alpha(self._bucket_number)
        self._splitter = splitter

    def buffer(self):
        """Return a fresh all-zero register list."""
        return [0] * self._bucket_number

    def __get_alpha(self, m):
        if m <= 16:
            return self.__ALPHA16
        if m <= 32:
            return self.__ALPHA32
        if m <= 64:
            return self.__ALPHA64
        return 0.7213 / (1 + 1.079 / float(m))

    def __call__(self, buffer, item):
        """
        Add the items to the HyperLogLog.
        :param buffer: register list (from :meth:`buffer`)
        :param item: item to count; split on ``splitter`` when configured
        :return:
        """
        text = str(item)
        if self._splitter is not None:
            pieces = text.split(self._splitter)
        else:
            pieces = [text]
        for piece in pieces:
            bits = get_SHA1_bin(piece)
            # First k bits select the register.
            register = int(bits[:self._k], 2)
            # Rank = position of the leftmost 1 in the remaining bits.
            rank = get_index(bits[self._k:], 160 - self._k)
            # Each register keeps the maximum rank observed.
            if rank > buffer[register]:
                buffer[register] = rank

    def _estimate(self, buffer):
        """
        Return the (float) estimate of the cardinality.
        :return: estimate of the cardinality
        """
        m = self._bucket_number
        raw = self._alpha * pow(m, 2) / sum([pow(2, -x) for x in buffer])
        if raw <= 5 / 2.0 * m:
            # Small-range correction (linear counting) while empty
            # registers remain. NOTE(review): canonical HLL uses the
            # natural log here; this implementation uses base 2 — kept
            # as-is to preserve existing behavior.
            zeros = buffer.count(0)
            if zeros != 0:
                return m * log(m / float(zeros), 2)
            return raw
        if raw <= 1 / 30.0 * 2 ** 160:
            return raw
        # Large-range correction for hash-collision saturation.
        return -2 ** 160 * log(1 - raw / 2.0 ** 160, 2)

    def getvalue(self, buffer):
        """Return the rounded integer cardinality estimate."""
        return int(round(self._estimate(buffer)))

    def merge(self, buffer, other_hyper_log_log):
        """
        Merge another register list into ``buffer`` (register-wise max).
        :param other_hyper_log_log: register list of the same length
        :return:
        """
        for idx, other_val in enumerate(other_hyper_log_log):
            if other_val > buffer[idx]:
                buffer[idx] = other_val

    def _word_size_calculator(self, n_max):
        """
        Estimate the size of the memory units, using the maximum cardinality as an argument
        :param n_max: maximum cardinality
        :return: size of the memory units
        """
        return int(ceil(log(log(n_max, 2), 2)))
| 660 | 0 | 108 |
018d6c2fcea71c46d33050e71da40d26ddf183ff | 4,752 | py | Python | fooof/plts/templates.py | TheCheeseToast/fooof | f3f8422af7d87fa73772e083deaf8439ca59908d | [
"Apache-2.0"
] | null | null | null | fooof/plts/templates.py | TheCheeseToast/fooof | f3f8422af7d87fa73772e083deaf8439ca59908d | [
"Apache-2.0"
] | null | null | null | fooof/plts/templates.py | TheCheeseToast/fooof | f3f8422af7d87fa73772e083deaf8439ca59908d | [
"Apache-2.0"
] | null | null | null | """Plot templates for the FOOOF module."""
import numpy as np
from fooof.core.modutils import safe_import, check_dependency
plt = safe_import('.pyplot', 'matplotlib')
###################################################################################################
###################################################################################################
@check_dependency(plt, 'matplotlib')
def plot_spectrum(freqs, power_spectrum, plt_log=False, ax=None, **kwargs):
"""Plot a line plot of a power-spectrum.
Parameters
----------
freqs : 1d array
X-axis data, frequency values.
power_spectrum : 1d array
Y-axis data, power_spectrum power values.
plt_log : boolean, optional
Whether or not to plot the frequency axis in log space. default: False
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
**kwargs
Keyword arguments to be passed to the plot call.
"""
# Create plot axes, if not provided
if not ax:
_, ax = plt.subplots(figsize=(12, 10))
# Set frequency vector, logged if requested
plt_freqs = np.log10(freqs) if plt_log else freqs
# Create the plot
ax.plot(plt_freqs, power_spectrum, **kwargs)
# Aesthetics and axis labels
ax.set_xlabel('Frequency', fontsize=20)
ax.set_ylabel('Power', fontsize=20)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.grid(True)
# If labels were provided, add a legend
if ax.get_legend_handles_labels()[0]:
ax.legend(prop={'size': 16})
@check_dependency(plt, 'matplotlib')
def plot_scatter_1(data, label, title=None, x_val=0, ax=None):
"""Plot a scatter plot with the given data.
Parameters
----------
data : 1d array
Data to plot.
label : str
Label for the data, to be set as the y-axis label.
title : str, optional
Title for the plot.
x_val : int, optional
Position along the x-axis to plot set of data. default: 0
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
Notes
-----
Data is jittered slightly, for visualization purposes (deviations on x-axis are meaningless).
"""
if not ax:
_, ax = plt.subplots()
# Create x-axis data, with small jitter for visualization purposes
x_data = np.ones_like(data) * x_val + np.random.normal(0, 0.025, data.shape)
ax.scatter(x_data, data, s=36, alpha=0.5)
if label:
ax.set_ylabel(label, fontsize=16)
if title:
ax.set_title(title, fontsize=20)
plt.xticks([x_val], [label])
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlim([-0.5, 0.5])
@check_dependency(plt, 'matplotlib')
def plot_scatter_2(data_0, label_0, data_1, label_1, title=None, ax=None):
"""Plot a scatter plot with two y-axes, with the given data.
Parameters
----------
data_0 : 1d array
Data to plot on the first axis.
label_0 : str
Label for the data on the first axis, to be set as the y-axis label.
data_1 : 1d array
Data to plot on the second axis.
label_0 : str
Label for the data on the second axis, to be set as the y-axis label.
title : str
Title for the plot.
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
Notes
-----
Data is jittered slightly, for visualization purposes (deviations on x-axis are meaningless).
"""
if not ax:
_, ax = plt.subplots()
ax1 = ax.twinx()
plot_scatter_1(data_0, label_0, ax=ax)
plot_scatter_1(data_1, label_1, x_val=1, ax=ax1)
if title:
ax.set_title(title, fontsize=20)
ax.set_xlim([-0.5, 1.5])
plt.xticks([0, 1], [label_0, label_1])
ax.tick_params(axis='x', labelsize=16)
@check_dependency(plt, 'matplotlib')
def plot_hist(data, label, title=None, n_bins=25, x_lims=None, ax=None):
"""Plot a histogram with the given data.
Parameters
----------
data : 1d array
Data to plot.
label : str
Label for the data, to be set as the y-axis label.
title : str, optional
Title for the plot.
n_bins : int, optional
Number of bins to use for the histogram. Default: 20
x_lims : list of float
X-axis limits for the plot.
ax : matplotlib.Axes, optional
Figure axes upon which to plot.
"""
if not ax:
_, ax = plt.subplots()
ax.hist(data[~np.isnan(data)], n_bins, alpha=0.8)
ax.set_xlabel(label, fontsize=16)
ax.set_ylabel('Count', fontsize=16)
if x_lims:
ax.set_xlim(x_lims)
if title:
ax.set_title(title, fontsize=20)
ax.tick_params(axis='both', labelsize=12)
| 27.952941 | 99 | 0.608165 | """Plot templates for the FOOOF module."""
import numpy as np
from fooof.core.modutils import safe_import, check_dependency
plt = safe_import('.pyplot', 'matplotlib')
###################################################################################################
###################################################################################################
@check_dependency(plt, 'matplotlib')
def plot_spectrum(freqs, power_spectrum, plt_log=False, ax=None, **kwargs):
    """Plot a power spectrum as a line plot.

    Parameters
    ----------
    freqs : 1d array
        X-axis data, frequency values.
    power_spectrum : 1d array
        Y-axis data, power values.
    plt_log : boolean, optional
        If True, plot the frequency axis in log10 space. default: False
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.
    **kwargs
        Keyword arguments passed through to the plot call.
    """
    if not ax:
        _, ax = plt.subplots(figsize=(12, 10))

    # Log the frequency axis only; power values are plotted as given.
    x_vals = np.log10(freqs) if plt_log else freqs
    ax.plot(x_vals, power_spectrum, **kwargs)

    # Aesthetics and axis labels
    ax.set_xlabel('Frequency', fontsize=20)
    ax.set_ylabel('Power', fontsize=20)
    ax.tick_params(axis='both', which='major', labelsize=16)
    ax.grid(True)

    # Add a legend only when the plot call supplied labeled artists.
    if ax.get_legend_handles_labels()[0]:
        ax.legend(prop={'size': 16})
@check_dependency(plt, 'matplotlib')
def plot_scatter_1(data, label, title=None, x_val=0, ax=None):
    """Scatter-plot one set of values at a single x position.

    Parameters
    ----------
    data : 1d array
        Data to plot.
    label : str
        Label for the data, set as the y-axis label and the x tick label.
    title : str, optional
        Title for the plot.
    x_val : int, optional
        X position at which to plot the set of data. default: 0
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.

    Notes
    -----
    Data is jittered slightly, for visualization purposes (deviations on x-axis are meaningless).
    """
    if not ax:
        _, ax = plt.subplots()

    # Small gaussian jitter around x_val so overlapping points stay visible.
    jitter = np.random.normal(0, 0.025, data.shape)
    ax.scatter(np.ones_like(data) * x_val + jitter, data, s=36, alpha=0.5)

    if label:
        ax.set_ylabel(label, fontsize=16)
    if title:
        ax.set_title(title, fontsize=20)

    plt.xticks([x_val], [label])
    ax.tick_params(axis='x', labelsize=16)
    ax.tick_params(axis='y', labelsize=10)
    ax.set_xlim([-0.5, 0.5])
@check_dependency(plt, 'matplotlib')
def plot_scatter_2(data_0, label_0, data_1, label_1, title=None, ax=None):
    """Plot a scatter plot with two y-axes, with the given data.

    Parameters
    ----------
    data_0 : 1d array
        Data to plot on the first axis.
    label_0 : str
        Label for the data on the first axis, to be set as the y-axis label.
    data_1 : 1d array
        Data to plot on the second axis.
    label_1 : str
        Label for the data on the second axis, to be set as the y-axis label.
    title : str, optional
        Title for the plot.
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.

    Notes
    -----
    Data is jittered slightly, for visualization purposes (deviations on x-axis are meaningless).
    """

    if not ax:
        _, ax = plt.subplots()

    # Second y-axis sharing the same x-axis.
    ax1 = ax.twinx()

    plot_scatter_1(data_0, label_0, ax=ax)
    plot_scatter_1(data_1, label_1, x_val=1, ax=ax1)

    if title:
        ax.set_title(title, fontsize=20)

    ax.set_xlim([-0.5, 1.5])
    plt.xticks([0, 1], [label_0, label_1])
    ax.tick_params(axis='x', labelsize=16)
@check_dependency(plt, 'matplotlib')
def plot_hist(data, label, title=None, n_bins=25, x_lims=None, ax=None):
    """Plot a histogram with the given data.

    Parameters
    ----------
    data : 1d array
        Data to plot. NaN values are dropped before plotting.
    label : str
        Label for the data, to be set as the x-axis label.
    title : str, optional
        Title for the plot.
    n_bins : int, optional
        Number of bins to use for the histogram. Default: 25
    x_lims : list of float, optional
        X-axis limits for the plot.
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.
    """

    if not ax:
        _, ax = plt.subplots()

    # hist cannot handle NaN values - drop them first.
    ax.hist(data[~np.isnan(data)], n_bins, alpha=0.8)
    ax.set_xlabel(label, fontsize=16)
    ax.set_ylabel('Count', fontsize=16)

    if x_lims:
        ax.set_xlim(x_lims)
    if title:
        ax.set_title(title, fontsize=20)

    ax.tick_params(axis='both', labelsize=12)
| 0 | 0 | 0 |
92137cbc9f05fa026ca9e6467c6e97454bd526a5 | 2,630 | py | Python | backtrader/backtrader/analyzers/sqn.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/analyzers/sqn.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/analyzers/sqn.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from backtrader import Analyzer
from backtrader.mathsupport import average, standarddev
from backtrader.utils import AutoOrderedDict
class SQN(Analyzer):
'''SQN or SystemQualityNumber. Defined by Van K. Tharp to categorize trading
systems.
- 1.6 - 1.9 Below average
- 2.0 - 2.4 Average
- 2.5 - 2.9 Good
- 3.0 - 5.0 Excellent
- 5.1 - 6.9 Superb
- 7.0 - Holy Grail?
The formula:
- SquareRoot(NumberTrades) * Average(TradesProfit) / StdDev(TradesProfit)
The sqn value should be deemed reliable when the number of trades >= 30
Methods:
- get_analysis
Returns a dictionary with keys "sqn" and "trades" (number of
considered trades)
'''
alias = ('SystemQualityNumber',)
def create_analysis(self):
'''Replace default implementation to instantiate an AutoOrdereDict
rather than an OrderedDict'''
self.rets = AutoOrderedDict()
| 30.581395 | 80 | 0.604563 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from backtrader import Analyzer
from backtrader.mathsupport import average, standarddev
from backtrader.utils import AutoOrderedDict
class SQN(Analyzer):
    '''SQN or SystemQualityNumber. Defined by Van K. Tharp to categorize trading
    systems.

      - 1.6 - 1.9 Below average
      - 2.0 - 2.4 Average
      - 2.5 - 2.9 Good
      - 3.0 - 5.0 Excellent
      - 5.1 - 6.9 Superb
      - 7.0 -     Holy Grail?

    The formula:

      - SquareRoot(NumberTrades) * Average(TradesProfit) / StdDev(TradesProfit)

    The sqn value should be deemed reliable when the number of trades >= 30

    Methods:

      - get_analysis

        Returns a dictionary with keys "sqn" and "trades" (number of
        considered trades)
    '''
    alias = ('SystemQualityNumber',)

    def create_analysis(self):
        '''Replace default implementation to instantiate an AutoOrdereDict
        rather than an OrderedDict'''
        self.rets = AutoOrderedDict()

    def start(self):
        '''Reset the per-run trade bookkeeping.'''
        super(SQN, self).start()
        self.pnl = list()
        self.count = 0

    def notify_trade(self, trade):
        '''Record the net (commission-adjusted) pnl of each closed trade.'''
        if trade.status == trade.Closed:
            self.pnl.append(trade.pnlcomm)
            self.count += 1

    def stop(self):
        '''Compute the final SQN value from the collected trade pnls.'''
        # Fewer than 2 trades: no standard deviation, report 0.
        sqn = 0
        if self.count > 1:
            mean_pnl = average(self.pnl)
            std_pnl = standarddev(self.pnl)
            try:
                sqn = math.sqrt(len(self.pnl)) * mean_pnl / std_pnl
            except ZeroDivisionError:
                # All pnls identical -> stddev 0 -> SQN undefined.
                sqn = None
        self.rets.sqn = sqn
        self.rets.trades = self.count
| 557 | 0 | 81 |
1a2cfabc718285a45f77995165dc8cc087cb7a47 | 470 | py | Python | django/tweets/migrations/0003_auto_20190113_0618.py | aleGpereira/django.twitter | 1e57d79e0839bec6df61f2d7d67de68bc2739a04 | [
"MIT"
] | null | null | null | django/tweets/migrations/0003_auto_20190113_0618.py | aleGpereira/django.twitter | 1e57d79e0839bec6df61f2d7d67de68bc2739a04 | [
"MIT"
] | null | null | null | django/tweets/migrations/0003_auto_20190113_0618.py | aleGpereira/django.twitter | 1e57d79e0839bec6df61f2d7d67de68bc2739a04 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2019-01-13 06:18
from __future__ import unicode_literals
from django.db import migrations, models
| 22.380952 | 76 | 0.625532 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2019-01-13 06:18
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration (schema state as of 2019-01-13).
class Migration(migrations.Migration):

    dependencies = [
        ('tweets', '0002_auto_20190113_0617'),
    ]

    operations = [
        # Change Tweet.id to a BigIntegerField primary key (64-bit integer
        # range, per Django's BigIntegerField documentation).
        migrations.AlterField(
            model_name='tweet',
            name='id',
            field=models.BigIntegerField(primary_key=True, serialize=False),
        ),
    ]
| 0 | 292 | 23 |
ab05eaceb31b3ba84683af44cb7bbb918b9dcc55 | 2,168 | py | Python | tests/unit/test_s3.py | pmakarov/gbdxtools | 07840cd15f64f20c852a90ca9f83749fd376b6bf | [
"MIT"
] | null | null | null | tests/unit/test_s3.py | pmakarov/gbdxtools | 07840cd15f64f20c852a90ca9f83749fd376b6bf | [
"MIT"
] | null | null | null | tests/unit/test_s3.py | pmakarov/gbdxtools | 07840cd15f64f20c852a90ca9f83749fd376b6bf | [
"MIT"
] | null | null | null | from gbdxtools import Interface
from gbdxtools.s3 import S3
from auth_mock import get_mock_gbdx_session
import vcr
import os
import tempfile
import unittest
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
cassette_name = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cassettes', 'test_s3_download.yaml')
| 38.714286 | 111 | 0.712177 | from gbdxtools import Interface
from gbdxtools.s3 import S3
from auth_mock import get_mock_gbdx_session
import vcr
import os
import tempfile
import unittest
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
cassette_name = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cassettes', 'test_s3_download.yaml')
class S3Tests(unittest.TestCase):
_temp_path = None
@classmethod
def setUpClass(cls):
# create mock session, replace dummytoken with real token to create cassette
mock_gbdx_session = get_mock_gbdx_session(token="dummytoken")
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
#cls.gbdx = Interface()
cls._temp_path = tempfile.mkdtemp()
print("Created: {}".format(cls._temp_path))
def test_bucket_init(self):
s = S3()
assert isinstance(s, S3)
@vcr.use_cassette('tests/unit/cassettes/test_get_s3creds.yaml', filter_headers=['authorization'])
def test_get_s3_creds(self):
s = S3()
assert s.info is not None
assert "bucket" in s.info.keys()
assert "prefix" in s.info.keys()
assert "S3_secret_key" in s.info.keys()
assert "S3_access_key" in s.info.keys()
assert "S3_session_token" in s.info.keys()
#@vcr.use_cassette(cassette_name, filter_headers=['authorization'])
#def test_download(self):
# location = 'gbdxtools/ski_areas.geojson'
# s = S3()
# s.download(location, local_dir=self._temp_path)
# assert os.path.isfile(os.path.join(self._temp_path, 'ski_areas.geojson'))
| 689 | 546 | 23 |
4834caa0eb3f36e7e46b784ae632a0f86de34560 | 1,182 | py | Python | python/stabpoly/randomly_check_bistochastic_bounds.py | grawies/stable-poly | 14e78c7b9771b548953c637d0c1fda1ee076fdbb | [
"MIT"
] | null | null | null | python/stabpoly/randomly_check_bistochastic_bounds.py | grawies/stable-poly | 14e78c7b9771b548953c637d0c1fda1ee076fdbb | [
"MIT"
] | null | null | null | python/stabpoly/randomly_check_bistochastic_bounds.py | grawies/stable-poly | 14e78c7b9771b548953c637d0c1fda1ee076fdbb | [
"MIT"
] | null | null | null | #!/home/sam/shared-space/linux-system/anaconda3/bin/python
from math import factorial
import sympy as sp
from bistochastic import generate_matrix
import fpiter
from polynomials import product_polynomial
from functionals import elementary_symmetric_differential_operator
import numpy.random
SEED = 1
numpy.random.seed(SEED)
if __name__ == '__main__':
main()
| 26.863636 | 84 | 0.71489 | #!/home/sam/shared-space/linux-system/anaconda3/bin/python
from math import factorial
import sympy as sp
from bistochastic import generate_matrix
import fpiter
from polynomials import product_polynomial
from functionals import elementary_symmetric_differential_operator
import numpy.random
SEED = 1
numpy.random.seed(SEED)
def main():
NUM_MATRICES = 30
NUM_SYMBOLS = 3
DEGREE = 3
symbols = fpiter.getvars(count=NUM_SYMBOLS)
valfunc = lambda p: elementary_symmetric_differential_operator(p, DEGREE, symbols)
# test a bunch of matrices
minval = 1e10
minimizer = 0
for i in range(NUM_MATRICES):
# get a matrix
A = generate_matrix(DEGREE, NUM_SYMBOLS)
# get the polynomial
p = product_polynomial(A)
# compute value
val = valfunc(p)
# check if new minimum
if val < minval:
minval = val
minimizer = A
print('test {}: val = {}'.format(i+1, val))
# report
minval_theory = factorial(NUM_SYMBOLS) / NUM_SYMBOLS**NUM_SYMBOLS
print('theoretical minimum val = {}'.format(minval_theory))
print('computed min val = {}'.format(minval))
print('minimizer:')
print(minimizer)
if __name__ == '__main__':
main()
| 797 | 0 | 23 |
be3df371b3bbbcb0dd2f8780cfe35e3ce3531c15 | 607 | py | Python | pypy/rpython/ootypesystem/rvirtualizable2.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/rpython/ootypesystem/rvirtualizable2.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/rpython/ootypesystem/rvirtualizable2.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | from pypy.rpython.rmodel import inputconst
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem.rclass import InstanceRepr, mangle, OBJECT
from pypy.rpython.rvirtualizable2 import AbstractVirtualizable2InstanceRepr
VABLERTI = OBJECT
| 31.947368 | 83 | 0.769357 | from pypy.rpython.rmodel import inputconst
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem.rclass import InstanceRepr, mangle, OBJECT
from pypy.rpython.rvirtualizable2 import AbstractVirtualizable2InstanceRepr
VABLERTI = OBJECT
class Virtualizable2InstanceRepr(AbstractVirtualizable2InstanceRepr, InstanceRepr):
def _setup_repr_llfields(self):
llfields = []
if self.top_of_virtualizable_hierarchy:
llfields.append(('vable_token', VABLERTI))
return llfields
def set_vable(self, llops, vinst, force_cast=False):
pass # TODO
| 210 | 62 | 77 |
d28aaf15565b3e96d30f9aa2e8cd6f3568524b4e | 114 | py | Python | openFlow/generateFlags.py | wsharif/thesis | 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | [
"MIT"
] | null | null | null | openFlow/generateFlags.py | wsharif/thesis | 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | [
"MIT"
] | null | null | null | openFlow/generateFlags.py | wsharif/thesis | 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | [
"MIT"
] | null | null | null | import random | 12.666667 | 33 | 0.666667 | import random
def generateFlags(flags):
sum = 0
for i in flags:
sum += i * random.randint(0, 1)
return sum | 78 | 0 | 23 |
2a6e2a7d82a3601d92a2ac56cfd23eea4d59da4c | 7,478 | py | Python | laptimize/solver.py | uichathurika/laptimize | 973a25468d5a9bac96467391595d04dcac780e7a | [
"MIT"
] | null | null | null | laptimize/solver.py | uichathurika/laptimize | 973a25468d5a9bac96467391595d04dcac780e7a | [
"MIT"
] | null | null | null | laptimize/solver.py | uichathurika/laptimize | 973a25468d5a9bac96467391595d04dcac780e7a | [
"MIT"
] | null | null | null | import pandas as pd
from laptimize.branch_and_bound_solver import BranchAndBoundSolver
from laptimize.lap_model import LAPModel
from laptimize.log import LogFactory
class Solver:
"""This class does solving non linear optimization problems using piecewise linear approximated programming
branch and bond technique"""
def solve(self):
"""
solve the piecewise linear lp problems and create sub lp problems using branch and bond technique
Parameter
--------
constraints : dict
have all the problem related information
ex: {'objective':{'x1':lambda x: 12*x,'x2':lambda x: 7*x - x**2,'value':None},
'constraints_1': {'x1':lambda x:-2*(x**4), 'x2':lambda x: -x ,'value':-2},
'constraints_2': {'x1':lambda x: 2*x, 'x2':lambda x: x ,'value' :3},
'capacity': {'x1':[0,2], 'x2':[0,3],'value':None}}
Returns
-------
lp_intervals : dict
Approximated solution for decision variables
ex: {'x1':2.0,'X2':1.746}
"""
try:
self.constraints = self.constraints.fillna(0)
solution_df = pd.DataFrame()
piecewise_lp, segment, curve = self.lap_model.model_solver(self.constraints, self.partition_len)
segment_0 = segment.copy()
# create a combined dictionary with including segment and curve dictionaries.
combine_segment_curve = pd.concat([segment, curve], axis=1)
lb0, ub0, k, k_lower, k_upper, segment_key = BranchAndBoundSolver(self.error).create_subproblems(
piecewise_lp, combine_segment_curve, self.constraints)
solution_df = solution_df.append({'iteration_no': 0, 'sub_problem_no': 0, 'piecewise_lp': piecewise_lp,
'segment': segment, 'curve': curve,
'lb': lb0, 'ub': ub0, 'k': k, 'k_lower': k_lower, 'k_upper': k_upper,
'branching_node': segment_key}, ignore_index=True)
global_df = pd.DataFrame()
while (len(solution_df)) > 0 and (len(solution_df) <= 100):
solution_df, global_df = self.sub_problem_solve(solution_df, combine_segment_curve, global_df)
global_solution = global_df
global_solution['solution'] = pd.Series((dict() for i in range(len(global_solution))),
index=global_solution.index)
global_solution = global_df.sort_values(['lb']).reset_index(drop=True)
self.constraints = self.constraints.drop(['capacity'], axis=1)
self.constraints = self.constraints.drop(['value'], axis=0)
for index, row in global_solution.iterrows():
lap_intervals = self.final_solution(row.piecewise_lp, segment_0)
global_solution.at[index, 'solution'] = lap_intervals
lap_intervals = global_solution.solution[0]
lap_intervals['obj_value'] = global_solution.lb[0]
return lap_intervals
except Exception as err:
self.logger.info('solve method ended with error ')
self.logger.error(str(err))
raise
def sub_problem_solve(self, solution_df, combine_segment_curve, global_df):
"""
create and solve all the sub problems for each branching nodes
Parameter
--------
solution_df: pandas data frame
includes all the details related to the sub problems solutions
constraints: pandas data frame
problem definition
combine_segment_curve: pandas data frame
all piece wise linear segments and respective function values
global_df: pandas data frame
includes all the local and global solutions
Returns
-------
solution_df: pandas data frame
updated solution_df
global_df: pandas data frame
updated global_df
"""
iteration_no = 1
for index, node in solution_df.iterrows():
if (node.ub - node.lb) > self.error:
branches = [node.k, node.k_lower, node.k_upper]
sub_problem_no = 1
for branch in branches:
if len(branch) >= 2:
piecewise_lp1, segment1, curve1 = self.lap_model.initialize(
segment=node.segment,
curve=node.curve).global_solver(node.branching_node, branch, self.constraints)
lb1, ub1, k1, k_lower1, k_upper1, segment_key1 = BranchAndBoundSolver(
self.error).create_subproblems(
piecewise_lp1, combine_segment_curve, self.constraints)
if (node.lb < ub1 <= node.ub) | (node.lb <= lb1 < node.ub):
ub1 = min(node.ub, ub1)
lb1 = max(node.lb, lb1)
solution_df = solution_df.append(
{'iteration_no': iteration_no, 'sub_problem_no': sub_problem_no,
'piecewise_lp': piecewise_lp1,
'segment': segment1, 'curve': curve1,
'lb': lb1, 'ub': ub1, 'k': k1, 'k_lower': k_lower1, 'k_upper': k_upper1,
'branching_node': segment_key1}, ignore_index=True)
sub_problem_no += 1
iteration_no += 1
else:
global_df = global_df.append(node, ignore_index=True)
solution_df.drop([index], inplace=True)
solution_df = solution_df.reset_index(drop=True)
return solution_df, global_df
def final_solution(self, piecewise_lp, segment):
"""
calculate the final solutions for the decision variables using piecewise linear variables
Parameters
----------
piecewise_lp: dict
final lp solution
constraints: pandas data frame
problem definition
segment: pandas data frame
piecewise linear segments
Return
------
lap_value: dict
linear approximated solution for decision variables
"""
try:
lap_value = dict()
for _, lp_constraint in self.constraints.iterrows():
var_value = 0
lp_allocation = piecewise_lp[lp_constraint.name]
for key in lp_allocation:
try:
var_value = var_value + segment.loc[key].segment * lp_allocation[key].value()
except:
var_value = var_value + segment.loc[key].segment * lp_allocation[key]
lap_value[lp_constraint.name] = var_value
return lap_value
except Exception as err:
self.logger.info('final_solution method ended with error ')
self.logger.error(str(err))
raise
| 46.7375 | 115 | 0.569404 | import pandas as pd
from laptimize.branch_and_bound_solver import BranchAndBoundSolver
from laptimize.lap_model import LAPModel
from laptimize.log import LogFactory
class Solver:
"""This class does solving non linear optimization problems using piecewise linear approximated programming
branch and bond technique"""
def __init__(self, constraints, partition_len=0.25, error=0.001):
self.logger = LogFactory.get_logger()
self.constraints = pd.DataFrame(constraints)
self.error = error
self.partition_len = partition_len
self.lap_model = LAPModel()
self.branch_and_bound_solver = BranchAndBoundSolver(self.error)
def solve(self):
"""
solve the piecewise linear lp problems and create sub lp problems using branch and bond technique
Parameter
--------
constraints : dict
have all the problem related information
ex: {'objective':{'x1':lambda x: 12*x,'x2':lambda x: 7*x - x**2,'value':None},
'constraints_1': {'x1':lambda x:-2*(x**4), 'x2':lambda x: -x ,'value':-2},
'constraints_2': {'x1':lambda x: 2*x, 'x2':lambda x: x ,'value' :3},
'capacity': {'x1':[0,2], 'x2':[0,3],'value':None}}
Returns
-------
lp_intervals : dict
Approximated solution for decision variables
ex: {'x1':2.0,'X2':1.746}
"""
try:
self.constraints = self.constraints.fillna(0)
solution_df = pd.DataFrame()
piecewise_lp, segment, curve = self.lap_model.model_solver(self.constraints, self.partition_len)
segment_0 = segment.copy()
# create a combined dictionary with including segment and curve dictionaries.
combine_segment_curve = pd.concat([segment, curve], axis=1)
lb0, ub0, k, k_lower, k_upper, segment_key = BranchAndBoundSolver(self.error).create_subproblems(
piecewise_lp, combine_segment_curve, self.constraints)
solution_df = solution_df.append({'iteration_no': 0, 'sub_problem_no': 0, 'piecewise_lp': piecewise_lp,
'segment': segment, 'curve': curve,
'lb': lb0, 'ub': ub0, 'k': k, 'k_lower': k_lower, 'k_upper': k_upper,
'branching_node': segment_key}, ignore_index=True)
global_df = pd.DataFrame()
while (len(solution_df)) > 0 and (len(solution_df) <= 100):
solution_df, global_df = self.sub_problem_solve(solution_df, combine_segment_curve, global_df)
global_solution = global_df
global_solution['solution'] = pd.Series((dict() for i in range(len(global_solution))),
index=global_solution.index)
global_solution = global_df.sort_values(['lb']).reset_index(drop=True)
self.constraints = self.constraints.drop(['capacity'], axis=1)
self.constraints = self.constraints.drop(['value'], axis=0)
for index, row in global_solution.iterrows():
lap_intervals = self.final_solution(row.piecewise_lp, segment_0)
global_solution.at[index, 'solution'] = lap_intervals
lap_intervals = global_solution.solution[0]
lap_intervals['obj_value'] = global_solution.lb[0]
return lap_intervals
except Exception as err:
self.logger.info('solve method ended with error ')
self.logger.error(str(err))
raise
def sub_problem_solve(self, solution_df, combine_segment_curve, global_df):
"""
create and solve all the sub problems for each branching nodes
Parameter
--------
solution_df: pandas data frame
includes all the details related to the sub problems solutions
constraints: pandas data frame
problem definition
combine_segment_curve: pandas data frame
all piece wise linear segments and respective function values
global_df: pandas data frame
includes all the local and global solutions
Returns
-------
solution_df: pandas data frame
updated solution_df
global_df: pandas data frame
updated global_df
"""
iteration_no = 1
for index, node in solution_df.iterrows():
if (node.ub - node.lb) > self.error:
branches = [node.k, node.k_lower, node.k_upper]
sub_problem_no = 1
for branch in branches:
if len(branch) >= 2:
piecewise_lp1, segment1, curve1 = self.lap_model.initialize(
segment=node.segment,
curve=node.curve).global_solver(node.branching_node, branch, self.constraints)
lb1, ub1, k1, k_lower1, k_upper1, segment_key1 = BranchAndBoundSolver(
self.error).create_subproblems(
piecewise_lp1, combine_segment_curve, self.constraints)
if (node.lb < ub1 <= node.ub) | (node.lb <= lb1 < node.ub):
ub1 = min(node.ub, ub1)
lb1 = max(node.lb, lb1)
solution_df = solution_df.append(
{'iteration_no': iteration_no, 'sub_problem_no': sub_problem_no,
'piecewise_lp': piecewise_lp1,
'segment': segment1, 'curve': curve1,
'lb': lb1, 'ub': ub1, 'k': k1, 'k_lower': k_lower1, 'k_upper': k_upper1,
'branching_node': segment_key1}, ignore_index=True)
sub_problem_no += 1
iteration_no += 1
else:
global_df = global_df.append(node, ignore_index=True)
solution_df.drop([index], inplace=True)
solution_df = solution_df.reset_index(drop=True)
return solution_df, global_df
def final_solution(self, piecewise_lp, segment):
"""
calculate the final solutions for the decision variables using piecewise linear variables
Parameters
----------
piecewise_lp: dict
final lp solution
constraints: pandas data frame
problem definition
segment: pandas data frame
piecewise linear segments
Return
------
lap_value: dict
linear approximated solution for decision variables
"""
try:
lap_value = dict()
for _, lp_constraint in self.constraints.iterrows():
var_value = 0
lp_allocation = piecewise_lp[lp_constraint.name]
for key in lp_allocation:
try:
var_value = var_value + segment.loc[key].segment * lp_allocation[key].value()
except:
var_value = var_value + segment.loc[key].segment * lp_allocation[key]
lap_value[lp_constraint.name] = var_value
return lap_value
except Exception as err:
self.logger.info('final_solution method ended with error ')
self.logger.error(str(err))
raise
| 321 | 0 | 27 |
bf0d889e873b1b124dfb39a1fc0a36f29e4828ec | 1,717 | py | Python | tests/parsers/transform/test_base.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | null | null | null | tests/parsers/transform/test_base.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | 84 | 2020-07-27T13:01:12.000Z | 2022-03-16T17:10:23.000Z | tests/parsers/transform/test_base.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | null | null | null | from unittest import TestCase
import pandas as pd
from moonstone.parsers.transform.base import TransformBase
| 30.122807 | 79 | 0.540478 | from unittest import TestCase
import pandas as pd
from moonstone.parsers.transform.base import TransformBase
class TestTransformBase(TestCase):
def test_historize(self):
df = pd.DataFrame(
[
[1, 2, 3],
[4, 5, 6]
],
columns=['a', 'b', 'c']
)
action = 'an_action'
arguments = {'arg1': 1, 'arg2': 2}
col_name = 'a'
expected_history = [
[action, {'col_name': col_name, **arguments}]
]
transform_base = TransformBase(df)
pd.testing.assert_frame_equal(transform_base.raw_df, transform_base.df)
self.assertFalse(transform_base.history)
transform_base.historize(action, col_name, arguments)
self.assertTrue(transform_base.history)
self.assertListEqual(transform_base.history, expected_history)
def test_rename(self):
df = pd.DataFrame(
[
[1, 2, 3],
[4, 5, 6]
],
columns=['a', 'b', 'c']
)
expected_df = pd.DataFrame(
[
[1, 2, 3],
[4, 5, 6]
],
columns=['super a', 'b', 'c']
)
action = 'rename'
col_name = 'a'
arguments = {'new_name': 'super a'}
expected_history = [
[action, {'col_name': col_name, **arguments}]
]
transform_base = TransformBase(df)
getattr(transform_base, action)(col_name, **arguments)
self.assertTrue(transform_base.history)
pd.testing.assert_frame_equal(transform_base.df, expected_df)
self.assertListEqual(transform_base.history, expected_history)
| 1,515 | 13 | 77 |
a5f537bd9bf4d86e55c1520a890750f0f74b0e0d | 7,534 | py | Python | src/rubrix/server/tasks/commons/dao/dao.py | drahnreb/rubrix | 340e545baf4d65a0d94e3c671ad6c93ff1d59700 | [
"Apache-2.0"
] | null | null | null | src/rubrix/server/tasks/commons/dao/dao.py | drahnreb/rubrix | 340e545baf4d65a0d94e3c671ad6c93ff1d59700 | [
"Apache-2.0"
] | null | null | null | src/rubrix/server/tasks/commons/dao/dao.py | drahnreb/rubrix | 340e545baf4d65a0d94e3c671ad6c93ff1d59700 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Iterable, List, Optional
from fastapi import Depends
from rubrix.server.commons.es_wrapper import ElasticsearchWrapper, create_es_wrapper
from rubrix.server.commons.helpers import unflatten_dict
from rubrix.server.commons.settings import settings
from rubrix.server.datasets.dao import (
DATASETS_RECORDS_INDEX_NAME,
dataset_records_index,
)
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons.dao.model import RecordSearch, RecordSearchResults
from rubrix.server.tasks.commons.es_helpers import (
EsRecordDataFieldNames,
aggregations,
parse_aggregations,
)
from stopwordsiso import stopwords
SUPPORTED_LANGUAGES = ["es", "en", "fr", "de"]
DATASETS_RECORDS_INDEX_TEMPLATE = {
"settings": {
"number_of_shards": settings.es_records_index_shards,
"number_of_replicas": settings.es_records_index_replicas,
"analysis": {
"analyzer": {
"multilingual_stop_analyzer": {
"type": "stop",
"stopwords": [w for w in stopwords(SUPPORTED_LANGUAGES)],
}
}
},
},
"index_patterns": [DATASETS_RECORDS_INDEX_NAME.format("*")],
"mappings": {
"properties": {
"event_timestamp": {"type": "date"},
EsRecordDataFieldNames.words: {
"type": "text",
"fielddata": True,
"analyzer": "multilingual_stop_analyzer",
},
# TODO: Not here since is task dependant
"tokens": {"type": "text"},
},
"dynamic_templates": [
# TODO: Not here since is task dependant
{"inputs": {"path_match": "inputs.*", "mapping": {"type": "text"}}},
{
"status": {
"path_match": "*.status",
"mapping": {
"type": "keyword",
},
}
},
{
"predicted": {
"path_match": "*.predicted",
"mapping": {
"type": "keyword",
},
}
},
{
"strings": {
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 128, # Avoid bulk errors with too long keywords
# Some elasticsearch versions includes automatically raw fields, so
# we must limit those fields too
"fields": {"raw": {"type": "keyword", "ignore_above": 128}},
},
}
},
],
},
}
class DatasetRecordsDAO:
"""Datasets records DAO"""
def init(self):
"""Initializes dataset records dao. Used on app startup"""
self._es.create_index_template(
name=DATASETS_RECORDS_INDEX_NAME,
template=DATASETS_RECORDS_INDEX_TEMPLATE,
force_recreate=True,
)
def add_records(
self,
dataset: DatasetDB,
records: List[Dict[str, Any]],
) -> int:
"""
Add records to dataset
Parameters
----------
dataset:
The dataset
records:
The list of records
Returns
-------
The number of failed records
"""
return self._es.add_documents(
index=dataset_records_index(dataset.id),
documents=records,
doc_id=lambda r: r.get("id"),
)
def search_records(
self,
dataset: DatasetDB,
search: Optional[RecordSearch] = None,
size: int = 100,
record_from: int = 0,
) -> RecordSearchResults:
"""
SearchRequest records under a dataset given a search parameters.
Parameters
----------
dataset:
The dataset
search:
The search params
size:
Number of records to retrieve (for pagination)
record_from:
Record from witch retrieve records (for pagination)
Returns
-------
The search result
"""
search = search or RecordSearch()
records_index = dataset_records_index(dataset.id)
metadata_fields = self._es.get_field_mapping(
index=records_index, field_name="metadata.*"
)
search_aggregations = (
{
**(search.aggregations or {}),
**aggregations.predicted_as(),
**aggregations.predicted_by(),
**aggregations.annotated_as(),
**aggregations.annotated_by(),
**aggregations.status(),
**aggregations.predicted(),
**aggregations.words_cloud(),
**aggregations.score(), # TODO: calculate score directly from dataset
**aggregations.custom_fields(metadata_fields),
}
if record_from == 0
else None
)
if record_from > 0:
search_aggregations = None
es_query = {
"size": size,
"from": record_from,
"query": search.query or {"match_all": {}},
"sort": [{"_id": {"order": "asc"}}], # TODO: Sort by event timestamp?
"aggs": search_aggregations or {},
}
results = self._es.search(index=records_index, query=es_query, size=size)
hits = results["hits"]
total = hits["total"]
docs = hits["hits"]
search_aggregations = results.get("aggregations", {})
result = RecordSearchResults(
total=total,
records=[doc["_source"] for doc in docs],
)
if search_aggregations:
parsed_aggregations = parse_aggregations(search_aggregations)
parsed_aggregations = unflatten_dict(
parsed_aggregations, stop_keys=["metadata"]
)
result.words = parsed_aggregations.pop("words")
result.metadata = parsed_aggregations.pop("metadata", {})
result.aggregations = parsed_aggregations
return result
def scan_dataset(
self,
dataset: DatasetDB,
search: Optional[RecordSearch] = None,
) -> Iterable[Dict[str, Any]]:
"""
Iterates over a dataset records
Parameters
----------
dataset:
The dataset
search:
The search parameters. Optional
Returns
-------
An iterable over found dataset records
"""
search = search or RecordSearch()
es_query = {
"query": search.query,
}
docs = self._es.list_documents(
dataset_records_index(dataset.id), query=es_query
)
for doc in docs:
yield doc["_source"]
_instance: Optional[DatasetRecordsDAO] = None
def dataset_records_dao(
es: ElasticsearchWrapper = Depends(create_es_wrapper),
) -> DatasetRecordsDAO:
"""
Creates a dataset records dao instance
Parameters
----------
es:
The elasticserach wrapper dependency
"""
global _instance
if not _instance:
_instance = DatasetRecordsDAO(es)
return _instance
| 29.896825 | 91 | 0.530794 | from typing import Any, Dict, Iterable, List, Optional
from fastapi import Depends
from rubrix.server.commons.es_wrapper import ElasticsearchWrapper, create_es_wrapper
from rubrix.server.commons.helpers import unflatten_dict
from rubrix.server.commons.settings import settings
from rubrix.server.datasets.dao import (
DATASETS_RECORDS_INDEX_NAME,
dataset_records_index,
)
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons.dao.model import RecordSearch, RecordSearchResults
from rubrix.server.tasks.commons.es_helpers import (
EsRecordDataFieldNames,
aggregations,
parse_aggregations,
)
from stopwordsiso import stopwords
SUPPORTED_LANGUAGES = ["es", "en", "fr", "de"]
DATASETS_RECORDS_INDEX_TEMPLATE = {
"settings": {
"number_of_shards": settings.es_records_index_shards,
"number_of_replicas": settings.es_records_index_replicas,
"analysis": {
"analyzer": {
"multilingual_stop_analyzer": {
"type": "stop",
"stopwords": [w for w in stopwords(SUPPORTED_LANGUAGES)],
}
}
},
},
"index_patterns": [DATASETS_RECORDS_INDEX_NAME.format("*")],
"mappings": {
"properties": {
"event_timestamp": {"type": "date"},
EsRecordDataFieldNames.words: {
"type": "text",
"fielddata": True,
"analyzer": "multilingual_stop_analyzer",
},
# TODO: Not here since is task dependant
"tokens": {"type": "text"},
},
"dynamic_templates": [
# TODO: Not here since is task dependant
{"inputs": {"path_match": "inputs.*", "mapping": {"type": "text"}}},
{
"status": {
"path_match": "*.status",
"mapping": {
"type": "keyword",
},
}
},
{
"predicted": {
"path_match": "*.predicted",
"mapping": {
"type": "keyword",
},
}
},
{
"strings": {
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 128, # Avoid bulk errors with too long keywords
# Some elasticsearch versions includes automatically raw fields, so
# we must limit those fields too
"fields": {"raw": {"type": "keyword", "ignore_above": 128}},
},
}
},
],
},
}
class DatasetRecordsDAO:
"""Datasets records DAO"""
def __init__(self, es: ElasticsearchWrapper):
self._es = es
self.init()
def init(self):
"""Initializes dataset records dao. Used on app startup"""
self._es.create_index_template(
name=DATASETS_RECORDS_INDEX_NAME,
template=DATASETS_RECORDS_INDEX_TEMPLATE,
force_recreate=True,
)
def add_records(
self,
dataset: DatasetDB,
records: List[Dict[str, Any]],
) -> int:
"""
Add records to dataset
Parameters
----------
dataset:
The dataset
records:
The list of records
Returns
-------
The number of failed records
"""
return self._es.add_documents(
index=dataset_records_index(dataset.id),
documents=records,
doc_id=lambda r: r.get("id"),
)
def search_records(
self,
dataset: DatasetDB,
search: Optional[RecordSearch] = None,
size: int = 100,
record_from: int = 0,
) -> RecordSearchResults:
"""
SearchRequest records under a dataset given a search parameters.
Parameters
----------
dataset:
The dataset
search:
The search params
size:
Number of records to retrieve (for pagination)
record_from:
Record from witch retrieve records (for pagination)
Returns
-------
The search result
"""
search = search or RecordSearch()
records_index = dataset_records_index(dataset.id)
metadata_fields = self._es.get_field_mapping(
index=records_index, field_name="metadata.*"
)
search_aggregations = (
{
**(search.aggregations or {}),
**aggregations.predicted_as(),
**aggregations.predicted_by(),
**aggregations.annotated_as(),
**aggregations.annotated_by(),
**aggregations.status(),
**aggregations.predicted(),
**aggregations.words_cloud(),
**aggregations.score(), # TODO: calculate score directly from dataset
**aggregations.custom_fields(metadata_fields),
}
if record_from == 0
else None
)
if record_from > 0:
search_aggregations = None
es_query = {
"size": size,
"from": record_from,
"query": search.query or {"match_all": {}},
"sort": [{"_id": {"order": "asc"}}], # TODO: Sort by event timestamp?
"aggs": search_aggregations or {},
}
results = self._es.search(index=records_index, query=es_query, size=size)
hits = results["hits"]
total = hits["total"]
docs = hits["hits"]
search_aggregations = results.get("aggregations", {})
result = RecordSearchResults(
total=total,
records=[doc["_source"] for doc in docs],
)
if search_aggregations:
parsed_aggregations = parse_aggregations(search_aggregations)
parsed_aggregations = unflatten_dict(
parsed_aggregations, stop_keys=["metadata"]
)
result.words = parsed_aggregations.pop("words")
result.metadata = parsed_aggregations.pop("metadata", {})
result.aggregations = parsed_aggregations
return result
def scan_dataset(
self,
dataset: DatasetDB,
search: Optional[RecordSearch] = None,
) -> Iterable[Dict[str, Any]]:
"""
Iterates over a dataset records
Parameters
----------
dataset:
The dataset
search:
The search parameters. Optional
Returns
-------
An iterable over found dataset records
"""
search = search or RecordSearch()
es_query = {
"query": search.query,
}
docs = self._es.list_documents(
dataset_records_index(dataset.id), query=es_query
)
for doc in docs:
yield doc["_source"]
_instance: Optional[DatasetRecordsDAO] = None
def dataset_records_dao(
es: ElasticsearchWrapper = Depends(create_es_wrapper),
) -> DatasetRecordsDAO:
"""
Creates a dataset records dao instance
Parameters
----------
es:
The elasticserach wrapper dependency
"""
global _instance
if not _instance:
_instance = DatasetRecordsDAO(es)
return _instance
| 66 | 0 | 27 |
37688bc24ff106b9acbf4bcfc916309b0385f263 | 20,309 | py | Python | torchbenchmark/models/fastNLP/test/core/test_trainer.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 1 | 2021-07-30T08:47:09.000Z | 2021-07-30T08:47:09.000Z | torchbenchmark/models/fastNLP/test/core/test_trainer.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 1 | 2021-04-17T11:24:26.000Z | 2021-04-17T11:28:49.000Z | torchbenchmark/models/fastNLP/test/core/test_trainer.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | null | null | null | import time
import unittest
import os
import numpy as np
import torch.nn.functional as F
from torch import nn
import torch
from fastNLP import DataSet
from fastNLP import Instance
from fastNLP import BCELoss
from fastNLP import CrossEntropyLoss
from fastNLP import AccuracyMetric
from fastNLP import SGD
from fastNLP import Trainer
from fastNLP.models.base_model import NaiveClassifier
from fastNLP import TorchLoaderIter
| 35.818342 | 120 | 0.50746 | import time
import unittest
import os
import numpy as np
import torch.nn.functional as F
from torch import nn
import torch
from fastNLP import DataSet
from fastNLP import Instance
from fastNLP import BCELoss
from fastNLP import CrossEntropyLoss
from fastNLP import AccuracyMetric
from fastNLP import SGD
from fastNLP import Trainer
from fastNLP.models.base_model import NaiveClassifier
from fastNLP import TorchLoaderIter
def prepare_fake_dataset():
    """Build a toy binary-classification dataset from two 2-D Gaussians.

    Class A (label 0.0) is centred at (-3, -3); class B (label 1.0) at
    (3, 3). Both use the identity covariance and contribute 1000 points.

    :return: a :class:`DataSet` with fields ``x`` (pair of floats) and ``y``.
    """
    identity_cov = np.array([[1, 0], [0, 1]])
    samples_a = np.random.multivariate_normal(np.array([-3, -3]), identity_cov, size=(1000,))
    samples_b = np.random.multivariate_normal(np.array([3, 3]), identity_cov, size=(1000,))
    instances = [Instance(x=[float(p[0]), float(p[1])], y=[0.0]) for p in samples_a]
    instances += [Instance(x=[float(p[0]), float(p[1])], y=[1.0]) for p in samples_b]
    return DataSet(instances)
def prepare_fake_dataset2(*args, size=100):
    """Build a fake DataSet with one random-label field and N feature fields.

    :param args: names of float feature fields; each gets shape (size, 5).
    :param size: number of samples in every field.
    :return: a fastNLP DataSet with field ``y`` (int labels in [0, 4)) plus
        one (size, 5) Gaussian field per name in ``args``.
    """
    # BUG fix: ``size`` was previously hard-coded to 100 for ``ys``, so any
    # call with size != 100 produced fields of inconsistent lengths.
    ys = np.random.randint(4, size=size, dtype=np.int64)
    data = {'y': ys}
    for arg in args:
        data[arg] = np.random.randn(size, 5)
    return DataSet(data=data)
class TrainerTestGround(unittest.TestCase):
    """End-to-end tests for fastNLP's Trainer.

    Covers the happy training path, the diagnostic error messages raised by
    the check-code pass for misconfigured models/datasets, and custom data
    iteration through TorchLoaderIter and collate_fn hooks.
    """
    def test_case(self):
        # Train a small classifier on synthetic data; should run cleanly.
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)
        train_set, dev_set = data_set.split(0.3)
        model = NaiveClassifier(2, 1)
        trainer = Trainer(train_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                          batch_size=32, n_epochs=10, print_every=50, dev_data=dev_set,
                          metrics=AccuracyMetric(pred="predict", target="y"), validate_every=-1, save_path=None,
                          use_tqdm=True, check_code_level=2)
        trainer.train()
        """
        # 应该正确运行
        """
    def test_save_path(self):
        # Same run as test_case but with save_path set; the saved model
        # directory is removed afterwards.
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)
        train_set, dev_set = data_set.split(0.3)
        model = NaiveClassifier(2, 1)
        save_path = 'test_save_models'
        trainer = Trainer(train_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                          batch_size=32, n_epochs=10, print_every=50, dev_data=dev_set,
                          metrics=AccuracyMetric(pred="predict", target="y"), validate_every=-1, save_path=save_path,
                          use_tqdm=True, check_code_level=2)
        trainer.train()
        import os
        if os.path.exists(save_path):
            import shutil
            shutil.rmtree(save_path)
    def test_trainer_suggestion1(self):
        # Check that the error message correctly guides the user.
        # None of the data forward() needs is provided; the Trainer should
        # tell the user how to configure the inputs.
        dataset = prepare_fake_dataset2('x')
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, y):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                loss = F.cross_entropy(x, y)
                return {'loss': loss}
        model = Model()
        with self.assertRaises(RuntimeError):
            trainer = Trainer(train_data=dataset, model=model)
        """
        # 应该获取到的报错提示
        NameError:
        The following problems occurred when calling Model.forward(self, x1, x2, y)
        missing param: ['y', 'x1', 'x2']
        Suggestion: (1). You might need to set ['y'] as input.
        (2). You need to provide ['x1', 'x2'] in DataSet and set it as input.
        """
    def test_trainer_suggestion2(self):
        # Provide the data forward() needs and check training runs.
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2', 'y', flag=True)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, y):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                loss = F.cross_entropy(x, y)
                return {'loss': loss}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, print_every=2, use_tqdm=False)
        trainer.train()
        """
        # 应该正确运行
        """
    def test_trainer_suggestion3(self):
        # Data is provided, but forward() returns the wrong key instead of
        # 'loss'; the Trainer should raise NameError.
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2', 'y', flag=True)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, y):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                loss = F.cross_entropy(x, y)
                return {'wrong_loss_key': loss}
        model = Model()
        with self.assertRaises(NameError):
            trainer = Trainer(train_data=dataset, model=model, print_every=2, use_tqdm=False)
            trainer.train()
    def test_trainer_suggestion4(self):
        # forward() returns 'losses' instead of 'loss'; the check-code pass
        # should already raise NameError at Trainer construction.
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2', 'y', flag=True)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, y):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                loss = F.cross_entropy(x, y)
                return {'losses': loss}
        model = Model()
        with self.assertRaises(NameError):
            trainer = Trainer(train_data=dataset, model=model, print_every=2, use_tqdm=False)
    def test_trainer_suggestion5(self):
        # Rename an extra field so all forward() arguments exist; since y is
        # never actually consumed during check-code, no error is raised.
        dataset = prepare_fake_dataset2('x1', 'x_unused')
        dataset.rename_field('x_unused', 'x2')
        dataset.set_input('x1', 'x2', 'y')
        dataset.set_target('y')
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, y):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                loss = F.cross_entropy(x, y)
                return {'loss': loss}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, print_every=2, use_tqdm=False)
    def test_trainer_suggestion6(self):
        # forward() returns only predictions under a non-standard key while a
        # loss and metric are configured; should raise NameError.
        dataset = prepare_fake_dataset2('x1', 'x_unused')
        dataset.rename_field('x_unused', 'x2')
        dataset.set_input('x1', 'x2')
        dataset.set_target('y', 'x1')
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                time.sleep(0.1)
                # loss = F.cross_entropy(x, y)
                return {'preds': x}
        model = Model()
        with self.assertRaises(NameError):
            trainer = Trainer(train_data=dataset, model=model, loss=CrossEntropyLoss(), print_every=2, dev_data=dataset,
                              metrics=AccuracyMetric(), use_tqdm=False)
    @unittest.skipIf('TRAVIS' in os.environ, "Need to be tested in hosts with more than 1 gpus")
    def test_trainer_data_parallel(self):
        # Multi-GPU (DataParallel) smoke test; only exercised when more than
        # one CUDA device is visible.
        if torch.cuda.device_count()>1:
            from fastNLP import AccuracyMetric
            dataset = prepare_fake_dataset2('x1', 'x2')
            dataset.set_input('x1', 'x2', 'y', flag=True)
            class Model(nn.Module):
                def __init__(self):
                    super().__init__()
                    self.fc = nn.Linear(5, 4)
                def forward(self, x1, x2, y=None):
                    x1 = self.fc(x1)
                    x2 = self.fc(x2)
                    x = x1 + x2
                    if self.training:
                        loss = F.cross_entropy(x, y)
                        return {'loss': loss}
                    else:
                        return {'pred':x, 'target':y}
            model = Model()
            trainer = Trainer(train_data=dataset, model=model, print_every=2, use_tqdm=False,
                              dev_data=dataset, metrics=AccuracyMetric(), device=[0, 1])
            trainer.train(load_best_model=False)
    def test_udf_dataiter(self):
        # Feed the Trainer a user-defined torch-style dataset through
        # TorchLoaderIter with a custom collate_fn.
        import random
        import torch
        class UdfDataSet:
            def __init__(self, num_samples):
                self.num_samples = num_samples
            def __getitem__(self, idx):
                x = [random.random() for _ in range(3)]
                y = random.random()
                return x,y
            def __len__(self):
                return self.num_samples
        def collate_fn(data_list):
            # input is [(x1,y1), (x2,y2), ...]: the list of
            # UdfDataSet.__getitem__ results making up one batch
            xs, ys = [], []
            for l in data_list:
                x, y = l
                xs.append(x)
                ys.append(y)
            x,y = torch.FloatTensor(xs), torch.FloatTensor(ys)
            return {'x':x, 'y':y}, {'y':y}
        dataset = UdfDataSet(10)
        dataset = TorchLoaderIter(dataset, collate_fn=collate_fn)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(3, 1)
            def forward(self, x, y):
                return {'loss':torch.pow(self.fc(x).squeeze(-1)-y, 2).sum()}
            def predict(self, x):
                return {'pred':self.fc(x).squeeze(0)}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset,
                          metrics=AccuracyMetric(target='y'), use_tqdm=False)
        trainer.train(load_best_model=False)
    def test_batch_sampler_dataiter(self):
        # Drive TorchLoaderIter through a custom batch_sampler that yields
        # two uneven batches per epoch.
        import random
        import torch
        class BatchSampler:
            def __init__(self, dataset):
                self.num_samples = len(dataset)
            def __iter__(self):
                index = 0
                indexes = list(range(self.num_samples))
                np.random.shuffle(indexes)
                start_idx = 0
                while index < self.num_samples:
                    if start_idx == 0:
                        end_index = self.num_samples//2
                    else:
                        end_index = self.num_samples
                    yield indexes[start_idx:end_index]
                    index = end_index
                    start_idx = end_index
            def __len__(self):
                return 2
        class UdfDataSet:
            def __init__(self, num_samples):
                self.num_samples = num_samples
            def __getitem__(self, idx):
                x = [random.random() for _ in range(3)]
                y = random.random()
                return x,y
            def __len__(self):
                return self.num_samples
        def collate_fn(data_list):
            # input is [(x1,y1), (x2,y2), ...]: the list of
            # UdfDataSet.__getitem__ results making up one batch
            xs, ys = [], []
            for l in data_list:
                x, y = l
                xs.append(x)
                ys.append(y)
            x,y = torch.FloatTensor(xs), torch.FloatTensor(ys)
            return {'x':x, 'y':y}, {'y':y}
        dataset = UdfDataSet(11)
        batch_sampler = BatchSampler(dataset)
        dataset = TorchLoaderIter(dataset, collate_fn=collate_fn, batch_sampler=batch_sampler)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(3, 1)
            def forward(self, x, y):
                return {'loss':torch.pow(self.fc(x).squeeze(-1)-y, 2).sum()}
            def predict(self, x):
                return {'pred':self.fc(x).squeeze(-1)}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset,
                          metrics=AccuracyMetric(target='y'), use_tqdm=False)
        trainer.train(load_best_model=False)
    def test_onthefly_iter(self):
        # Stream training data straight from a temp file via a dataset
        # indexed by file offsets; the file is removed in the finally block.
        import tempfile
        import random
        import torch
        tmp_file_handler, tmp_file_path = tempfile.mkstemp(text=True)
        try:
            num_samples = 10
            data = []
            for _ in range(num_samples):
                x, y = [random.random() for _ in range(3)], random.random()
                data.append(x + [y])
            with open(tmp_file_path, 'w') as f:
                for d in data:
                    f.write(' '.join(map(str, d)) + '\n')
            class FileDataSet:
                def __init__(self, tmp_file):
                    num_samples = 0
                    line_pos = [0]  # line_pos[idx] is the byte offset where line idx starts
                    self.tmp_file_handler = open(tmp_file, 'r', encoding='utf-8')
                    line = self.tmp_file_handler.readline()
                    while line:
                        if line.strip():
                            num_samples += 1
                            line_pos.append(self.tmp_file_handler.tell())
                        line = self.tmp_file_handler.readline()
                    self.tmp_file_handler.seek(0)
                    self.num_samples = num_samples
                    self.line_pos = line_pos
                def __getitem__(self, idx):
                    line_start, line_end = self.line_pos[idx], self.line_pos[idx + 1]
                    self.tmp_file_handler.seek(line_start)
                    line = self.tmp_file_handler.read(line_end - line_start).strip()
                    values = list(map(float, line.split()))
                    gold_d = data[idx]
                    assert all([g==v for g,v in zip(gold_d, values)]), "Should have the same data"
                    x, y = values[:3], values[-1]
                    return x, y
                def __len__(self):
                    return self.num_samples
            def collact_fn(data_list):
                # input is [(x1,y1), (x2,y2), ...]: the list of
                # __getitem__ results making up one batch
                xs, ys = [], []
                for l in data_list:
                    x, y = l
                    xs.append(x)
                    ys.append(y)
                x, y = torch.FloatTensor(xs), torch.FloatTensor(ys)
                return {'x': x, 'y': y}, {'y': y}
            dataset = FileDataSet(tmp_file_path)
            dataset = TorchLoaderIter(dataset, collate_fn=collact_fn)
            class Model(nn.Module):
                def __init__(self):
                    super().__init__()
                    self.fc = nn.Linear(3, 1)
                def forward(self, x, y):
                    return {'loss': torch.pow(self.fc(x).squeeze(-1) - y, 2).sum()}
                def predict(self, x):
                    return {'pred': self.fc(x).squeeze(-1)}
            model = Model()
            trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset,
                              metrics=AccuracyMetric(target='y'), use_tqdm=False, n_epochs=2)
            trainer.train(load_best_model=False)
        finally:
            import os
            if os.path.exists(tmp_file_path):
                os.remove(tmp_file_path)
    def test_collecct_fn(self):
        # A collate_fn that adds a brand-new batch field 'x' built from x1+x2.
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2')
        dataset.set_target('y', 'x1')
        import torch
        def fn(ins_list):
            x = []
            for ind, ins in ins_list:
                x.append(ins['x1']+ins['x2'])
            x = torch.FloatTensor(x)
            return {'x':x}, {}
        dataset.add_collate_fn(fn)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, x):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = self.fc(x)
                sum_x = x1 + x2 + x
                time.sleep(0.1)
                # loss = F.cross_entropy(x, y)
                return {'pred': sum_x}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, loss=CrossEntropyLoss(target='y'), print_every=2,
                          dev_data=dataset, metrics=AccuracyMetric(target='y'), use_tqdm=False)
        trainer.train()
    def test_collate_fn2(self):
        """Test that a collate_fn can supply both batch_x and batch_y."""
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2')
        dataset.set_target('y', 'x1')
        import torch
        def fn(ins_list):
            x = []
            for ind, ins in ins_list:
                x.append(ins['x1']+ins['x2'])
            x = torch.FloatTensor(x)
            return {'x':x}, {'target':x[:, :4].argmax(dim=-1)}
        dataset.add_collate_fn(fn)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, x):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = self.fc(x)
                sum_x = x1 + x2 + x
                time.sleep(0.1)
                # loss = F.cross_entropy(x, y)
                return {'pred': sum_x}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, loss=CrossEntropyLoss(), print_every=2,
                          dev_data=dataset, metrics=AccuracyMetric(), use_tqdm=False)
        trainer.train()
    def test_collate_fn3(self):
        """
        collate_fn output should override same-named dataset fields.
        :return:
        """
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2')
        dataset.set_target('y')
        import torch
        def fn(ins_list):
            x = []
            for ind, ins in ins_list:
                x.append(ins['x1']+ins['x2'])
            x = torch.FloatTensor(x)
            return {'x1':torch.zeros_like(x)}, {'target':torch.zeros(x.size(0)).long(), 'y':x}
        dataset.add_collate_fn(fn)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 1, bias=False)
            def forward(self, x1):
                x1 = self.fc(x1)
                assert x1.sum()==0, "Should be replaced to one"
                # loss = F.cross_entropy(x, y)
                return {'pred': x1}
        model = Model()
        trainer = Trainer(train_data=dataset, model=model, loss=CrossEntropyLoss(), print_every=2,
                          dev_data=dataset, metrics=AccuracyMetric(), use_tqdm=False, n_epochs=1)
        best_metric = trainer.train()['best_eval']['AccuracyMetric']['acc']
        self.assertTrue(best_metric==1)
    """
    def test_trainer_multiprocess(self):
        dataset = prepare_fake_dataset2('x1', 'x2')
        dataset.set_input('x1', 'x2', 'y', flag=True)
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(5, 4)
            def forward(self, x1, x2, y):
                x1 = self.fc(x1)
                x2 = self.fc(x2)
                x = x1 + x2
                loss = F.cross_entropy(x, y)
                return {'loss': loss}
        model = Model()
        trainer = Trainer(
            train_data=dataset,
            model=model,
            use_tqdm=True,
            print_every=2,
            num_workers=2,
            pin_memory=False,
            timeout=0,
        )
        trainer.train()
    """
| 17,728 | 2,677 | 69 |
cfdc93d32114242e90d406587520d510b22c1d61 | 4,233 | py | Python | utils/annotations.py | shreyas/swalpa | 200c23a04e0a6a63b805705c9af15cec708b895a | [
"MIT"
] | null | null | null | utils/annotations.py | shreyas/swalpa | 200c23a04e0a6a63b805705c9af15cec708b895a | [
"MIT"
] | null | null | null | utils/annotations.py | shreyas/swalpa | 200c23a04e0a6a63b805705c9af15cec708b895a | [
"MIT"
] | null | null | null | # Copyright (c) 2016 Shreyas Kulkarni (shyran@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import inspect
def virtual(func):
    """
    annotation to set a method for override,
    any method, that doesnt have this annotation, cannot be overridden with @overrides(cls) annotation
    """
    # hack to throw exception if the virtual function is not inside a class
    # ref: http://stackoverflow.com/questions/8793233/python-can-a-decorator-determine-if-a-function-is-being-defined-inside-a-class
    # It inspects the caller's source context, so it only works when the
    # decorated def is compiled from a real source file.
    frames = inspect.stack()
    if not (len(frames) > 2 and frames[2][4][0].strip().startswith('class ')):
        raise OverrideError("function '%s' should be inside a class to be virtual" % func.__name__);
    # NOTE(review): OverrideError is not defined in this module as shown —
    # presumably provided elsewhere; verify the import.
    # NOTE(review): func_doc is the Python-2 alias of __doc__; on Python 3
    # this sets a plain attribute and leaves the docstring untouched — confirm
    # the intended runtime.
    func.func_doc = "@virtual available for override\n" + (func.func_doc or '')
    func.__virtual__ = True
    return func
##### TEST CODE #######
# class myclass(object):
# def __init__(self):
# pass
#
# @virtual
# def add(self):
# pass
#
# @virtual
# def delete(self):
# pass
#
# def edit(self):
# pass
#
#
# class anotherclass(myclass):
# @overrides(myclass)
# def delete(self):
# pass
#
# @overrides(myclass)
# def add(self):
# pass
#
#
# @virtual
# def myfunc():
# """
# this is the
# docstring
# of my func
# """
# print("inside myfunc")
#
#
# #@overrides(myclass)
# def add():
# """i am anotherfunc"""
# print("inside anotherfunc")
#
#
# if __name__ == "__main__":
# print(myfunc.func_doc)
# print(add.func_doc)
# print(anotherclass.delete.func_doc)
| 34.137097 | 136 | 0.66832 | # Copyright (c) 2016 Shreyas Kulkarni (shyran@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import inspect
class OverrideError(Exception):
    """Raised when the @virtual / @overrides decorators are misused."""
    pass
def virtual(func):
    """
    annotation to set a method for override,
    any method, that doesnt have this annotation, cannot be overridden with @overrides(cls) annotation
    """
    # hack to throw exception if the virtual function is not inside a class
    # ref: http://stackoverflow.com/questions/8793233/python-can-a-decorator-determine-if-a-function-is-being-defined-inside-a-class
    frames = inspect.stack()
    if not (len(frames) > 2 and frames[2][4][0].strip().startswith('class ')):
        raise OverrideError("function '%s' should be inside a class to be virtual" % func.__name__)
    # Use __doc__ (portable) instead of the Python-2-only func_doc alias:
    # on Python 3 assigning func.func_doc silently creates an unrelated
    # attribute and never updates the real docstring.
    func.__doc__ = "@virtual available for override\n" + (func.__doc__ or '')
    func.__virtual__ = True
    return func
def overrides(cls):
    """Decorator factory marking a method as overriding a @virtual method of *cls*.

    The returned decorator raises OverrideError when used outside a class
    body, when *cls* has no method with the same name, or when that method is
    not declared @virtual anywhere in cls's MRO; otherwise it tags the
    method's docstring with "@overriding <cls>::<name>".
    """
    def overrider(func):
        # hack to throw exception if the function requesting override, is not inside a class
        # ref: http://stackoverflow.com/questions/8793233/python-can-a-decorator-determine-if-a-function-is-being-defined-inside-a-class
        frames = inspect.stack()
        if not (len(frames) > 2 and frames[2][4][0].strip().startswith('class ')):
            raise OverrideError("function '%s' should be inside class" % func.__name__);
        # NOTE(review): this relies on inspect.ismethod matching unbound
        # methods, which is Python-2 behaviour; on Python 3 getmembers would
        # return [] and the [0] below would raise IndexError — confirm the
        # target runtime.
        clsmethodname, clsmethod = inspect.getmembers(cls, lambda m: inspect.ismethod(m) and m.__name__ == func.__name__)[0]
        #if not (func.__name__ in map(lambda f: f[0], inspect.getmembers(cls, inspect.ismethod))):
        if not clsmethod:
            raise OverrideError("%s not in %s class" % (func.__name__, cls.__name__))
        #check if the method is declared virtual anywhere in the MRO
        isvirtual = False
        for class_in_mro in inspect.getmro(cls):
            methodname, method = inspect.getmembers(class_in_mro, lambda m: inspect.ismethod(m) and m.__name__ == func.__name__)[0]
            if (hasattr(method, '__virtual__') and method.__virtual__):
                isvirtual = True
                break
        if not isvirtual:
            raise OverrideError("%s::%s is not virtual (hint: use @virtual)" % (cls.__name__, clsmethodname))
        #if all safety checks are passing, then mark the docstring accordingly
        # NOTE(review): func_doc is the Python-2 alias of __doc__ — on
        # Python 3 this sets a plain attribute, not the docstring.
        func.func_doc = "@overriding %s::%s" % (cls.__name__, func.__name__)
        return func
    return overrider
##### TEST CODE #######
# class myclass(object):
# def __init__(self):
# pass
#
# @virtual
# def add(self):
# pass
#
# @virtual
# def delete(self):
# pass
#
# def edit(self):
# pass
#
#
# class anotherclass(myclass):
# @overrides(myclass)
# def delete(self):
# pass
#
# @overrides(myclass)
# def add(self):
# pass
#
#
# @virtual
# def myfunc():
# """
# this is the
# docstring
# of my func
# """
# print("inside myfunc")
#
#
# #@overrides(myclass)
# def add():
# """i am anotherfunc"""
# print("inside anotherfunc")
#
#
# if __name__ == "__main__":
# print(myfunc.func_doc)
# print(add.func_doc)
# print(anotherclass.delete.func_doc)
| 1,536 | 19 | 46 |
60749fea68af7dc73ff219a05ebd833765cd6532 | 1,699 | py | Python | school/serializers.py | Fercho191/testDjangoAPI | dd377a90f72f2ea82fb4a3f2c8cfadcd8fc3f9c9 | [
"MIT"
] | null | null | null | school/serializers.py | Fercho191/testDjangoAPI | dd377a90f72f2ea82fb4a3f2c8cfadcd8fc3f9c9 | [
"MIT"
] | null | null | null | school/serializers.py | Fercho191/testDjangoAPI | dd377a90f72f2ea82fb4a3f2c8cfadcd8fc3f9c9 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import School, Student, Activity, Assignment
| 36.934783 | 76 | 0.728664 | from rest_framework import serializers
from .models import School, Student, Activity, Assignment
class SchoolSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a School together with the pks of its students."""
    # students = serializers.HyperlinkedRelatedField(
    #     many=True, view_name='student-detail', queryset=Student.objects)
    # Read-only reverse relation: primary keys of the school's students.
    students = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
    class Meta:
        model = School
        fields = ('id', 'name', 'created', 'students')
class StudentSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a Student with its school referenced by primary key."""
    # school = serializers.HyperlinkedRelatedField(
    #     view_name='student-detail', queryset=School.objects)
    school = serializers.PrimaryKeyRelatedField(queryset=School.objects)
    class Meta:
        model = Student
        fields = ('id', 'name', 'school')
class ActivitySerializer(serializers.HyperlinkedModelSerializer):
    """Serializes an Activity with its school referenced by primary key."""
    # school = serializers.HyperlinkedRelatedField(
    #     view_name='student-detail', queryset=School.objects)
    school = serializers.PrimaryKeyRelatedField(queryset=School.objects)
    class Meta:
        model = Activity
        fields = ('id', 'name', 'school', 'created')
class AssignmentSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes an Assignment linking a student to an activity by pk."""
    # BUG fix: both fields previously used queryset=School.objects, which
    # validated the pks against the wrong model; each related field must use
    # its own model's queryset.
    student = serializers.PrimaryKeyRelatedField(queryset=Student.objects)
    activity = serializers.PrimaryKeyRelatedField(queryset=Activity.objects)
    class Meta:
        model = Assignment
        fields = ('id', 'activity', 'student')
| 0 | 1,506 | 92 |
49ca7bef342b854c92a4d3754db9fd9cf2d93994 | 3,366 | py | Python | packages/mcni/tests/mcni/pyre_components/MultiMonitors/MultiMonitors_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mcni/tests/mcni/pyre_components/MultiMonitors/MultiMonitors_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mcni/tests/mcni/pyre_components/MultiMonitors/MultiMonitors_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import time
import unittestX as unittest
if __name__ == "__main__":
main()
# version
__id__ = "$Id$"
# End of file
| 24.933333 | 80 | 0.550802 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import time
import unittestX as unittest
class TestCase(unittest.TestCase):
    """Black-box tests for the MultiMonitors pyre component: each test shells
    out to ./testapp1.py with a different monitor configuration."""
    def test1(self):
        # Two monitors with explicit placements/orientations; must not fail.
        cmd = '''./testapp1.py \
        --monitor.geometer.m1="(0,0,1),(0,0,0)" \
        --monitor.geometer.m2="(0,0,0),(0,90,0)" \
        --journal.debug.monitor \
        '''
        execute(cmd)
        return
    def test2(self):
        # Replace one monitor with an E_monitor and write output files.
        cmd = '''./testapp1.py \
        --monitor.m1=E_monitor \
        --output-dir=out-test2 \
        --overwrite-datafiles \
        --monitor.geometer.m1="(0,0,1),(0,0,0)" \
        --monitor.geometer.m2="(0,0,0),(0,90,0)" \
        --journal.debug.monitor \
        '''
        execute(cmd)
        return
    def test3(self):
        "two monitors facing the incident beam shoulder by shoulder"
        cmd = '''./testapp1.py \
        --ncount=1e6 \
        --source=Source_simple \
        --source.width=0.01 \
        --source.height=0.01 \
        --source.radius=0 \
        --source.xw=0.2 \
        --source.yh=0.1 \
        --source.dist=1 \
        --source.E0=60 \
        --monitor.m1="NDMonitor(x,y)" \
        --monitor.m2="NDMonitor(x,y)" \
        --monitor.geometer.m1="(0.05,0,0,),(0,0,0)" \
        --monitor.geometer.m2="(-0.05,0,0),(0,0,0)" \
        --monitor.m1.xwidth=0.1 \
        --monitor.m1.yheight=0.1 \
        --monitor.m1.xmin=-0.05 \
        --monitor.m1.xmax=0.05 \
        --monitor.m1.nx=10 \
        --monitor.m1.ymin=-0.05 \
        --monitor.m1.ymax=0.05 \
        --monitor.m1.ny=10 \
        --monitor.m1.filename=m1.h5 \
        --monitor.m2.xwidth=0.1 \
        --monitor.m2.yheight=0.1 \
        --monitor.m2.xmin=-0.05 \
        --monitor.m2.xmax=0.05 \
        --monitor.m2.nx=10 \
        --monitor.m2.ymin=-0.05 \
        --monitor.m2.ymax=0.05 \
        --monitor.m2.ny=10 \
        --monitor.m2.filename=m2.h5 \
        --geometer.monitor="(0,0,1),(0,0,0)" \
        --output-dir=out-test3 \
        --overwrite-datafiles \
        --journal.debug.monitor \
        '''
        execute(cmd)
        # give the simulation time to flush its output files to disk
        time.sleep(3)
        # the flux from source is 1/(s*cm**2*st)
        # each round the neutron intensity is
        # 1 * 0.01 (width) * 0.01 (height) * 1e4 = 1
        # the solid angle for each monitor is
        # about 0.1*0.1/(1*1) = 0.01
        # there are totally 100 pixels per monitor
        # so each pixel has about 1e-4 counts
        from histogram.hdf import load
        from histogram.hdf.utils import getOnlyEntry
        import numpy
        def loadhist(f):
            # Load the single histogram entry stored in HDF file *f*.
            return load(f, getOnlyEntry(f))
        m1 = loadhist('out-test3/m1.h5')
        self.assertTrue(numpy.abs(m1.I - 1e-4).max() < 1.5e-5)
        m2 = loadhist('out-test3/m2.h5')
        self.assertTrue(numpy.abs(m2.I - 1e-4).max() < 1.5e-5)
        return
    pass # end of TestCase
def execute(cmd):
    """Run *cmd* through the shell and raise RuntimeError on non-zero exit."""
    import os
    status = os.system(cmd)
    if status != 0:
        raise RuntimeError("%s failed" % cmd)
def pysuite():
    """Assemble and return the python-level test suite for this module."""
    cases = unittest.makeSuite(TestCase)
    return unittest.TestSuite((cases,))
def main():
    """Run the whole suite and exit the process non-zero on failure."""
    #debug.activate()
    pytests = pysuite()
    alltests = unittest.TestSuite( (pytests, ) )
    res = unittest.TextTestRunner(verbosity=2).run(alltests)
    import sys; sys.exit(not res.wasSuccessful())
if __name__ == "__main__":
main()
# version
__id__ = "$Id$"
# End of file
| 833 | 1,909 | 92 |
d0ec58c52a34c87210e6e568252fff52654825a1 | 1,440 | py | Python | matches/migrations/0001_initial.py | Shirhussain/Find-my-match | f11f64ebb0f2145d8cd9a352bd2c29bf30c85088 | [
"MIT"
] | null | null | null | matches/migrations/0001_initial.py | Shirhussain/Find-my-match | f11f64ebb0f2145d8cd9a352bd2c29bf30c85088 | [
"MIT"
] | null | null | null | matches/migrations/0001_initial.py | Shirhussain/Find-my-match | f11f64ebb0f2145d8cd9a352bd2c29bf30c85088 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-10 09:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 42.352941 | 172 | 0.645139 | # Generated by Django 3.1.5 on 2021-01-10 09:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration: creates the Match model, which links two
    users with a match percentage and bookkeeping timestamps."""
    initial = True
    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('match_decimal', models.DecimalField(decimal_places=2, max_digits=16, verbose_name='Percentage')),
                ('question_answered', models.IntegerField(default=0, verbose_name='Answered Question')),
                ('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='Timestamp')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
                ('user_a', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='match_user_a', to=settings.AUTH_USER_MODEL, verbose_name='User A')),
                ('user_b', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='match_user_b', to=settings.AUTH_USER_MODEL, verbose_name='User B')),
            ],
            options={
                'verbose_name': 'match',
                'verbose_name_plural': 'matchs',
            },
        ),
    ]
| 0 | 1,260 | 23 |
199619e24bbb6139123e8ecf26c949a0982121a0 | 3,885 | py | Python | tests/test_alarm.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null | tests/test_alarm.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null | tests/test_alarm.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | 1 | 2021-11-01T07:58:18.000Z | 2021-11-01T07:58:18.000Z | # Copyright (c) 2004-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
import unittest
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from Products.AlphaFlow.tests.AlphaFlowTestCase import AlphaFlowTestCase
from Products.AlphaFlow.activities.interfaces import \
IAlarmWorkItem, IAlarmActivity
from Products.AlphaFlow.activities.alarm import AlarmWorkItem, AlarmActivity
from Products.AlphaFlow.interfaces import ILifeCycleController
| 33.491379 | 83 | 0.655341 | # Copyright (c) 2004-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
import unittest
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from Products.AlphaFlow.tests.AlphaFlowTestCase import AlphaFlowTestCase
from Products.AlphaFlow.activities.interfaces import \
IAlarmWorkItem, IAlarmActivity
from Products.AlphaFlow.activities.alarm import AlarmWorkItem, AlarmActivity
from Products.AlphaFlow.interfaces import ILifeCycleController
class AlarmTest(AlphaFlowTestCase):
    """Exercises the alarm activity: a work item that ends once a document's
    deadline passes, and fails (taking the instance with it) when the
    deadline cannot be parsed."""
    # interface/implementation pairs verified by the base test case
    interfaces_to_test = [
        (IAlarmWorkItem, AlarmWorkItem),
        (IAlarmActivity, AlarmActivity)
    ]
    def _init_object(self):
        # Create a test document owned by 'author' and attach the imported
        # alarm_review workflow to it.
        portal = self.portal
        self._create_test_users()
        self.loginAsPortalOwner()
        self._import_wf('workflows/alarm_review.alf')
        # NOTE(review): wftool is unused in this method — presumably kept for
        # symmetry with test_definition; verify.
        wftool = getToolByName(portal, 'workflow_manager')
        self.login("author")
        mtool = getToolByName(portal, 'portal_membership')
        home = mtool.getHomeFolder("author")
        # Create object for instanciation of this process
        home.createObject("testdocument", "DummyContent")
        # Initialize the process
        doc = home.testdocument
        doc.assignProcess(self.test_process)
        return doc
    def _getAlarmWorkItem(self, workitems):
        # Return the first work item providing IAlarmWorkItem; raise if none.
        for wi in workitems:
            if IAlarmWorkItem.providedBy(wi):
                return wi
        raise RuntimeError
    def test_definition(self):
        """Drive the alarm through future deadline, past deadline, and an
        unparsable deadline, checking state transitions after each cron ping."""
        portal = self.portal
        wftool = getToolByName(portal, 'workflow_manager')
        doc = self._init_object()
        instance = doc.getInstance()
        controller = ILifeCycleController(instance)
        controller.start("testing")
        self.assertEquals(controller.state, "active")
        wis = instance.getWorkItems()
        # one is the edit and one is the alarm
        self.assertEquals(len(wis), 2)
        alarm_wi = self._getAlarmWorkItem(wis)
        self.failUnless(alarm_wi)
        self.assertEquals(alarm_wi.getStatusInfo(),
                          "deadline not yet reached")
        self.assertEquals(ILifeCycleController(alarm_wi).state,
                          "active")
        # we're testing if the deadline is not reached
        now = DateTime()
        # NOTE(review): datestring is unused below — presumably a leftover.
        datestring = '%d.%m.%Y %H:'
        # one minute in the future
        doc.deadline = now + 1/1440.0
        wftool.pingCronItems()
        self.assertEquals(alarm_wi.getStatusInfo(), "deadline not yet reached")
        self.assertEquals(ILifeCycleController(alarm_wi).state,
                          "active")
        self.assertEquals(alarm_wi.getGeneratedWorkItems(), [])
        # create a new DateTime() object to get a date one minute in the
        # past
        doc.deadline = now - 1/1440.0
        wftool.pingCronItems()
        self.assertEquals(ILifeCycleController(alarm_wi).state,
                          "ended")
        self.assertEquals(alarm_wi.getStatusInfo(), "deadline exceeded")
        self.assertEquals(len(alarm_wi.getGeneratedWorkItems()), 0)
        # test failure on error for new wf instance
        instance.getWorkItems('active')[0].complete('complete') # complete wf first
        wftool.pingCronItems()
        doc.assignProcess(self.test_process) # assign new process
        # an unparsable deadline must fail both the work item and the instance
        doc.deadline = 'gaak'
        instance = doc.getInstance()
        controller = ILifeCycleController(instance)
        controller.start("testing")
        wis = instance.getWorkItems()
        alarm_wi = self._getAlarmWorkItem(wis)
        wftool.pingCronItems()
        self.assertEquals(ILifeCycleController(alarm_wi).state,
                          "failed")
        self.assertEquals(controller.state, "failed")
        self.assertEquals(len(alarm_wi.getGeneratedWorkItems()), 0)
def test_suite():
    """Assemble this module's tests into a single unittest suite."""
    tests = unittest.makeSuite(AlarmTest)
    return unittest.TestSuite([tests])
| 3,149 | 208 | 46 |
70999ad3e29e3657d1dcf4c82f635f991a851c98 | 2,965 | py | Python | scripts/mod_requests/find_back.py | einsxiao/evawiz | 12c148f46c89551c281271718893a92b26da2bfa | [
"BSD-2-Clause"
] | 1 | 2019-06-07T03:44:39.000Z | 2019-06-07T03:44:39.000Z | scripts/mod_requests/find_back.py | einsxiao/evawiz | 12c148f46c89551c281271718893a92b26da2bfa | [
"BSD-2-Clause"
] | null | null | null | scripts/mod_requests/find_back.py | einsxiao/evawiz | 12c148f46c89551c281271718893a92b26da2bfa | [
"BSD-2-Clause"
] | null | null | null | from evawiz_basic import *
| 34.476744 | 121 | 0.575717 | from evawiz_basic import *
def request(handler):
    """Interactive "find back" (account recovery) flow against the server.

    Protocol, in order:
      1. announce intent on the 'mod_find_back' channel and wait for
         permission,
      2. prompt for and send the user's alternative email (encrypted),
      3. relay the 6-character verification code the user received,
      4. prompt for a new password (encrypted) and report the outcome.
    All results are reported on stdout; returns None.
    """
    handler.contact_server('mod_find_back')
    dprint('request_find_back')
    handler.send('eva! I want to find back')
    reply_info = handler.recv()
    dprint('reply_info=',reply_info)
    if not reply_info == 'allow to find back': # not allowed to register
        handler.send('why')
        ans = handler.recv();
        print("failed to find back:",ans);
        return;
    # email
    sys.stdout.write("your alternative email (provided when you register).\n")
    sys.stdout.write("email >")
    sys.stdout.flush()
    while True:
        email = input()
        # NOTE(review): str.strip() returns a new string; this call has no
        # effect on `email` as written.
        email.strip()
        if ( len(email) > 128 ):
            sys.stdout.write("email address too long.\n")
        elif ( re.match('^[a-zA-Z0-9_\.\-]+@[a-zA-Z0-9\-]+\.[a-zA-Z0-9\-\.]+$',email) ):
            break
        sys.stdout.write("email not valid, retype >")
        sys.stdout.flush()
        pass
    handler.encrypt_send(email)
    res = handler.recv();
    if ( res != "please check your mailbox" ):
        print("Operation failed for:",res);
        handler.send_eof();
        return
    print("An verification code has been sent to your mailbox. Please check your mailbox to continue.")
    sys.stdout.write("Verification code >")
    sys.stdout.flush()
    code = ""
    while True:
        code = input()
        dprint("'%s'"%(code,))
        # NOTE(review): same no-op strip as above.
        code.strip()
        if ( len(code) == 6 ):
            break
        sys.stdout.write("Verification code is in the wrong form.\n")
        sys.stdout.write("Verification code >")
        sys.stdout.flush()
        pass
    handler.encrypt_send(code)
    ans = handler.recv()
    if ( ans == "verify time out" ):
        print("Operation time out. Please try again.")
        handler.send_eof()
        return
    if ( ans == "verify code wrong" ):
        print("Verification code is wrong. Please try again.")
        handler.send_eof()
        return
    sys.stdout.write("New password >")
    sys.stdout.flush()
    # Loop until a sufficiently complex password is typed twice identically.
    while True:
        password = getpass.getpass("")
        password.strip()
        if re.match('^.{8,64}$',password) and not re.match('^[0-9]+$',password) and not re.match('^[a-zA-Z]+$',password):
            sys.stdout.write("repeat password >")
            sys.stdout.flush()
            password1 = getpass.getpass("")
            password1.strip()
            if ( password == password1 ) :
                break
            sys.stdout.write("password not the same, retype >")
            sys.stdout.flush()
        else:
            sys.stdout.write("password should be at least 8 bits with letters, numbers and symbols mixed\n retype >")
            sys.stdout.flush()
    handler.encrypt_send(password)
    ans = handler.recv()
    if ans == "password changing done":
        print("Your account password is changed. Operation done successfully.")
    else:
        print("Operation failed: %s"%(ans,))
        pass
    handler.send_eof()
    return
| 2,915 | 0 | 22 |
396f2526831ee7c3989200e800e342954bc4f9dc | 9,047 | py | Python | CS4420_final_project/simulator.py | peytonhowell/eva | edd2cb441e49cb1759eb11fcac63bc0e826080e9 | [
"Apache-2.0"
] | null | null | null | CS4420_final_project/simulator.py | peytonhowell/eva | edd2cb441e49cb1759eb11fcac63bc0e826080e9 | [
"Apache-2.0"
] | null | null | null | CS4420_final_project/simulator.py | peytonhowell/eva | edd2cb441e49cb1759eb11fcac63bc0e826080e9 | [
"Apache-2.0"
] | 1 | 2021-12-06T01:09:35.000Z | 2021-12-06T01:09:35.000Z | import queue
from collections import Counter, OrderedDict
import random
import matplotlib.pyplot as plt
class FIFO(Policy):
"Notice that things close to 0 are closer to being evicted"
if __name__ == "__main__":
# raise Exception
hit_rate = []
for file in ["ftrace_combined.csv"]:
for p in [TwoQ, RR, FIFO, LRU, LFU]:
hit_rate = []
for i in range(6, 400):
try:
policy = p(i)
simulator = Simulator(file)
simulator.simulate(policy)
hit_rate.append(float(simulator.hits)/simulator.total)
except Exception as e:
hit_rate.append(0)
print(hit_rate)
plt.scatter(range(6, 400), hit_rate, label=policy.name, s=3)
plt.title("Buffer Size versus Hit Rate")
plt.xlabel("Buffer Size (# Video Frames)")
plt.ylabel("Hit Rate (proportion)")
plt.legend()
plt.savefig(f'{file[:-4]}-results')
plt.clf()
break | 31.304498 | 145 | 0.489112 | import queue
from collections import Counter, OrderedDict
import random
import matplotlib.pyplot as plt
class Simulator:
    """Replays a buffer-trace CSV against a replacement policy and
    collects hit/miss statistics."""
    def __init__(self, filename):
        """Parse every line of the CSV at *filename* into Line records."""
        self.filename = filename
        itemList = []
        with open(filename, "r") as fp:
            lines = fp.readlines()
            for l in lines:
                itemList.append(Line(l.split(",")))
        self.lines = itemList
    def simulate(self, bufferPolicy):
        """Run the trace through *bufferPolicy*, counting hits/misses.

        After the call: `hits`/`misses` count fix events, `seen_misses`
        counts misses on frames requested before, and `worst_case` counts
        all repeat requests (an upper bound on achievable hits).
        """
        self.hits = 0
        self.misses = 0
        self.total = 0
        self.seen_misses = 0
        self.worst_case = 0
        self.seen = []
        for l in self.lines:
            frame_id = l.frame_id
            if l.fix:
                self.total += 1
                hit = bufferPolicy.fix(l.frame_id)
                # bools are summed as 0/1 here.
                self.hits += hit
                self.misses += not hit
                self.seen_misses += (not hit) and (frame_id in self.seen)
                self.worst_case += (frame_id in self.seen)
                self.seen.append(frame_id)
            else:
                bufferPolicy.unfix(l.frame_id)
        # print(f"Hits: {self.hits}\nMisses: {self.misses}\nSeen Misses: {self.seen_misses}\nWorst Case: {self.worst_case}\nTotal: {self.total}")
class Line:
    """One parsed record of the trace CSV."""
    def __init__(self, itemList):
        # itemList[1] is the string "True" for a fix (pin) event; anything
        # else is treated as an unfix event.
        self.frame_id = itemList[2]
        self.fix = itemList[1] == "True"
        # NOTE(review): timestamp duplicates frame_id (both itemList[2]);
        # it looks like it was meant to read a different column — confirm
        # against the trace format. It is unused by Simulator.
        self.timestamp = itemList[2]
class Policy():
    """Abstract interface for a buffer replacement policy.

    Subclasses must implement fix() — pin a frame, returning True on a
    buffer hit and False on a miss — and unfix() — release one pin on a
    frame; it returns nothing.
    """

    def __init__(self):
        self.name = "BasePolicy"

    def fix(self, frame_id):
        """Pin *frame_id*; return True on hit, False on miss."""
        raise NotImplementedError

    def unfix(self, frame_id):
        """Release one pin on *frame_id*."""
        raise NotImplementedError
class TestPolicy1(Policy):
    """Stub policy for tests: every fix() reports a miss; unfix() calls
    are counted in `counter`."""

    def __init__(self):
        self.name = "TestPolicy1"
        self.counter = 0

    def fix(self, frame_id):
        # Always a buffer miss.
        return False

    def unfix(self, frame_id):
        # Track how many times callers released a frame.
        self.counter += 1
class TestPolicy2(Policy):
    """Stub policy for tests: every fix() reports a hit; unfix() calls
    are counted in `counter`."""

    def __init__(self):
        self.name = "TestPolicy2"
        self.counter = 0

    def fix(self, frame_id):
        # Always a buffer hit.
        return True

    def unfix(self, frame_id):
        # Track how many times callers released a frame.
        self.counter += 1
class FIFO(Policy):
    """First-in/first-out replacement: on a full buffer, evict the oldest
    frame that currently has no outstanding pins."""

    def __init__(self, buffer_size):
        self.name = "FIFO"
        self.size = buffer_size
        self.queue = []
        self.fixed = Counter()

    def fix(self, frame_id):
        """Pin *frame_id*; return True on hit, False on miss.

        Raises Exception when the buffer is full and every resident
        frame is pinned.
        """
        if frame_id in self.queue:
            self.fixed[frame_id] += 1
            return True
        if len(self.queue) == self.size:
            # Evict the first (oldest) unpinned frame, if any.
            victim = next(
                (fid for fid in self.queue if self.fixed[fid] == 0), None)
            if victim is None:
                raise Exception("All items in queue are fixed")
            self.queue.remove(victim)
        self.queue.append(frame_id)
        self.fixed[frame_id] += 1
        return False

    def unfix(self, frame_id):
        """Release one pin on *frame_id*."""
        self.fixed[frame_id] -= 1
class LRU(Policy):
    """Least-recently-used replacement backed by an OrderedDict: the
    first key is the least recently used frame."""
    def __init__(self, buffer_size):
        self.name = "LRU"
        self.size = buffer_size
        self.queue = OrderedDict()
        self.fixed = Counter()
    def fix(self, frame_id):
        """Pin *frame_id*; return True on hit, False on miss.

        Raises Exception when the buffer is full and every resident
        frame is pinned.
        """
        if frame_id in self.queue:
            self.fixed[frame_id] += 1
            # Hit: mark as most recently used.
            self.queue.move_to_end(frame_id)
            return True
        else:
            if len(self.queue) == self.size:
                # Scan from least to most recently used for an unpinned
                # victim. Mutating the dict mid-iteration is safe only
                # because of the immediate break.
                for id in self.queue:
                    if self.fixed[id] == 0:
                        del self.queue[id]
                        self.queue[frame_id] = None
                        self.fixed[frame_id] += 1
                        break
                else:
                    # Loop completed without a break: nothing evictable.
                    raise Exception("All items in queue are fixed")
            else:
                self.fixed[frame_id] += 1
                self.queue[frame_id] = None
            return False
    def unfix(self, frame_id):
        """Release one pin on *frame_id*."""
        self.fixed[frame_id] -= 1
class RR(Policy):
    """Random replacement: on a full buffer, evict a uniformly random
    frame among those with no outstanding pins."""

    def __init__(self, buffer_size):
        self.name = "RR"
        self.size = buffer_size
        self.queue = []
        self.fixed = Counter()

    def fix(self, frame_id):
        """Pin *frame_id*; return True on hit, False on miss.

        Raises Exception when the buffer is full and every resident
        frame is pinned.
        """
        if frame_id in self.queue:
            self.fixed[frame_id] += 1
            return True
        if len(self.queue) == self.size:
            candidates = [fid for fid in self.queue if self.fixed[fid] == 0]
            if not candidates:
                raise Exception("All items in queue are fixed")
            self.queue.remove(random.choice(candidates))
        self.queue.append(frame_id)
        self.fixed[frame_id] += 1
        return False

    def unfix(self, frame_id):
        """Release one pin on *frame_id*."""
        self.fixed[frame_id] -= 1
class TwoQ(Policy):
    """Simplified 2Q replacement: frames enter a FIFO probation list and
    are promoted to an LRU list on their second reference. Eviction
    prefers unpinned FIFO frames, then unpinned LRU frames."""
    def __init__(self, buffer_size):
        self.name = "2Q"
        self.size = buffer_size
        self.fifo = []
        # self.queue = []
        self.fixed = Counter()
        self.lru = OrderedDict()
        # NOTE(review): lru_fixed is never used; pins for both lists are
        # tracked in self.fixed.
        self.lru_fixed = Counter()
    def fix(self, frame_id):
        """Pin *frame_id*; return True on hit, False on miss.

        Raises Exception when the buffer is full and every resident
        frame is pinned.
        """
        if frame_id in self.lru:
            self.fixed[frame_id] += 1
            self.lru.move_to_end(frame_id)
            return True
        ## Try to add to lru
        # Second reference while still on probation: promote to LRU.
        if frame_id in self.fifo:
            self.fixed[frame_id] += 1
            self.lru[frame_id] = None
            self.fifo.remove(frame_id)
            return True
        ## Add to FIFO
        if len(self.fifo) + len(self.lru) >= self.size:
            inserted = False
            # Prefer evicting an unpinned probation (FIFO) frame.
            for id in self.fifo:
                if self.fixed[id] == 0:
                    self.fifo.remove(id)
                    self.fifo.append(frame_id)
                    self.fixed[frame_id] += 1
                    inserted = True
                    break
            if not inserted:
                # Fall back to the oldest unpinned LRU frame; the dict
                # mutation is safe only because of the immediate break.
                for id in self.lru.keys():
                    if self.fixed[id] == 0:
                        del self.lru[id]
                        self.fifo.append(frame_id)
                        self.fixed[frame_id] += 1
                        inserted = True
                        break
                else:
                    # Neither list had an evictable frame.
                    raise Exception("All items in queue are fixed")
        else:
            self.fixed[frame_id] += 1
            self.fifo.append(frame_id)
        return False
    def unfix(self, frame_id):
        """Release one pin on *frame_id*."""
        self.fixed[frame_id] -= 1
class LFU(Policy):
    """Least-frequently-used replacement. `queue` maps resident frame ->
    use count and is kept sorted by count ascending, so iteration visits
    the least frequently used frames first. `history` remembers counts of
    evicted frames so a re-admitted frame resumes its old count."""
    def __init__(self, buffer_size):
        self.name = "LFU"
        self.size = buffer_size
        self.queue = {}
        self.fixed = Counter()
        self.history = Counter()
    def fix(self, frame_id):
        """Pin *frame_id*; return True on hit, False on miss.

        Raises Exception when the buffer is full and every resident
        frame is pinned.
        """
        if frame_id in self.queue:
            self.fixed[frame_id] += 1
            self.queue[frame_id] += 1
            # Re-sort by use count so the LFU frame is iterated first.
            self.queue = {k:v for k, v in sorted(self.queue.items(), key=lambda item: item[1])}
            return True
        else:
            if len(self.queue) == self.size:
                # Evict the least frequently used unpinned frame; safe to
                # mutate mid-iteration only because of the immediate break.
                for id in self.queue.keys():
                    if self.fixed[id] == 0:
                        # Remember the evictee's count for possible return.
                        self.history[id] = self.queue[id]
                        del self.queue[id]
                        self.queue[frame_id] = 1 + self.history[frame_id]
                        self.queue = {k:v for k, v in sorted(self.queue.items(), key=lambda item: item[1])}
                        self.fixed[frame_id] += 1
                        break
                else:
                    raise Exception("All items in queue are fixed")
            else:
                self.fixed[frame_id] += 1
                self.queue[frame_id] = 1 + self.history[frame_id]
                self.queue = {k:v for k, v in sorted(self.queue.items(), key=lambda item: item[1])}
                self.history[frame_id] = 0
            return False
    def unfix(self, frame_id):
        """Release one pin on *frame_id*."""
        self.fixed[frame_id] -= 1
if __name__ == "__main__":
# raise Exception
hit_rate = []
for file in ["ftrace_combined.csv"]:
for p in [TwoQ, RR, FIFO, LRU, LFU]:
hit_rate = []
for i in range(6, 400):
try:
policy = p(i)
simulator = Simulator(file)
simulator.simulate(policy)
hit_rate.append(float(simulator.hits)/simulator.total)
except Exception as e:
hit_rate.append(0)
print(hit_rate)
plt.scatter(range(6, 400), hit_rate, label=policy.name, s=3)
plt.title("Buffer Size versus Hit Rate")
plt.xlabel("Buffer Size (# Video Frames)")
plt.ylabel("Hit Rate (proportion)")
plt.legend()
plt.savefig(f'{file[:-4]}-results')
plt.clf()
break | 7,005 | -23 | 969 |
c4412a67585d962973827f0a73ba480b0ec1a3d9 | 562 | py | Python | res_users_clear_access_rights/__manifest__.py | marionumza/it-projects-llc-access-addons | 421fb60b25bfea67ccf0309f3cf7385dafafc350 | [
"MIT"
] | null | null | null | res_users_clear_access_rights/__manifest__.py | marionumza/it-projects-llc-access-addons | 421fb60b25bfea67ccf0309f3cf7385dafafc350 | [
"MIT"
] | null | null | null | res_users_clear_access_rights/__manifest__.py | marionumza/it-projects-llc-access-addons | 421fb60b25bfea67ccf0309f3cf7385dafafc350 | [
"MIT"
] | 1 | 2021-02-15T03:14:52.000Z | 2021-02-15T03:14:52.000Z | {
"name": "Clear User Access rights",
"summary": """Useful tool to reset user rights""",
"version": "12.0.1.1.0",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "Other OSI approved licence", # MIT
"support": "apps@itpp.dev",
"website": "https://it-projects.info",
"category": "Access",
"images": ["images/clear_user_access_rights.jpg"],
"depends": ["base"],
"external_dependencies": {"python": [], "bin": []},
"data": ["views.xml"],
"demo": [],
"installable": True,
"auto_install": False,
}
| 31.222222 | 55 | 0.576512 | {
"name": "Clear User Access rights",
"summary": """Useful tool to reset user rights""",
"version": "12.0.1.1.0",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "Other OSI approved licence", # MIT
"support": "apps@itpp.dev",
"website": "https://it-projects.info",
"category": "Access",
"images": ["images/clear_user_access_rights.jpg"],
"depends": ["base"],
"external_dependencies": {"python": [], "bin": []},
"data": ["views.xml"],
"demo": [],
"installable": True,
"auto_install": False,
}
| 0 | 0 | 0 |
bfdc3954b8f13c5db07d0acdaf7b15ded19dcb8f | 1,146 | py | Python | RobotCode/GUI/main.py | rpellumbi/ECE-3400-Team15 | a693957c154af7e31f6af82592a670d2f9d6bcc9 | [
"MIT"
] | null | null | null | RobotCode/GUI/main.py | rpellumbi/ECE-3400-Team15 | a693957c154af7e31f6af82592a670d2f9d6bcc9 | [
"MIT"
] | null | null | null | RobotCode/GUI/main.py | rpellumbi/ECE-3400-Team15 | a693957c154af7e31f6af82592a670d2f9d6bcc9 | [
"MIT"
] | null | null | null | import serial, argparse
from gui.controller import Controller
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Arduino connecton configuration
parser.add_argument('port', type=str, default='', help='Where to find the Arduino.')
parser.add_argument('--baudrate', type=int, default=9600, help='Baudrate for the serial connection.')
# Maze configuration
parser.add_argument('--rows', type=int, default=2, help='Number of rows in the maze.')
parser.add_argument('--cols', type=int, default=3, help='Number of cols in the maze.')
args = parser.parse_args()
try:
# Setup the serial connection to the Arduino
with serial.Serial(args.port, args.baudrate) as ser:
# Setup the GUI controller
controller = Controller(args.rows, args.cols)
while True:
# Note: readline blocks.. If you do not terminate your message
# with a newline, this will block forever...
msg = ser.readline()
print 'Received message: %s' % msg.strip()
controller.handle_msg(msg)
except serial.serialutil.SerialException as e:
print 'Could not connect to the Arduino.'
print e | 42.444444 | 103 | 0.696335 | import serial, argparse
from gui.controller import Controller
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Arduino connecton configuration
parser.add_argument('port', type=str, default='', help='Where to find the Arduino.')
parser.add_argument('--baudrate', type=int, default=9600, help='Baudrate for the serial connection.')
# Maze configuration
parser.add_argument('--rows', type=int, default=2, help='Number of rows in the maze.')
parser.add_argument('--cols', type=int, default=3, help='Number of cols in the maze.')
args = parser.parse_args()
try:
# Setup the serial connection to the Arduino
with serial.Serial(args.port, args.baudrate) as ser:
# Setup the GUI controller
controller = Controller(args.rows, args.cols)
while True:
# Note: readline blocks.. If you do not terminate your message
# with a newline, this will block forever...
msg = ser.readline()
print 'Received message: %s' % msg.strip()
controller.handle_msg(msg)
except serial.serialutil.SerialException as e:
print 'Could not connect to the Arduino.'
print e | 0 | 0 | 0 |
1a58fb1dda24fcd5cb4bb49896853e34617cc8cf | 787 | py | Python | src/fuckbot/eightball.py | Zer0-One/fuckbot | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | [
"BSD-2-Clause"
] | null | null | null | src/fuckbot/eightball.py | Zer0-One/fuckbot | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | [
"BSD-2-Clause"
] | null | null | null | src/fuckbot/eightball.py | Zer0-One/fuckbot | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | [
"BSD-2-Clause"
] | 1 | 2022-01-24T21:20:43.000Z | 2022-01-24T21:20:43.000Z | import random
ANSWERS = [
"of course not you idiot",
"sure, why not",
"do i look like an oracle to you?",
"yes, obviously",
"no",
"yes",
"literally kys",
"absolutely haram",
"idk, probably",
"is grass green? is the sky blue? is taiwan numbah wan?"
]
| 21.27027 | 60 | 0.550191 | import random
ANSWERS = [
"of course not you idiot",
"sure, why not",
"do i look like an oracle to you?",
"yes, obviously",
"no",
"yes",
"literally kys",
"absolutely haram",
"idk, probably",
"is grass green? is the sky blue? is taiwan numbah wan?"
]
def is_question(msg):
    """Return True when *msg* begins with a yes/no question word.

    Matching is case-insensitive; each keyword must be followed by a
    space, so e.g. "canary" does not count.
    """
    question_prefixes = (
        "can ", "could ", "do ", "does ", "is ",
        "may ", "shall ", "should ", "would ", "will ",
    )
    return msg.lower().startswith(question_prefixes)
def answer():
    """Return a uniformly random response from ANSWERS.

    Uses random.choice so every entry can be picked; the previous
    random.randint(1, len(ANSWERS) - 1) call could never select
    ANSWERS[0].
    """
    return random.choice(ANSWERS)
| 450 | 0 | 46 |
132f3bc069a5a17eb3737ecc921bd6e99c559c42 | 15,775 | py | Python | roles/validate-tempest/files/tempestmail/tests/test_tempestmail.py | IgorBMSTU/tripleo-quickstart-extras | 485f2a398b972ea8d91c99eb9c3c7f7a2b9abb14 | [
"Apache-2.0"
] | 3 | 2018-09-05T08:34:05.000Z | 2019-09-26T19:01:11.000Z | roles/validate-tempest/files/tempestmail/tests/test_tempestmail.py | IgorBMSTU/tripleo-quickstart-extras | 485f2a398b972ea8d91c99eb9c3c7f7a2b9abb14 | [
"Apache-2.0"
] | 15 | 2018-10-10T17:53:42.000Z | 2021-03-31T07:55:08.000Z | roles/validate-tempest/files/tempestmail/tests/test_tempestmail.py | IgorBMSTU/tripleo-quickstart-extras | 485f2a398b972ea8d91c99eb9c3c7f7a2b9abb14 | [
"Apache-2.0"
] | 4 | 2019-01-03T21:34:34.000Z | 2019-10-04T15:20:49.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import datetime
import mock
import re
import tempfile
import unittest
from tempestmail import Config
from tempestmail import Mail
from tempestmail import TempestMailCmd
| 44.43662 | 123 | 0.581933 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import datetime
import mock
import re
import tempfile
import unittest
from tempestmail import Config
from tempestmail import Mail
from tempestmail import TempestMailCmd
class MailTest(unittest.TestCase):
    """Unit tests for Mail: template rendering, recipient filtering
    (including topic tags), and dispatch via API server vs local SMTP."""
    def setUp(self):
        """Build a fresh Config, result data dict, and expected HTML."""
        self.config = self._generate_config()
        self.data = self._generate_data()
        self.render_output = self._get_render_template_output()
        self.maxDiff = None
    def _generate_config(self):
        """Return a Config with two catch-all recipients and API sending on."""
        config = Config()
        config.mail_from = 'tripleoresults@gmail.com'
        config.templates_path = 'tests/fixtures/'
        config.log_url = 'http://logs.openstack.org/periodic/'
        config.api_server = 'http://tempest-tripleoci.rhcloud.com/api/v1.0/sendmail'
        config.use_api_server = True
        config.default_log_url = 'http://logs.openstack.org'
        config.username = ''
        config.password = ''
        config.smtp = ''
        config.require_auth = True
        config.emails = [
            {'mail': 'email1@example.com', 'name': 'name 1',
             'jobs': [], 'regex': [], 'topics': ''},
            {'mail': 'email2@example.com', 'name': 'name 2',
             'jobs': [], 'regex': [], 'topics': ''}
        ]
        config.template = 'template.html'
        return config
    def _get_render_template_output(self):
        """Return the exact HTML the fixture template must render to."""
        output = (u'<html>\n <head></head>\n <body>\n '
                  '<p>Hello,</p>\n <p>Here\'s the result of the latest '
                  'tempest run for job '
                  'periodic-tripleo-ci-centos-7-ovb-ha-tempest.</p>\n '
                  '<p>The job ran on 2017-01-19 08:27:00.</p>\n '
                  '<p>For more details, you can check the URL: '
                  'http://logs.openstack.org/periodic/periodic-tripleo-ci-'
                  'centos-7-ovb-ha-tempest/1ce5e95/console.html (It might take '
                  'a few minutes to upload the logs).</p>\n \n '
                  '<h2>New failures</h2>\n <ul>\n \n <li>'
                  'tempest.api.object_storage.test_container_quotas.'
                  'ContainerQuotasTest.test_upload_too_many_objects</li>'
                  '\n \n <li>tempest.api.object_storage.test_'
                  'container_quotas.ContainerQuotasTest.'
                  'test_upload_valid_object</li>\n \n </ul>\n \n\n '
                  '\n \n \n <p></p>\n <p>You are receiving this '
                  'email because someone from TripleO team though you were '
                  'interested in these results.</p>\n <p>\n '
                  '</body>\n</html>\n')
        return output
    def _generate_data(self):
        """Return a parsed-run data dict with two new test failures."""
        data = {
            'errors': [],
            'run': True,
            'failed': [
                u'tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects',
                u'tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object'
            ],
            'job': 'periodic-tripleo-ci-centos-7-ovb-ha-tempest',
            'link': u'http://logs.openstack.org/periodic/periodic-tripleo-ci-centos-7-ovb-ha-tempest/1ce5e95/console.html',
            'covered': [],
            'date': datetime.datetime(2017, 1, 19, 8, 27),
            'new': [
                u'tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects',
                u'tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object'
            ]
        }
        return data
    def test_render_template(self):
        """Rendering the fixture data yields exactly the expected HTML."""
        mail = Mail(self.config)
        content = mail.render_template(self.data)
        self.assertEqual(self.render_output, content)
    def test_filter_emails(self):
        """Recipients are filtered by job name and regex; filtering also
        sets data['has_errors']."""
        mail = Mail(self.config)
        self.assertEqual(self.data.get('has_errors'), None)
        addresses = mail.filter_emails(
            'periodic-tripleo-ci-centos-7-ovb-ha-tempest', self.data)
        self.assertEqual({'': ['email1@example.com', 'email2@example.com']},
                         addresses)
        mail.config.emails[0]['jobs'].append('another-job')
        addresses = mail.filter_emails(
            'periodic-tripleo-ci-centos-7-ovb-ha-tempest', self.data)
        self.assertEqual({'': ['email2@example.com']}, addresses)
        self.assertEqual(self.data['has_errors'], True)
        mail.config.emails[0]['jobs'] = []
        mail.config.emails[0]['regex'].append(re.compile(
            'tempest.some.regex'))
        self.assertEqual({'': ['email2@example.com']}, addresses)
    def test_filter_emails_topics(self):
        """Recipients with topics are grouped under '[topic]' subject tags."""
        mail = Mail(self.config)
        addresses = mail.filter_emails(
            'periodic-tripleo-ci-centos-7-ovb-ha-tempest', self.data)
        self.assertEqual({'': ['email1@example.com',
                               'email2@example.com']},
                         addresses)
        mail.config.emails[0]['jobs'].append(
            'periodic-tripleo-ci-centos-7-ovb-ha-tempest')
        mail.config.emails[0]['regex'].append(re.compile(
            'upload_too_many_objects'))
        mail.config.emails[0]['topics'] = 'many_objects'
        mail.config.emails[1]['regex'].append(re.compile(
            'upload_valid_object'))
        mail.config.emails[1]['topics'] = 'valid_object'
        new = {'mail': 'email2@example.com', 'name': 'name 2',
               'jobs': ['periodic-tripleo-ci-centos-7-ovb-ha-tempest'],
               'regex': [re.compile('upload_valid_object')],
               'topics': 'valid_object,object_storage'}
        mail.config.emails.append(new)
        addresses = mail.filter_emails(
            'periodic-tripleo-ci-centos-7-ovb-ha-tempest', self.data)
        bookaddr = {'[many_objects]': ['email1@example.com'],
                    '[valid_object]': ['email2@example.com'],
                    '[valid_object][object_storage]': ['email2@example.com']}
        self.assertEqual(bookaddr, addresses)
    @mock.patch('tempestmail.Mail._send_mail_api')
    @mock.patch('tempestmail.Mail._send_mail_local')
    def test_send_mail(self, mock_local, mock_api):
        """send_mail dispatches via the API when use_api_server is True,
        otherwise via local SMTP."""
        mail = Mail(self.config)
        mail.send_mail('periodic-tripleo-ci-centos-7-ovb-ha-tempest',
                       self.data, False)
        mock_api.assert_called_with(['email1@example.com',
                                     'email2@example.com'],
                                    self.render_output,
                                    'Job periodic-tripleo-ci-centos-7-ovb-ha-'
                                    'tempest results')
        mock_local.assert_not_called()
        mock_api.reset_mock()
        self.config.use_api_server = False
        mail = Mail(self.config)
        mail.send_mail('periodic-tripleo-ci-centos-7-ovb-ha-tempest',
                       self.data, False)
        mock_local.assert_called_with(['email1@example.com',
                                       'email2@example.com'],
                                      self.render_output,
                                      'Job periodic-tripleo-ci-centos-7-ovb-ha-'
                                      'tempest results', False)
class TestTempestMailCmd(unittest.TestCase):
    """Unit tests for the TempestMailCmd CLI: index/console fetching,
    console parsing into result data, skip-file loading, and config."""
    def setUp(self):
        """Load console fixtures and write a temporary skip file."""
        self.content_job = self._get_content_file(
            'tests/fixtures/content_job.html')
        self.console_ok = self._get_content_file(
            'tests/fixtures/console_ok.log')
        self.console_fail = self._get_content_file(
            'tests/fixtures/console_fail.log')
        self.fd_file, self.tmp_file = tempfile.mkstemp()
        self._populate_skip_file()
    def _get_content_file(self, filename):
        """Return the full contents of *filename*."""
        with open(filename) as f:
            content = f.read()
        return content
    def _populate_skip_file(self):
        """Write a known_failures YAML document into self.tmp_file."""
        content = '''
        known_failures:
        - test: '.*test_external_network_visibility'
          reason: 'Tempest test "external network visibility" fails'
          lp: 'https://bugs.launchpad.net/tripleo/+bug/1577769'
        - test: 'tempest.api.data_processing'
          reason: 'tempest.api.data_processing tests failing on newton'
          bz: 'https://bugzilla.redhat.com/show_bug.cgi?id=1357667'
        - test: 'neutron.tests.tempest.api.test_revisions.TestRevisions'
          reason: 'New test, need investigation'
        '''
        self.skip_file = open(self.tmp_file, 'w')
        self.skip_file.write(content)
        self.skip_file.close()
    @mock.patch('tempestmail.get_html')
    def test_get_index(self, html_mock):
        """get_index extracts job links from HTML and returns [] on any
        fetch/parse failure."""
        tmc = TempestMailCmd()
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master'])
        tmc.setup_logging()
        tmc.setupConfig()
        html_mock.return_value.content.decode.return_value = self.content_job.decode()
        index = tmc.get_index()
        self.assertEqual(
            index,
            [(u'http://logs.openstack.org/periodic/periodic-tripleo-ci'
              '-centos-7-ovb-nonha-tempest-oooq-master/613de4e/')])
        html_mock.return_value.content.decode.return_value = 'No links'
        index = tmc.get_index()
        self.assertEqual(index, [])
        html_mock.return_value = None
        index = tmc.get_index()
        self.assertEqual(index, [])
        html_mock.ok.return_value = None
        index = tmc.get_index()
        self.assertEqual(index, [])
        html_mock.ok.return_value = True
        html_mock.content.return_value = None
        index = tmc.get_index()
        self.assertEqual(index, [])
    @mock.patch('tempestmail.get_html')
    def test_get_console(self, html_mock):
        """get_console reads a local file or fetches a job URL, returning
        (None, None, None) on missing files or HTTP errors."""
        tmc = TempestMailCmd()
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master', '--file',
                             'tests/fixtures/console_ok.log'])
        tmc.setup_logging()
        tmc.setupConfig()
        console, date, log_path = tmc.get_console()
        self.assertEqual(console, self.console_ok)
        self.assertEqual(log_path, None)
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master', '--file',
                             'tests/fixtures/not_found.log'])
        self.assertEqual(tmc.get_console(), (None, None, None))
        html_mock.return_value.status_code = '300'
        result = tmc.get_console(job_url='http://logs.openstack.org')
        self.assertEqual(result, (None, None, None))
        html_mock.return_value.status_code = '200'
        html_mock.return_value.content = self.console_ok
        console, date, url = tmc.get_console(
            job_url='http://logs.openstack.org')
        self.assertEqual(console, self.console_ok.decode('utf-8'))
        self.assertEqual(url, 'http://logs.openstack.org/console.html.gz')
        html_mock.return_value = None
        result = tmc.get_console(job_url='http://logs.openstack.org')
        self.assertEqual(result, (None, None, None))
    def test_get_data(self):
        """get_data parses console text into job/run/link/test-result
        fields; unparsable consoles yield run == False."""
        tmc = TempestMailCmd()
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master', '--file',
                             'tests/fixtures/not_found.log'])
        tmc.setup_logging()
        tmc.setupConfig()
        data = tmc.get_data(self.console_ok, None, 'http://logs.openstack.org')
        self.assertEqual(
            data['job'],
            'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-oooq-master')
        self.assertEqual(data['date'], None)
        self.assertEqual(data['run'], True)
        self.assertEqual(data['link'], 'http://logs.openstack.org')
        self.assertEqual(len(data['ok']), 2)
        self.assertEqual(data.get('failed'), None)
        self.assertEqual(data.get('covered'), None)
        self.assertEqual(data.get('new'), None)
        self.assertEqual(data.get('errors'), None)
        data = tmc.get_data('some content', None, 'http://logs.openstack.org')
        self.assertEqual(data['run'], False)
        data = tmc.get_data(self.console_fail, None,
                            'http://logs.openstack.org')
        self.assertNotEqual(data['failed'], None)
    def test_load_skip_file(self):
        """load_skip_file returns the parsed known_failures entries and
        [] when the configured skip file does not exist."""
        tmc = TempestMailCmd()
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master', '--file',
                             'tests/fixtures/not_found.log', '--skip-file',
                             self.tmp_file])
        tmc.setup_logging()
        tmc.setupConfig()
        result = tmc.load_skip_file(self.tmp_file)
        expected = [
            {'test': '.*test_external_network_visibility',
             'reason': 'Tempest test "external network visibility" fails'},
            {'test': 'tempest.api.data_processing',
             'reason': 'tempest.api.data_processing tests failing on newton'},
            {'test': 'neutron.tests.tempest.api.test_revisions.TestRevisions',
             'reason': 'New test, need investigation'}
        ]
        self.assertEqual(result, expected)
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master', '--file',
                             'tests/fixtures/not_found.log', '--skip-file',
                             'non_exist_file.txt'])
        result = tmc.load_skip_file(self.tmp_file)
        self.assertEqual(result, [])
    def test_setup_config(self):
        """setupConfig populates tmc.config from the YAML fixture."""
        tmc = TempestMailCmd()
        tmc.parse_arguments(['-c', 'tests/fixtures/config.yaml', '--job',
                             'periodic-tripleo-ci-centos-7-ovb-nonha-tempest-'
                             'oooq-master', '--file',
                             'tests/fixtures/not_found.log', '--skip-file',
                             self.tmp_file])
        tmc.setup_logging()
        tmc.setupConfig()
        config = tmc.config
        self.assertEqual(config.require_auth, True)
        self.assertEqual(config.mail_from, 'tripleoresults@gmail.com')
        self.assertEqual(config.templates_path, 'template/')
        self.assertEqual(
            config.log_url,
            'http://logs.openstack.org/periodic/')
        self.assertEqual(
            config.api_server,
            'http://tempest-tripleoci.rhcloud.com/api/v1.0/sendmail')
        self.assertEqual(config.use_api_server, True)
        self.assertEqual(config.default_log_url, 'http://logs.openstack.org')
| 14,106 | 650 | 46 |
708da950522d98ebe82a8527d2ea39185351f89c | 473 | py | Python | src/utils/scripts/setRelay.py | wooWoow/koa2-mysql-framework | 0a63843576b0307ccdb5f5ed64a88f7cb9e32291 | [
"MIT"
] | null | null | null | src/utils/scripts/setRelay.py | wooWoow/koa2-mysql-framework | 0a63843576b0307ccdb5f5ed64a88f7cb9e32291 | [
"MIT"
] | null | null | null | src/utils/scripts/setRelay.py | wooWoow/koa2-mysql-framework | 0a63843576b0307ccdb5f5ed64a88f7cb9e32291 | [
"MIT"
] | null | null | null | # coding=utf-8
import RPi.GPIO as GPIO #引入RPi.GPIO模块,并实例化为GPIO,简化后面的模块调用
import time #引入time模块
import sys
GPIO.setmode(GPIO.BOARD) #定义GPIO编码方式
print(len(sys.argv))
if (len(sys.argv) == 3):
param1 = 1 if sys.argv[1].lower() == 'false' else 0
param2 = 1 if sys.argv[2].lower() == 'false' else 0
GPIO.setup(31, GPIO.OUT) #将GPIO设置为输出模式
GPIO.setup(33, GPIO.OUT) #将GPIO设置为输出模式
GPIO.output(31, param1)
GPIO.output(33, param2)
else:
print('参数不足') | 27.823529 | 59 | 0.668076 | # coding=utf-8
import RPi.GPIO as GPIO #引入RPi.GPIO模块,并实例化为GPIO,简化后面的模块调用
import time #引入time模块
import sys
GPIO.setmode(GPIO.BOARD) #定义GPIO编码方式
print(len(sys.argv))
if (len(sys.argv) == 3):
param1 = 1 if sys.argv[1].lower() == 'false' else 0
param2 = 1 if sys.argv[2].lower() == 'false' else 0
GPIO.setup(31, GPIO.OUT) #将GPIO设置为输出模式
GPIO.setup(33, GPIO.OUT) #将GPIO设置为输出模式
GPIO.output(31, param1)
GPIO.output(33, param2)
else:
print('参数不足') | 0 | 0 | 0 |
9692dc8ae7941f96d6737b187a6177dca31f2a9d | 1,981 | py | Python | preprocessings.py | diningphilosophers5/Attendance-System-II | a26f5556e16376ad0c40d02fb4864bdede313bad | [
"MIT"
] | null | null | null | preprocessings.py | diningphilosophers5/Attendance-System-II | a26f5556e16376ad0c40d02fb4864bdede313bad | [
"MIT"
] | null | null | null | preprocessings.py | diningphilosophers5/Attendance-System-II | a26f5556e16376ad0c40d02fb4864bdede313bad | [
"MIT"
] | null | null | null | import argparse
import cv2
import numpy as np
import os
import random
import shutil
import openface
import openface.helper
from openface.data import iterImgs
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '.', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
| 30.476923 | 99 | 0.654215 | import argparse
import cv2
import numpy as np
import os
import random
import shutil
import openface
import openface.helper
from openface.data import iterImgs
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '.', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def write(vals, fName):
    """Serialise *vals* (an iterable of rows) to *fName*, one
    comma-separated line per row.

    If *fName* already exists it is first renamed to ``<fName>.bak`` so the
    previous contents survive the overwrite.
    """
    if os.path.isfile(fName):
        print("{} exists. Backing up.".format(fName))
        os.rename(fName, "{}.bak".format(fName))
    with open(fName, 'w') as out:
        for row in vals:
            out.write(",".join(str(field) for field in row) + "\n")
def alignMain(outputDir, inputDir):
    """Face-align every image found under *inputDir* and save the results.

    Each image is cropped to a 96x96 aligned face with openface's dlib-based
    aligner (outer-eyes-and-nose landmarks) and written to
    ``<outputDir>/<class>/<name>.png``.  Images where no face is detected
    are silently skipped.
    """
    openface.helper.mkdirP(outputDir)
    size = 96  # output edge length (pixels) passed to the aligner
    imgs = list(iterImgs(inputDir))
    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)
    landmarkMap = {
        'outerEyesAndNose': openface.AlignDlib.OUTER_EYES_AND_NOSE,
        'innerEyesAndBottomLip': openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP
    }
    landmarkIndices = landmarkMap['outerEyesAndNose']
    dlibFacePredictor = os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat")
    align = openface.AlignDlib(dlibFacePredictor)
    nFallbacks = 0  # NOTE(review): never updated or read -- apparently dead
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        # mirror the input's per-class sub-directory layout in the output tree
        outDir = os.path.join(outputDir, imgObject.cls)
        openface.helper.mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        imgName = outputPrefix + ".png"
        rgb = imgObject.getRGB()
        if rgb is None:
            outRgb = None
        else:
            outRgb = align.align(size, rgb,
                                 landmarkIndices=landmarkIndices)
        if outRgb is not None:
            # openface returns RGB; OpenCV writes BGR, so convert before saving
            outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
            cv2.imwrite(imgName, outBgr)
def run_script():
os.system("./batch-represent/main.lua -outDir ./generated-embeddings/ -data ./aligned-images/") | 1,549 | 0 | 69 |
e04c45c0ce61a4e19d0552a4783620f7e54a2967 | 1,154 | py | Python | lambdata_phatdeluxe/df_utils.py | Phatdeluxe/lambdata_unit3 | 1e95e8500556787622448ac7ca24ab968db98ae9 | [
"MIT"
] | null | null | null | lambdata_phatdeluxe/df_utils.py | Phatdeluxe/lambdata_unit3 | 1e95e8500556787622448ac7ca24ab968db98ae9 | [
"MIT"
] | 3 | 2020-03-31T09:22:59.000Z | 2021-06-02T00:50:59.000Z | lambdata_phatdeluxe/df_utils.py | Phatdeluxe/lambdata_unit3 | 1e95e8500556787622448ac7ca24ab968db98ae9 | [
"MIT"
] | null | null | null | """
utility functions for working with DataFrames
"""
import pandas as pd
from sklearn.model_selection import train_test_split
class Data:
    """
    Convenience wrapper around a pandas DataFrame (held as ``self.df``).
    """

    def __init__(self, df):
        """Store *df* (a pandas DataFrame) for the helper methods below.

        Restored: the methods read ``self.df`` but this copy of the class
        was missing its constructor, so ``Data(df)`` raised TypeError.
        """
        self.df = df

    def check_null(self):
        """
        Print each column that contains nulls together with its null count.
        """
        columns = self.df.columns
        null_list = []
        for column in columns:
            if self.df[column].isnull().sum() > 0:
                null_list.append({column: self.df[column].isnull().sum()})
        for i in range(0, len(null_list)):
            print(null_list[i], '\n')

    def split(self):
        """
        Split ``self.df`` into train/val/test (64%/16%/20%, random_state=42).
        """
        train, test = train_test_split(self.df, random_state=42, test_size=0.2)
        train, val = train_test_split(train, random_state=42, test_size=0.2)
        return train, val, test
def add_to_df(to_series, name, df):
    """Attach *to_series* (any list-like) to *df* in place as a new
    column called *name*."""
    df[name] = pd.Series(to_series)
| 26.837209 | 79 | 0.601386 | """
utility functions for working with DataFrames
"""
import pandas as pd
from sklearn.model_selection import train_test_split
class Data:
    """
    Convenience wrapper around a pandas DataFrame (held as ``self.df``).
    """
    def __init__(self, df):
        # df: the pandas DataFrame all helper methods operate on
        self.df = df
    def check_null(self):
        """
        Print each column that contains nulls together with its null count.
        """
        columns = self.df.columns
        null_list = []
        for column in columns:
            if self.df[column].isnull().sum() > 0:
                null_list.append({column: self.df[column].isnull().sum()})
        for i in range(0, len(null_list)):
            print(null_list[i], '\n')
    def split(self):
        """
        Split ``self.df`` into train/val/test (64%/16%/20%, random_state=42).
        """
        train, test = train_test_split(self.df, random_state=42, test_size=0.2)
        train, val = train_test_split(train, random_state=42, test_size=0.2)
        return train, val, test
def add_to_df(to_series, name, df):
    """
    Attach *to_series* (any list-like) to *df* in place as a new column *name*.
    """
    new_col = pd.Series(to_series)
    df[name] = new_col
| 23 | 0 | 26 |
5da0193f9398e22f438f85011b08e4509d8a7d9d | 17,665 | py | Python | pelicun/auto.py | kuanshi/pelicun | fd89bb13a337faba61247f7ba1346baa25c33d96 | [
"BSD-3-Clause"
] | 20 | 2019-03-01T21:51:15.000Z | 2022-02-25T11:51:13.000Z | pelicun/auto.py | kuanshi/pelicun | fd89bb13a337faba61247f7ba1346baa25c33d96 | [
"BSD-3-Clause"
] | 7 | 2018-12-04T18:55:39.000Z | 2021-09-26T00:20:34.000Z | pelicun/auto.py | kuanshi/pelicun | fd89bb13a337faba61247f7ba1346baa25c33d96 | [
"BSD-3-Clause"
] | 14 | 2018-10-26T21:48:44.000Z | 2022-02-28T01:53:40.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# Adam Zsarnóczay
"""
This module has classes and methods that auto-populate DL models.
.. rubric:: Contents
.. autosummary::
auto_populate
"""
from .base import *
import importlib
import json
from pathlib import Path
ap_DesignLevel = {
1940: 'Pre-Code',
1940: 'Low-Code',
1975: 'Moderate-Code',
2100: 'High-Code'
}
ap_DesignLevel_W1 = {
0: 'Pre-Code',
0: 'Low-Code',
1975: 'Moderate-Code',
2100: 'High-Code'
}
ap_Occupancy = {
'Other/Unknown': 'RES3',
'Residential - Single-Family': 'RES1',
'Residential - Town-Home': 'RES3',
'Residential - Multi-Family': 'RES3',
'Residential - Mixed Use': 'RES3',
'Office': 'COM4',
'Hotel': 'RES4',
'School': 'EDU1',
'Industrial - Light': 'IND2',
'Industrial - Warehouse': 'IND2',
'Industrial - Heavy': 'IND1',
'Retail': 'COM1',
'Parking' : 'COM10'
}
convert_design_level = {
'High-Code' : 'HC',
'Moderate-Code': 'MC',
'Low-Code' : 'LC',
'Pre-Code' : 'PC'
}
def auto_populate(DL_input_path, EDP_input_path,
DL_method, realization_count, coupled_EDP, event_time,
ground_failure, auto_script_path = None):
"""
Short description
Assumptions:
- The BIM is stored under 'GeneralInformation' or 'GI' in the root of
DL_input
Parameters
----------
DL_input_path:
Returns
-------
DL_input
DL_ap_path
"""
# load the available DL input information
with open(DL_input_path, 'r') as f:
DL_input = json.load(f)
# get the BIM data
BIM = DL_input.get('GeneralInformation', None)
if BIM is None:
raise ValueError(
"No Building Information provided for the auto-population routine."
)
if auto_script_path is not None: # if an external auto pop script is provided
# load the module
ASP = Path(auto_script_path).resolve()
sys.path.insert(0, str(ASP.parent)+'/')
auto_script = importlib.__import__(ASP.name[:-3], globals(), locals(), [], 0)
auto_populate_ext = auto_script.auto_populate
# generate the DL input data
BIM_ap, DL_ap = auto_populate_ext(BIM = BIM)
# add the response model information
DL_ap.update({
'ResponseModel': {
'ResponseDescription': {
'EDP_Distribution': 'empirical',
'Realizations' : realization_count
}
}
})
# add the even time information - if needed
if (('Inhabitants' in DL_ap['LossModel'].keys()) and
(event_time is not None)):
DL_ap['LossModel']['Inhabitants'].update({'EventTime': event_time})
# assemble the extended DL input
DL_input['GeneralInformation'].update(BIM_ap)
DL_input.update({'DamageAndLoss': DL_ap})
# save it to the DL file with the ap suffix
DL_ap_path = DL_input_path[:-5] + '_ap.json'
with open(DL_ap_path, 'w') as f:
json.dump(DL_input, f, indent=2)
# and also return these information
return DL_input, DL_ap_path
else: # otherwise, use the old autopop method
EDP_input = pd.read_csv(EDP_input_path, sep='\s+', header=0,
index_col=0)
is_IM_based = DL_method[-2:] == 'IM'
stories = BIM['NumberOfStories']
# use only 1 story if DM is based on IM
if DL_method == 'HAZUS MH EQ IM':
stories = 1
BIM.update({'NumberOfStories':stories})
# HAZUS Earthquake
if DL_method in ['HAZUS MH EQ', 'HAZUS MH EQ IM']:
bt = BIM['StructureType']
if bt == 'RV.structType':
bt = EDP_input['structType'].values[0]
year_built = BIM['YearBuilt']
if bt not in ['W1', 'W2', 'S3', 'PC1', 'MH']:
if bt not in ['URM']:
if stories <= 3:
bt += 'L'
elif stories <= 7:
bt += 'M'
else:
if bt in ['RM']:
bt += 'M'
else:
bt += 'H'
else:
if stories <= 2:
bt += 'L'
else:
bt += 'M'
if BIM['OccupancyClass'] in ap_Occupancy.keys():
ot = ap_Occupancy[BIM['OccupancyClass']]
else:
ot = BIM['OccupancyClass']
replacementCost = BIM.get('ReplacementCost', 1.0)
replacementTime = BIM.get('ReplacementTime', 1.0)
population = BIM.get('Population', 1.0)
loss_dict = {
'_method': DL_method,
'DamageModel': {
'StructureType': bt
},
'LossModel': {
'DecisionVariables': {
'ReconstructionCost': True,
'ReconstructionTime': True,
'Injuries': True
},
'Inhabitants': {
'OccupancyType': ot,
'PeakPopulation': f'{population}'
},
'ReplacementCost': replacementCost,
'ReplacementTime': replacementTime
},
'ResponseModel': {
'ResponseDescription': {
'Realizations': realization_count,
"CoupledAssessment": coupled_EDP
}
},
"Dependencies": {
"Fragilities": "btw. Performance Groups"
}
}
# add uncertainty if the EDPs are not coupled
if not coupled_EDP:
loss_dict['ResponseModel'].update({
"AdditionalUncertainty": {
"GroundMotion": "0.10",
"Modeling" : "0.20"
}})
if is_IM_based:
loss_dict.update({
"ComponentDataFolder": pelicun_path+"/resources/HAZUS_MH_2.1_EQ_eqv_PGA.hdf"
})
else:
loss_dict['ResponseModel'].update({
'DetectionLimits': {
"PID": "0.20",
"PRD": "0.20"
}})
loss_dict.update({
"ComponentDataFolder": pelicun_path+"/resources/HAZUS_MH_2.1_EQ_story.hdf"
})
if 'W1' in bt:
DesignL = ap_DesignLevel_W1
else:
DesignL = ap_DesignLevel
for year in sorted(DesignL.keys()):
if year_built <= year:
loss_dict['DamageModel'].update(
{'DesignLevel': DesignL[year]})
break
dl = convert_design_level[loss_dict['DamageModel']['DesignLevel']]
if 'C3' in bt:
if dl not in ['LC', 'PC']:
dl = 'LC'
# only one structural component for IM-based approach
if is_IM_based:
FG_S = f'S-{bt}-{dl}-{ot}'
loss_dict.update({
'Components': {
FG_S: [
{'location': '1',
'direction': '1',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}]
}})
# story-based approach
else:
FG_S = f'S-{bt}-{dl}-{ot}'
FG_NSD = f'NSD-{ot}'
FG_NSA = f'NSA-{dl}-{ot}'
loss_dict.update({
'Components': {
FG_S: [
{'location': 'all',
'direction': '1, 2',
#'median_quantity': '{q}'.format(q = 0.5), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'S')/stories/2.),
'unit': 'ea',
'distribution': 'N/A'
}],
FG_NSA: [
{'location': 'all',
'direction': '1',
#'median_quantity': '{q}'.format(q = 1.0), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'NSA')/stories),
'unit': 'ea',
'distribution': 'N/A'
}],
FG_NSD: [
{'location': 'all',
'direction': '1, 2',
#'median_quantity': '{q}'.format(q = 0.5), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'NSD')/stories/2.),
'unit': 'ea',
'distribution': 'N/A'
}]
}})
# if damage from ground failure is included
if ground_failure:
foundation_type = 'S'
FG_GF_H = f'GF-H_{foundation_type}-{bt}'
FG_GF_V = f'GF-V_{foundation_type}-{bt}'
loss_dict['Components'].update({
FG_GF_H: [
{'location': '1',
'direction': '1',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}],
FG_GF_V: [
{'location': '1',
'direction': '3',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}]
})
# define logic that connects ground failure with building damage
loss_dict.update({
'DamageLogic': [
{'type': 'propagate',
'source_FG': FG_GF_H,
'target_FG': FG_S,
'DS_links': {
'1_1': '3_1',
'2_1': '4_1',
'2_2': '4_2'
}
},
{'type': 'propagate',
'source_FG': FG_GF_V,
'target_FG': FG_S,
'DS_links': {
'1_1': '3_1',
'2_1': '4_1',
'2_2': '4_2'
}
}
]
})
# HAZUS Hurricane
elif DL_method == 'HAZUS MH HU':
#TODO: use the HU NJ autopop script by default
pass
elif DL_method == 'FEMA P58':
if BIM.get('AssetType',None) == 'Water_Pipe':
material = BIM['Material']
if material in ['Asbestos cement', 'Cast iron']:
# brittle material
config = 'P0001a'
else:
# ductile material
config = 'P0001b'
segment_count = BIM['SegmentCount']
segment_length = BIM['Segments'][0]['length']
cg_count = int(segment_length / (100 * ft))
quantities = '1'
for s in range(1, cg_count):
quantities += ', 1'
loss_dict = {
"_method" : "FEMA P58",
"ResponseModel" : {
"ResponseDescription": {
"EDP_Distribution" : "empirical",
"Realizations" : "1000", # need to fix this later
"CoupledAssessment": True
}
},
"DamageModel" : {
"CollapseProbability": {
"Value": "0.0",
},
},
"LossModel" : {
"ReplacementCost" : "1",
"ReplacementTime" : "180",
"DecisionVariables": {
"Injuries" : False,
"ReconstructionCost": True,
"ReconstructionTime": True,
"RedTag" : False
},
},
"Dependencies" : {
"CostAndTime" : True,
"Fragilities" : "btw. Damage States",
"Quantities" : "Independent",
"ReconstructionCosts": "Independent",
"ReconstructionTimes": "Independent",
},
"ComponentDataFolder": "c:/Adam/Dropbox/Kutatas/2019 SC Testbeds/Memphis/",
"Components" : {
config: [
{
"location" : "all",
"direction" : "1",
"median_quantity": quantities,
"unit" : "ea",
"distribution" : "N/A",
}
],
}
}
if (('Inhabitants' in loss_dict['LossModel'].keys()) and
(event_time is not None)):
loss_dict['LossModel']['Inhabitants'].update({'EventTime': event_time})
DL_input.update({'DamageAndLoss':loss_dict})
DL_ap_path = DL_input_path[:-5]+'_ap.json'
with open(DL_ap_path, 'w') as f:
json.dump(DL_input, f, indent = 2)
return DL_input, DL_ap_path
| 34.300971 | 105 | 0.441891 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# Adam Zsarnóczay
"""
This module has classes and methods that auto-populate DL models.
.. rubric:: Contents
.. autosummary::
auto_populate
"""
from .base import *
import importlib
import json
from pathlib import Path
ap_DesignLevel = {
1940: 'Pre-Code',
1940: 'Low-Code',
1975: 'Moderate-Code',
2100: 'High-Code'
}
ap_DesignLevel_W1 = {
0: 'Pre-Code',
0: 'Low-Code',
1975: 'Moderate-Code',
2100: 'High-Code'
}
ap_Occupancy = {
'Other/Unknown': 'RES3',
'Residential - Single-Family': 'RES1',
'Residential - Town-Home': 'RES3',
'Residential - Multi-Family': 'RES3',
'Residential - Mixed Use': 'RES3',
'Office': 'COM4',
'Hotel': 'RES4',
'School': 'EDU1',
'Industrial - Light': 'IND2',
'Industrial - Warehouse': 'IND2',
'Industrial - Heavy': 'IND1',
'Retail': 'COM1',
'Parking' : 'COM10'
}
convert_design_level = {
'High-Code' : 'HC',
'Moderate-Code': 'MC',
'Low-Code' : 'LC',
'Pre-Code' : 'PC'
}
def story_scale(stories, comp_type):
    """Return the quantity scaling factor for a building with *stories* stories.

    ``comp_type`` selects the curve: 'NSA' (non-structural
    acceleration-sensitive) uses one table, while 'S' and 'NSD'
    (structural / drift-sensitive) share another.  Story counts outside the
    tabulated ranges fall back to 1.0, and an unrecognised ``comp_type``
    returns None (preserving the original fall-through behaviour).
    """
    if comp_type == 'NSA':
        # exact factors for 1-9 stories
        table = {1: 1.00, 2: 1.22, 3: 1.40, 4: 1.45, 5: 1.50,
                 6: 1.90, 7: 2.05, 8: 2.15, 9: 2.20}
        if stories in table:
            return table[stories]
        if 10 <= stories < 30:
            # linear ramp from 2.30 at 10 stories
            return 2.30 + (stories - 10) * 0.04
        if stories >= 30:
            return 3.10
        return 1.0
    elif comp_type in ['S', 'NSD']:
        # exact factors for 1-9 stories
        table = {1: 1.45, 2: 1.90, 3: 2.50, 4: 2.75, 5: 3.00,
                 6: 3.50, 7: 3.50, 8: 3.50, 9: 4.50}
        if stories in table:
            return table[stories]
        if 10 <= stories < 50:
            # linear ramp from 4.50 at 10 stories
            return 4.50 + (stories - 10) * 0.07
        if stories >= 50:
            return 7.30
        return 1.0
def auto_populate(DL_input_path, EDP_input_path,
DL_method, realization_count, coupled_EDP, event_time,
ground_failure, auto_script_path = None):
"""
Short description
Assumptions:
- The BIM is stored under 'GeneralInformation' or 'GI' in the root of
DL_input
Parameters
----------
DL_input_path:
Returns
-------
DL_input
DL_ap_path
"""
# load the available DL input information
with open(DL_input_path, 'r') as f:
DL_input = json.load(f)
# get the BIM data
BIM = DL_input.get('GeneralInformation', None)
if BIM is None:
raise ValueError(
"No Building Information provided for the auto-population routine."
)
if auto_script_path is not None: # if an external auto pop script is provided
# load the module
ASP = Path(auto_script_path).resolve()
sys.path.insert(0, str(ASP.parent)+'/')
auto_script = importlib.__import__(ASP.name[:-3], globals(), locals(), [], 0)
auto_populate_ext = auto_script.auto_populate
# generate the DL input data
BIM_ap, DL_ap = auto_populate_ext(BIM = BIM)
# add the response model information
DL_ap.update({
'ResponseModel': {
'ResponseDescription': {
'EDP_Distribution': 'empirical',
'Realizations' : realization_count
}
}
})
# add the even time information - if needed
if (('Inhabitants' in DL_ap['LossModel'].keys()) and
(event_time is not None)):
DL_ap['LossModel']['Inhabitants'].update({'EventTime': event_time})
# assemble the extended DL input
DL_input['GeneralInformation'].update(BIM_ap)
DL_input.update({'DamageAndLoss': DL_ap})
# save it to the DL file with the ap suffix
DL_ap_path = DL_input_path[:-5] + '_ap.json'
with open(DL_ap_path, 'w') as f:
json.dump(DL_input, f, indent=2)
# and also return these information
return DL_input, DL_ap_path
else: # otherwise, use the old autopop method
EDP_input = pd.read_csv(EDP_input_path, sep='\s+', header=0,
index_col=0)
is_IM_based = DL_method[-2:] == 'IM'
stories = BIM['NumberOfStories']
# use only 1 story if DM is based on IM
if DL_method == 'HAZUS MH EQ IM':
stories = 1
BIM.update({'NumberOfStories':stories})
# HAZUS Earthquake
if DL_method in ['HAZUS MH EQ', 'HAZUS MH EQ IM']:
bt = BIM['StructureType']
if bt == 'RV.structType':
bt = EDP_input['structType'].values[0]
year_built = BIM['YearBuilt']
if bt not in ['W1', 'W2', 'S3', 'PC1', 'MH']:
if bt not in ['URM']:
if stories <= 3:
bt += 'L'
elif stories <= 7:
bt += 'M'
else:
if bt in ['RM']:
bt += 'M'
else:
bt += 'H'
else:
if stories <= 2:
bt += 'L'
else:
bt += 'M'
if BIM['OccupancyClass'] in ap_Occupancy.keys():
ot = ap_Occupancy[BIM['OccupancyClass']]
else:
ot = BIM['OccupancyClass']
replacementCost = BIM.get('ReplacementCost', 1.0)
replacementTime = BIM.get('ReplacementTime', 1.0)
population = BIM.get('Population', 1.0)
loss_dict = {
'_method': DL_method,
'DamageModel': {
'StructureType': bt
},
'LossModel': {
'DecisionVariables': {
'ReconstructionCost': True,
'ReconstructionTime': True,
'Injuries': True
},
'Inhabitants': {
'OccupancyType': ot,
'PeakPopulation': f'{population}'
},
'ReplacementCost': replacementCost,
'ReplacementTime': replacementTime
},
'ResponseModel': {
'ResponseDescription': {
'Realizations': realization_count,
"CoupledAssessment": coupled_EDP
}
},
"Dependencies": {
"Fragilities": "btw. Performance Groups"
}
}
# add uncertainty if the EDPs are not coupled
if not coupled_EDP:
loss_dict['ResponseModel'].update({
"AdditionalUncertainty": {
"GroundMotion": "0.10",
"Modeling" : "0.20"
}})
if is_IM_based:
loss_dict.update({
"ComponentDataFolder": pelicun_path+"/resources/HAZUS_MH_2.1_EQ_eqv_PGA.hdf"
})
else:
loss_dict['ResponseModel'].update({
'DetectionLimits': {
"PID": "0.20",
"PRD": "0.20"
}})
loss_dict.update({
"ComponentDataFolder": pelicun_path+"/resources/HAZUS_MH_2.1_EQ_story.hdf"
})
if 'W1' in bt:
DesignL = ap_DesignLevel_W1
else:
DesignL = ap_DesignLevel
for year in sorted(DesignL.keys()):
if year_built <= year:
loss_dict['DamageModel'].update(
{'DesignLevel': DesignL[year]})
break
dl = convert_design_level[loss_dict['DamageModel']['DesignLevel']]
if 'C3' in bt:
if dl not in ['LC', 'PC']:
dl = 'LC'
# only one structural component for IM-based approach
if is_IM_based:
FG_S = f'S-{bt}-{dl}-{ot}'
loss_dict.update({
'Components': {
FG_S: [
{'location': '1',
'direction': '1',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}]
}})
# story-based approach
else:
FG_S = f'S-{bt}-{dl}-{ot}'
FG_NSD = f'NSD-{ot}'
FG_NSA = f'NSA-{dl}-{ot}'
loss_dict.update({
'Components': {
FG_S: [
{'location': 'all',
'direction': '1, 2',
#'median_quantity': '{q}'.format(q = 0.5), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'S')/stories/2.),
'unit': 'ea',
'distribution': 'N/A'
}],
FG_NSA: [
{'location': 'all',
'direction': '1',
#'median_quantity': '{q}'.format(q = 1.0), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'NSA')/stories),
'unit': 'ea',
'distribution': 'N/A'
}],
FG_NSD: [
{'location': 'all',
'direction': '1, 2',
#'median_quantity': '{q}'.format(q = 0.5), #/stories),
'median_quantity': '{q}'.format(q = story_scale(stories, 'NSD')/stories/2.),
'unit': 'ea',
'distribution': 'N/A'
}]
}})
# if damage from ground failure is included
if ground_failure:
foundation_type = 'S'
FG_GF_H = f'GF-H_{foundation_type}-{bt}'
FG_GF_V = f'GF-V_{foundation_type}-{bt}'
loss_dict['Components'].update({
FG_GF_H: [
{'location': '1',
'direction': '1',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}],
FG_GF_V: [
{'location': '1',
'direction': '3',
'median_quantity': '1.0',
'unit': 'ea',
'distribution': 'N/A'
}]
})
# define logic that connects ground failure with building damage
loss_dict.update({
'DamageLogic': [
{'type': 'propagate',
'source_FG': FG_GF_H,
'target_FG': FG_S,
'DS_links': {
'1_1': '3_1',
'2_1': '4_1',
'2_2': '4_2'
}
},
{'type': 'propagate',
'source_FG': FG_GF_V,
'target_FG': FG_S,
'DS_links': {
'1_1': '3_1',
'2_1': '4_1',
'2_2': '4_2'
}
}
]
})
# HAZUS Hurricane
elif DL_method == 'HAZUS MH HU':
#TODO: use the HU NJ autopop script by default
pass
elif DL_method == 'FEMA P58':
if BIM.get('AssetType',None) == 'Water_Pipe':
material = BIM['Material']
if material in ['Asbestos cement', 'Cast iron']:
# brittle material
config = 'P0001a'
else:
# ductile material
config = 'P0001b'
segment_count = BIM['SegmentCount']
segment_length = BIM['Segments'][0]['length']
cg_count = int(segment_length / (100 * ft))
quantities = '1'
for s in range(1, cg_count):
quantities += ', 1'
loss_dict = {
"_method" : "FEMA P58",
"ResponseModel" : {
"ResponseDescription": {
"EDP_Distribution" : "empirical",
"Realizations" : "1000", # need to fix this later
"CoupledAssessment": True
}
},
"DamageModel" : {
"CollapseProbability": {
"Value": "0.0",
},
},
"LossModel" : {
"ReplacementCost" : "1",
"ReplacementTime" : "180",
"DecisionVariables": {
"Injuries" : False,
"ReconstructionCost": True,
"ReconstructionTime": True,
"RedTag" : False
},
},
"Dependencies" : {
"CostAndTime" : True,
"Fragilities" : "btw. Damage States",
"Quantities" : "Independent",
"ReconstructionCosts": "Independent",
"ReconstructionTimes": "Independent",
},
"ComponentDataFolder": "c:/Adam/Dropbox/Kutatas/2019 SC Testbeds/Memphis/",
"Components" : {
config: [
{
"location" : "all",
"direction" : "1",
"median_quantity": quantities,
"unit" : "ea",
"distribution" : "N/A",
}
],
}
}
if (('Inhabitants' in loss_dict['LossModel'].keys()) and
(event_time is not None)):
loss_dict['LossModel']['Inhabitants'].update({'EventTime': event_time})
DL_input.update({'DamageAndLoss':loss_dict})
DL_ap_path = DL_input_path[:-5]+'_ap.json'
with open(DL_ap_path, 'w') as f:
json.dump(DL_input, f, indent = 2)
return DL_input, DL_ap_path
| 1,357 | 0 | 23 |
7c9f99c50114d97b564e8565ee1149e9919f9ee9 | 748 | py | Python | test/db.py | zsffq999/helloworld | aba6b4bcbae496766bf371681fe126d11051d40b | [
"Apache-2.0"
] | null | null | null | test/db.py | zsffq999/helloworld | aba6b4bcbae496766bf371681fe126d11051d40b | [
"Apache-2.0"
] | null | null | null | test/db.py | zsffq999/helloworld | aba6b4bcbae496766bf371681fe126d11051d40b | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()


class User(Base):
    """ORM model for the ``user`` table.

    Restored: the script below references ``User`` but the class definition
    was missing from this copy, so it raised NameError at import time.
    """

    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    name = Column(String(20))
    fullname = Column(String(50))
    password = Column(String(50))

    def __init__(self, name):
        self.name = name


engine = create_engine('mysql+pymysql://macd:macd@localhost/test')
User.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
# insert ten demo rows named user0..user9
for i in range(10):
    user = User("user%d" % i)
    session.add(user)
session.commit()
| 22.666667 | 74 | 0.677807 | # -*- coding:utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
def __init__(self, name):
self.name = name
id = Column(Integer, primary_key=True)
name = Column(String(20))
fullname = Column(String(50))
password = Column(String(50))
engine = create_engine('mysql+pymysql://macd:macd@localhost/test')
User.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
for i in range(10):
user = User("user%d" % i)
session.add(user)
session.commit()
| 30 | 200 | 25 |
1c5e8a4827bffafcc15a78439f8ca216f768e5de | 844 | py | Python | facts.d/get_extensions_details.py | akondasif/ZendServerPuppet | e165ebe23c70641334c58d46d899f059a99b252e | [
"Apache-2.0"
] | 6 | 2015-03-18T11:19:43.000Z | 2020-03-24T05:31:30.000Z | facts.d/get_extensions_details.py | akondasif/ZendServerPuppet | e165ebe23c70641334c58d46d899f059a99b252e | [
"Apache-2.0"
] | 56 | 2015-02-23T07:03:59.000Z | 2022-03-22T22:00:33.000Z | facts.d/get_extensions_details.py | akondasif/ZendServerPuppet | e165ebe23c70641334c58d46d899f059a99b252e | [
"Apache-2.0"
] | 18 | 2015-02-19T20:06:24.000Z | 2022-02-22T08:08:18.000Z | #!/usr/bin/python
import json
import os.path
import re
import subprocess
# Emits one "key=value" line per extension attribute, presumably consumed as
# external facts (facts.d) by Puppet/Facter -- NOTE(review): confirm caller.
# NOTE(review): uses dict.iteritems(), i.e. this script is Python 2 only.
zs_api_config_file = '/.zsapi.ini'  # NOTE(review): defined but never used here
zs_api_target = 'localadmin'  # NOTE(review): unused; the target is hard-coded below
if os.path.isfile("/usr/local/zend/bin/zs-client.sh"):
    extensions_details = subprocess.check_output(["/usr/local/zend/bin/zs-client.sh", "configurationExtensionsList", "--target=localadmin", "--output-format=json"])
    ## Strip the PHP notices from the json
    extensions_details = re.sub("Notice:.*\n", "", extensions_details)
    ## Strip the newlines from the json
    extensions_details = re.sub("\n", "", extensions_details)
    arr = json.loads(extensions_details)
    for extension in arr[u"responseData"][u"extensions"]:
        name = extension["name"]
        for key, value in extension.iteritems():
            # only flat scalar attributes become facts; list values are skipped
            if not isinstance(value, list):
                print ('zend_extension_' + key + '_' + name + '=' + value)
| 31.259259 | 162 | 0.701422 | #!/usr/bin/python
import json
import os.path
import re
import subprocess
zs_api_config_file = '/.zsapi.ini'
zs_api_target = 'localadmin'
if os.path.isfile("/usr/local/zend/bin/zs-client.sh"):
extensions_details = subprocess.check_output(["/usr/local/zend/bin/zs-client.sh", "configurationExtensionsList", "--target=localadmin", "--output-format=json"])
## Strip the PHP notices from the json
extensions_details = re.sub("Notice:.*\n", "", extensions_details)
## Strip the newlines from the json
extensions_details = re.sub("\n", "", extensions_details)
arr = json.loads(extensions_details)
for extension in arr[u"responseData"][u"extensions"]:
name = extension["name"]
for key, value in extension.iteritems():
if not isinstance(value, list):
print ('zend_extension_' + key + '_' + name + '=' + value)
| 0 | 0 | 0 |
22355443f41286a7404b51bd88925041c91ec650 | 3,378 | py | Python | tensorflow/python/tpu/tests/tpu_embedding_v2_hd_invalid_input_test.py | Stevanus-Christian/tensorflow | d44afcf5ca16c5d704c66f891b99eac804e7cd14 | [
"Apache-2.0"
] | 3 | 2022-03-09T01:39:56.000Z | 2022-03-30T23:17:58.000Z | tensorflow/python/tpu/tests/tpu_embedding_v2_hd_invalid_input_test.py | Stevanus-Christian/tensorflow | d44afcf5ca16c5d704c66f891b99eac804e7cd14 | [
"Apache-2.0"
] | 1 | 2020-08-01T05:40:12.000Z | 2020-08-01T05:40:12.000Z | tensorflow/python/tpu/tests/tpu_embedding_v2_hd_invalid_input_test.py | Stevanus-Christian/tensorflow | d44afcf5ca16c5d704c66f891b99eac804e7cd14 | [
"Apache-2.0"
] | 1 | 2022-03-22T00:45:15.000Z | 2022-03-22T00:45:15.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
# Standard TF test entry point: force TF2 behavior, then run all test cases.
if __name__ == '__main__':
  v2_compat.enable_v2_behavior()
  test.main()
| 40.214286 | 80 | 0.721433 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
class TPUEmbeddingTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
  """Negative tests: mismatched or under-specified output shapes must raise."""
  def test_build_incorrect_output_shapes(self):
    """build() must reject per-feature shapes that disagree with the API's."""
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    # Output shapes is set in the mid_level_api, but build with incorrect output
    # shapes.
    mid_level_api._output_shapes = [TensorShape((2, 4)) for _ in range(3)]
    with self.assertRaisesRegex(ValueError,
                                'Inconsistent shape founded for input feature'):
      mid_level_api.build([TensorShape([1, 1, 1]) for _ in range(3)])
  def test_enqueue_incorrect_shape_feature(self):
    """enqueue() must reject tensors whose shape conflicts with the configured
    output shapes, even when build() itself succeeded."""
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    sparse = self._create_high_dimensional_sparse_dataset(strategy)
    sparse_iter = iter(
        strategy.experimental_distribute_dataset(
            sparse,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    mid_level_api._output_shapes = [TensorShape((1, 1)) for _ in range(3)]
    # The output shape passed to build method is consistent.
    mid_level_api.build([TensorShape([1, 1, 1]) for _ in range(3)])
    @def_function.function
    def test_fn():
      def step():
        return mid_level_api.dequeue()
      mid_level_api.enqueue(next(sparse_iter), training=False)
      return strategy.run(step)
    # Enqueued tensor has shape inconsistent with the output shape setting.
    with self.assertRaisesRegex(ValueError,
                                'Inconsistent shape founded for input feature'):
      test_fn()
  def test_not_fully_defined_output_shapes_in_feature_config(self):
    """build() must fail when the configured output shapes are unknown."""
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    # Feature config sets undefined output shapes
    mid_level_api._output_shapes = [TensorShape(None) for _ in range(3)]
    with self.assertRaisesRegex(ValueError, 'Input Feature'):
      mid_level_api.build()
  def test_not_fully_defined_output_shapes_for_build(self):
    """build() must fail when passed shapes with undefined dimensions."""
    _, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    # Build with undefined output shape
    with self.assertRaisesRegex(ValueError, 'Input Feature'):
      mid_level_api.build([TensorShape([1, None, None]) for _ in range(3)])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| 2,062 | 48 | 123 |
a6799c79a8ee7856f473650d48d520d46381fe53 | 1,211 | py | Python | utils/errors.py | thatoneolib/senko | 686d768f8bc0c69a874dba180abb85049ff473b9 | [
"MIT"
] | null | null | null | utils/errors.py | thatoneolib/senko | 686d768f8bc0c69a874dba180abb85049ff473b9 | [
"MIT"
] | null | null | null | utils/errors.py | thatoneolib/senko | 686d768f8bc0c69a874dba180abb85049ff473b9 | [
"MIT"
] | null | null | null | __all__ = ("QuietExit", "EmbedExit")
class QuietExit(Exception):
    """
    Sentinel exception that the error handler registered in
    :ref:`cogs_error_handlers` swallows without any output.

    Raising it aborts a command from deep inside a nested call
    without having to thread return values back up the stack.
    """
class EmbedExit(Exception):
    r"""
    An exception that can be used to show a custom error message.
    The keyword arguments passed into the constructor of this
    exception are propagated into :func:`~senko.CommandContext.embed`
    by the error handler defined for this exception.
    See :func:`~cogs.error_handlers.handlers.handle_embed_exit`.
    Examples
    --------
    .. code-block:: python3
        # Somewhere inside a command.
        _ = ctx.locale
        raise EmbedExit(
            description=_("This is the embed description."),
            fields=[dict(name=_("It is fully localized."), value=_("How neat!"))]
        )
    Parameters
    ----------
    \*\*kwargs
        The same keyword arguments as accepted by
        :func:`~senko.CommandContext.embed`.
    """
    def __init__(self, **kwargs):
        # Bug fix: the kwargs were never stored, so the error handler had
        # nothing to forward to ctx.embed() despite the documented contract.
        # Retained verbatim for the handler to propagate.
        self.kwargs = kwargs
| 25.229167 | 81 | 0.642444 | __all__ = ("QuietExit", "EmbedExit")
class QuietExit(Exception):
    """
    An exception that is silently ignored by its error handler added
    in :ref:`cogs_error_handlers`.
    The primary purpose of this class is to allow a command to be exited
    from within a nested call without having to propagate return values.
    """
    # No state of its own: the exception type alone carries the signal.
    pass
class EmbedExit(Exception):
    r"""
    An exception that can be used to show a custom error message.
    The keyword arguments passed into the constructor of this
    exception are propagated into :func:`~senko.CommandContext.embed`
    by the error handler defined for this exception.
    See :func:`~cogs.error_handlers.handlers.handle_embed_exit`.
    Examples
    --------
    .. code-block:: python3
        # Somewhere inside a command.
        _ = ctx.locale
        raise EmbedExit(
            description=_("This is the embed description."),
            fields=[dict(name=_("It is fully localized."), value=_("How neat!"))]
        )
    Parameters
    ----------
    \*\*kwargs
        The same keyword arguments as accepted by
        :func:`~senko.CommandContext.embed`.

    Attributes
    ----------
    kwargs : dict
        The keyword arguments exactly as passed to the constructor.
    """
    def __init__(self, **kwargs):
        # Stored unchanged; the error handler forwards them to ctx.embed().
        self.kwargs = kwargs
| 37 | 0 | 27 |
255158596cc8b558d83ff8ae233bca9480a6d9b2 | 1,012 | py | Python | test2/test_phb_ecn.py | sambhu025/Gihub_Actions_and_pyTest | 0eb076cf88b87567fad1b95d064564fc25179eda | [
"MIT"
] | null | null | null | test2/test_phb_ecn.py | sambhu025/Gihub_Actions_and_pyTest | 0eb076cf88b87567fad1b95d064564fc25179eda | [
"MIT"
] | null | null | null | test2/test_phb_ecn.py | sambhu025/Gihub_Actions_and_pyTest | 0eb076cf88b87567fad1b95d064564fc25179eda | [
"MIT"
] | null | null | null | import pytest
from abstract_open_traffic_generator.flow import *
from abstract_open_traffic_generator.flow_ipv4 import *
from abstract_open_traffic_generator.config import Config
from abstract_open_traffic_generator.control import *
def test_phb_ecn(serializer, api, tx_port, rx_port):
    """Verify that PHB and ECN values can be configured on an IPv4 header."""
    # DSCP priority: PHB pattern over CS2/CS1/CS5, fixed ECN value ECT(1).
    phb_pattern = Pattern([Dscp.PHB_CS2, Dscp.PHB_CS1, Dscp.PHB_CS5])
    ecn_pattern = Pattern(Dscp.ECN_CAPABLE_TRANSPORT_1)
    ip_header = Ipv4(priority=Priority(Dscp(phb=phb_pattern, ecn=ecn_pattern)))
    # One flow from tx_port to rx_port carrying Ethernet + the IPv4 header.
    endpoint = PortTxRx(tx_port_name=tx_port.name, rx_port_name=rx_port.name)
    traffic = Flow(name='Ipv4 with Phb and Ecn',
                   tx_rx=TxRx(endpoint),
                   packet=[Header(Ethernet()), Header(ip_header)])
    # Push the configuration to the traffic generator.
    full_config = Config(ports=[tx_port, rx_port], flows=[traffic])
    api.set_state(State(ConfigState(config=full_config, state='set')))
if __name__ == '__main__':
pytest.main(['-s', __file__])
| 37.481481 | 72 | 0.6917 | import pytest
from abstract_open_traffic_generator.flow import *
from abstract_open_traffic_generator.flow_ipv4 import *
from abstract_open_traffic_generator.config import Config
from abstract_open_traffic_generator.control import *
def test_phb_ecn(serializer, api, tx_port, rx_port):
    """
    This will test that phb and ecn are set on an ipv4 header
    """
    # Single flow from tx_port to rx_port.
    port_endpoint = PortTxRx(tx_port_name=tx_port.name,
                             rx_port_name=rx_port.name)
    # DSCP priority: PHB pattern over CS2/CS1/CS5, fixed ECN value ECT(1).
    dscp = Dscp(phb=Pattern([Dscp.PHB_CS2, Dscp.PHB_CS1, Dscp.PHB_CS5]),
                ecn=Pattern(Dscp.ECN_CAPABLE_TRANSPORT_1))
    priority = Priority(dscp)
    ipv4 = Ipv4(priority=priority)
    flow = Flow(name='Ipv4 with Phb and Ecn',
                tx_rx=TxRx(port_endpoint),
                packet=[Header(Ethernet()), Header(ipv4)])
    config = Config(ports=[tx_port, rx_port], flows=[flow])
    # Push the configuration to the traffic generator under test.
    api.set_state(State(ConfigState(config=config, state='set')))
if __name__ == '__main__':
pytest.main(['-s', __file__])
| 0 | 0 | 0 |
b0a989af5065776baa4aef79c3669d9eef7fd5a4 | 629 | py | Python | utuby/utuby.py | swipswaps/utuby | 464ec6c4b9093586efb8edbb04d2d19cb6f3759e | [
"MIT"
] | 2 | 2020-08-12T14:35:57.000Z | 2020-12-16T10:13:44.000Z | utuby/utuby.py | swipswaps/utuby | 464ec6c4b9093586efb8edbb04d2d19cb6f3759e | [
"MIT"
] | 2 | 2019-05-30T11:53:44.000Z | 2021-05-28T05:34:55.000Z | utuby/utuby.py | swipswaps/utuby | 464ec6c4b9093586efb8edbb04d2d19cb6f3759e | [
"MIT"
] | 3 | 2021-02-07T15:38:39.000Z | 2021-03-19T07:11:41.000Z | from utuby.youtube_video_info import info
from utuby.youtube_comments import comments
from utuby.utils import *
class youtube:
    """
    Collects info. and comments from the multi-media content in YouTube when url is given.
    :param youtubeid: Unique identification for every multimedia in YouTube.
    """
    def __init__(self, url):
        # Bug fix: the constructor had been stripped, leaving the class inert
        # despite the documented collect-on-construction behavior.
        # `datetime` is expected to come from `utuby.utils` (star import) --
        # TODO confirm.
        start_time = datetime.now()
        # Delegate collection to the two mixin initialisers.
        info.__init__(self, url)
        comments.__init__(self, url)
        time_delta = datetime.now() - start_time
        print(f"\nCalculating time taken to extract info. of input url: {time_delta.seconds} seconds")
| 34.944444 | 130 | 0.683625 | from utuby.youtube_video_info import info
from utuby.youtube_comments import comments
from utuby.utils import *
class youtube:
    """
    Aggregates metadata and comments for the YouTube media behind *url*.

    Construction delegates to the ``info`` and ``comments`` initialisers
    and reports how long the whole extraction took.
    """
    def __init__(self, url):
        started = datetime.now()
        info.__init__(self, url)
        comments.__init__(self, url)
        elapsed = datetime.now() - started
        print('\n' + str("Calculating time taken to extract info. of input url") + ": " + str(elapsed.seconds) + " seconds")
| 289 | 0 | 27 |
b4172a18badbbef588ff2f4a5bf1d36005c072d1 | 83 | py | Python | topics/modifierstate/ModifierType.py | CydrickT/HomeAutomation | e2a14d749c26a6dd0a96e5cdd8e6d715e57b75e3 | [
"MIT"
] | null | null | null | topics/modifierstate/ModifierType.py | CydrickT/HomeAutomation | e2a14d749c26a6dd0a96e5cdd8e6d715e57b75e3 | [
"MIT"
] | 3 | 2021-06-02T02:21:51.000Z | 2022-03-12T00:39:28.000Z | topics/modifierstate/ModifierType.py | CydrickT/HomeAutomation | e2a14d749c26a6dd0a96e5cdd8e6d715e57b75e3 | [
"MIT"
] | null | null | null | from enum import Enum
| 13.833333 | 25 | 0.686747 | from enum import Enum
class ModifierType(Enum):
Increase = 1
Decrease = 2 | 0 | 38 | 23 |
0897abad274eed1af347739ba694289f0926177f | 366 | py | Python | tests/index.py | ankingcodes/CP-CLI | 909cc1c2a0f651d0ed9bd0cff13b70e9b213ebd3 | [
"MIT"
] | 30 | 2020-07-03T15:59:17.000Z | 2021-08-30T05:51:17.000Z | tests/index.py | ankingcodes/CP-CLI | 909cc1c2a0f651d0ed9bd0cff13b70e9b213ebd3 | [
"MIT"
] | 83 | 2020-01-29T17:20:44.000Z | 2021-09-15T12:49:01.000Z | tests/index.py | ankingcodes/CP-CLI | 909cc1c2a0f651d0ed9bd0cff13b70e9b213ebd3 | [
"MIT"
] | 30 | 2020-11-26T07:52:57.000Z | 2021-09-19T06:32:53.000Z | import codemoninitdir
# Minimal hand-rolled test runner: tallies pass/fail and prints a summary.
if __name__ == "__main__":
    success, failed = 0, 0
    # test 1
    print("Running test 'Codemon init dir':")
    # codemoninitdir() signals success with a shell-style 0 exit code.
    if codemoninitdir.codemoninitdir() == 0: success += 1
    else: failed += 1
    # print test results
    print("***** Test results *****")
    print("Total: ", success + failed)
    print("Success: ", success)
    print("Failed: ", failed)
| 22.875 | 55 | 0.625683 | import codemoninitdir
if __name__ == "__main__":
success, failed = 0, 0
# test 1
print("Running test 'Codemon init dir':")
if codemoninitdir.codemoninitdir() == 0: success += 1
else: failed += 1
# print test results
print("***** Test results *****")
print("Total: ", success + failed)
print("Success: ", success)
print("Failed: ", failed)
| 0 | 0 | 0 |
708084b1ee1685f67a0b6c5fade84fa298134490 | 2,439 | py | Python | eval.py | CryptoSalamander/DeepFake-Detection | f3b1c95ce1955a4c203a9f3d1279c5fbade66684 | [
"MIT"
] | null | null | null | eval.py | CryptoSalamander/DeepFake-Detection | f3b1c95ce1955a4c203a9f3d1279c5fbade66684 | [
"MIT"
] | null | null | null | eval.py | CryptoSalamander/DeepFake-Detection | f3b1c95ce1955a4c203a9f3d1279c5fbade66684 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import csv
import math
import numpy as np
from glob import glob
from pathlib import Path
from sklearn.metrics import log_loss
# Running confusion-matrix counters; FAKE is treated as the positive class
# (result csv column 1 is the predicted fake-probability).
total = 0
correct = 0
true_positive = 0
true_negative = 0
false_positive = 0
false_negative = 0
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Evaluate Models")
    arg = parser.add_argument
    arg('--fake-threshold', type=float, default=0.5, required=False, help="Fake Threshold")
    arg('--real-threshold', type=float, default=0.5, required=False,
        help="Real Threshold")
    arg('--result-path', type=str, required=True, help="result file path")
    arg('--answer-json', type=str, required=False, default="output.json", help="answer json")
    args = parser.parse_args()
    FAKE_thres = args.fake_threshold
    REAL_thres = args.real_threshold
    y = []       # ground-truth labels: 1 = FAKE, 0 = REAL
    y_pred = []  # predicted fake-probabilities, for log-loss below
    with open(args.answer_json) as json_file:
        json_data = json.load(json_file)
    for csv_path in glob(os.path.join(args.result_path, "*.csv")):
        with open(csv_path, "r") as f:
            rdr = csv.reader(f)
            next(rdr)  # skip the csv header row
            for line in rdr:
                total += 1
                json_object = json_data[line[0]]
                score = float(line[1])
                if json_object['label'] == 'FAKE':
                    y.append(1)
                    y_pred.append(score)
                    if score >= FAKE_thres:
                        correct += 1
                        true_positive += 1
                    else:
                        # Bug fix: a FAKE sample predicted REAL is a miss
                        # (false negative), not a false positive.
                        false_negative += 1
                elif json_object['label'] == 'REAL':
                    y.append(0)
                    y_pred.append(score)
                    if score < REAL_thres:
                        correct += 1
                        true_negative += 1
                    else:
                        # Bug fix: a REAL sample predicted FAKE is a false
                        # alarm (false positive), not a false negative.
                        false_positive += 1
    precision = true_positive / (true_positive + false_positive)
    recall = true_positive / (true_positive + false_negative)
    print('Accuracy \t',correct/total)
    print('Precision\t', precision)
    print('Recall\t\t', recall)
    print('F1 Score\t', 2*(precision * recall) / (precision + recall))
    print('Fall-out\t', false_positive / (true_negative + false_positive))
print('Log-Loss\t', log_loss(y,y_pred)) | 38.109375 | 93 | 0.551866 | import argparse
import json
import os
import csv
import math
import numpy as np
from glob import glob
from pathlib import Path
from sklearn.metrics import log_loss
total = 0
correct = 0
true_positive = 0
true_negative = 0
false_positive = 0
false_negative = 0
if __name__=="__main__":
parser = argparse.ArgumentParser("Evaluate Models")
arg = parser.add_argument
arg('--fake-threshold', type=float, default=0.5, required=False, help="Fake Threshold")
arg('--real-threshold', type=float, default=0.5, required=False,
help="Real Threshold")
arg('--result-path', type=str, required=True, help="result file path")
arg('--answer-json', type=str, required=False, default="output.json", help="answer json")
args = parser.parse_args()
FAKE_thres = args.fake_threshold
REAL_thres = args.real_threshold
y = []
y_pred = []
with open(args.answer_json) as json_file:
json_data = json.load(json_file)
for csv_path in glob(os.path.join(args.result_path, "*.csv")):
dir = Path(csv_path).parent
with open(csv_path, "r") as f:
rdr = csv.reader(f)
next(rdr)
for line in rdr:
total += 1
json_object = json_data[line[0]]
if json_object['label'] == 'FAKE':
y.append(1)
y_pred.append(float(line[1]))
if float(line[1]) >= FAKE_thres:
correct += 1
true_positive += 1
else:
false_positive += 1
elif json_object['label'] == 'REAL':
y.append(0)
y_pred.append(float(line[1]))
if float(line[1]) < REAL_thres:
correct += 1
true_negative += 1
else:
false_negative += 1
precision = true_positive / (true_positive + false_positive)
recall = true_positive / (true_positive + false_negative)
print('Accuracy \t',correct/total)
print('Precision\t', precision)
print('Recall\t\t', recall)
print('F1 Score\t', 2*(precision * recall) / (precision + recall))
print('Fall-out\t', false_positive / (true_negative + false_positive))
print('Log-Loss\t', log_loss(y,y_pred)) | 0 | 0 | 0 |
fe09f275fc79fd3b33936f581aa4966b6dace6f3 | 781 | py | Python | scripts/anime_heads_data.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | null | null | null | scripts/anime_heads_data.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | null | null | null | scripts/anime_heads_data.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | 1 | 2020-11-30T06:11:02.000Z | 2020-11-30T06:11:02.000Z | import os
import argparse
from pathlib import Path
parser = argparse.ArgumentParser(description='')
ori_data_dir = '../data/full_data'
'''
data_dir -
tmp -
imgs
tags.csv
'''
if __name__ == "__main__":
    parser.add_argument('--data_dir', dest='data_dir', help='Please specify an empty folder', default='/root/data/anime_heads', type=str)
    args = parser.parse_args()
    # Stage the archive under <data_dir>/tmp, then move pieces into place.
    data_dir = Path(args.data_dir)
    tmp_dir = data_dir/'tmp'
    # NOTE(review): os.system with unquoted interpolated paths breaks on
    # spaces and is shell-injection prone -- consider shutil / subprocess.run.
    os.system(f"mkdir -p {tmp_dir}")
    os.system(f"cp {ori_data_dir}/data.zip {tmp_dir}")
    # Extract, then move images and the tag file to their final layout.
    os.system(f"7z x {tmp_dir/'data.zip'} -o{tmp_dir}")
    os.system(f"mv {tmp_dir/'extra_data/images'} {data_dir/'imgs'}")
    os.system(f"mv {tmp_dir/'extra_data/tags.csv'} {data_dir}")
    os.system(f"rm -rf {tmp_dir}")
os.system(f"rm -rf {tmp_dir}") | 30.038462 | 137 | 0.65557 | import os
import argparse
from pathlib import Path
parser = argparse.ArgumentParser(description='')
ori_data_dir = '../data/full_data'
'''
data_dir -
tmp -
imgs
tags.csv
'''
if __name__ == "__main__":
parser.add_argument('--data_dir', dest='data_dir', help='Please specify an empty folder', default='/root/data/anime_heads', type=str)
args = parser.parse_args()
data_dir = Path(args.data_dir)
tmp_dir = data_dir/'tmp'
os.system(f"mkdir -p {tmp_dir}")
os.system(f"cp {ori_data_dir}/data.zip {tmp_dir}")
os.system(f"7z x {tmp_dir/'data.zip'} -o{tmp_dir}")
os.system(f"mv {tmp_dir/'extra_data/images'} {data_dir/'imgs'}")
os.system(f"mv {tmp_dir/'extra_data/tags.csv'} {data_dir}")
os.system(f"rm -rf {tmp_dir}") | 0 | 0 | 0 |
30908500369f1530b583b67f3ffd12579e77fe00 | 697 | py | Python | setup.py | lalisalala/Music | 8a04bab596ca1853650952b4772eb4a8ccd17c5a | [
"MIT"
] | 1 | 2022-02-07T06:39:02.000Z | 2022-02-07T06:39:02.000Z | setup.py | lalisalala/Music | 8a04bab596ca1853650952b4772eb4a8ccd17c5a | [
"MIT"
] | null | null | null | setup.py | lalisalala/Music | 8a04bab596ca1853650952b4772eb4a8ccd17c5a | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="lisenta",
version="0.0.1",
author="Lisa-Yao Gan",
author_email="ga27bil@mytum.de",
description="A fun little program that lets you transcribe and generate piano music",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gitlab.ldv.ei.tum.de/komcrea/musik/-/tree/master",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Windows 64 Bit",
],
python_requires='=3.7',
) | 31.681818 | 89 | 0.66858 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="lisenta",
version="0.0.1",
author="Lisa-Yao Gan",
author_email="ga27bil@mytum.de",
description="A fun little program that lets you transcribe and generate piano music",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gitlab.ldv.ei.tum.de/komcrea/musik/-/tree/master",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Windows 64 Bit",
],
python_requires='=3.7',
) | 0 | 0 | 0 |
b621ee6e8bf9fb9a75b8974ed4f4009256805676 | 1,889 | py | Python | pypy/interpreter/generator.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/interpreter/generator.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/interpreter/generator.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import Wrappable
class GeneratorIterator(Wrappable):
"An iterator created by a generator."
def descr__iter__(self):
"""x.__iter__() <==> iter(x)"""
return self.space.wrap(self)
def descr_next(self):
"""x.next() -> the next value, or raise StopIteration"""
space = self.space
if self.running:
raise OperationError(space.w_ValueError,
space.wrap('generator already executing'))
if self.frame.frame_finished_execution:
raise OperationError(space.w_StopIteration, space.w_None)
self.running = True
try:
try:
w_result = self.frame.execute_generator_frame(space.w_None)
except OperationError:
# errors finish a frame
self.frame.frame_finished_execution = True
raise
# if the frame is now marked as finished, it was RETURNed from
if self.frame.frame_finished_execution:
raise OperationError(space.w_StopIteration, space.w_None)
else:
return w_result # YIELDed
finally:
self.frame.f_back = None
self.running = False
| 34.345455 | 75 | 0.590789 | from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import Wrappable
class GeneratorIterator(Wrappable):
    "An iterator created by a generator."
    def __init__(self, frame):
        # The generator frame carries all execution state; the object space
        # is taken from it.
        self.space = frame.space
        self.frame = frame
        # Re-entrancy guard checked by descr_next().
        self.running = False
    def descr__reduce__(self, space):
        # Pickle support: reconstruct via _pickle_support.generator_new
        # from the (frame, running) pair.
        from pypy.interpreter.mixedmodule import MixedModule
        w_mod = space.getbuiltinmodule('_pickle_support')
        mod = space.interp_w(MixedModule, w_mod)
        new_inst = mod.get('generator_new')
        w = space.wrap
        tup = [
            w(self.frame),
            w(self.running),
            ]
        return space.newtuple([new_inst, space.newtuple(tup)])
    def descr__iter__(self):
        """x.__iter__() <==> iter(x)"""
        return self.space.wrap(self)
    def descr_next(self):
        """x.next() -> the next value, or raise StopIteration"""
        space = self.space
        # Refuse re-entrant next() while the frame is mid-execution.
        if self.running:
            raise OperationError(space.w_ValueError,
                                 space.wrap('generator already executing'))
        if self.frame.frame_finished_execution:
            raise OperationError(space.w_StopIteration, space.w_None)
        self.running = True
        try:
            try:
                w_result = self.frame.execute_generator_frame(space.w_None)
            except OperationError:
                # errors finish a frame
                self.frame.frame_finished_execution = True
                raise
            # if the frame is now marked as finished, it was RETURNed from
            if self.frame.frame_finished_execution:
                raise OperationError(space.w_StopIteration, space.w_None)
            else:
                return w_result     # YIELDed
        finally:
            self.frame.f_back = None
            self.running = False
| 507 | 0 | 58 |
80b1941ce1329fbf78c9da892dd397aabb31c149 | 8,121 | py | Python | debias/make_data.py | fengtony686/heckman_debias_model | 298b32e8c1bfe5fb22216861c6901309465c7b35 | [
"MIT"
] | null | null | null | debias/make_data.py | fengtony686/heckman_debias_model | 298b32e8c1bfe5fb22216861c6901309465c7b35 | [
"MIT"
] | null | null | null | debias/make_data.py | fengtony686/heckman_debias_model | 298b32e8c1bfe5fb22216861c6901309465c7b35 | [
"MIT"
] | null | null | null | import io
import os
import re
import keras
import random
import numpy as np
import pandas as pd
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from scipy.sparse import coo_matrix
from sklearn import preprocessing
from sklearn.preprocessing import scale
########################################
# parameters
########################################
random.seed(1234)
np.random.seed(1234)
split_ratio = -1
max_nb_words = 50000
max_seq_len = 35
emb_dim = 300
dir_base = './data/'
file_emb = dir_base + 'wordvec.txt'
file_train = dir_base + 'train.tsv'
file_val = dir_base + 'dev.tsv'
file_test = dir_base + 'test.tsv'
file_sick = dir_base + 'sick.txt'
file_msr = dir_base + 'msr.txt'
file_sample_weight = dir_base + 'density.npy'
dir_processed = "./processed_data/"
if not os.path.isdir(dir_processed):
os.mkdir(dir_processed)
stamp_data = str(split_ratio)
file_data = dir_processed + 'data_%s.npz' % str(split_ratio)
file_split = dir_processed + 'split_%s.npz' % str(split_ratio)
file_leaky = dir_processed + 'leakage_features_%s.npz' % str(split_ratio)
########################################
# read-data
########################################
tr = pd.read_csv(file_train, delimiter='\t', header=None)
tr.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
tr = tr[['is_duplicate', 'question1', 'question2']]
val = pd.read_csv(file_val, delimiter='\t', header=None)
val.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
val = val[['is_duplicate', 'question1', 'question2']]
tst = pd.read_csv(file_test, delimiter='\t', header=None)
tst.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
tst = tst[['is_duplicate', 'question1', 'question2']]
sick = pd.read_csv(file_sick, delimiter='\t', usecols=['sentence_A', 'sentence_B', 'relatedness_score'])
sick.columns = ['question1', 'question2', 'is_duplicate']
sick['is_duplicate'] = sick['is_duplicate'].apply(lambda x: 1 if x > 3.6 else 0)
msr = pd.read_csv(file_msr, delimiter='\t', usecols=['#1 String', '#2 String', 'Quality'])
msr.columns = ['is_duplicate', 'question1', 'question2']
data = pd.concat([tr, val, tst, sick, msr], sort=False).fillna('')
########################################
# pre-processing
########################################
def text_cleaning(text):
    """Lowercase *text*, replace non-alphanumerics with spaces, collapse runs
    of whitespace.

    Bug fix: this definition had been stripped from the file although the two
    ``.apply(text_cleaning)`` calls just below still reference it, which would
    raise NameError at runtime.
    """
    text = re.sub('[^A-Za-z0-9]', ' ', text.lower())
    text = ' '.join(text.split())
    return text


print('Pre-processing')
data['question1'] = data['question1'].apply(text_cleaning)
data['question2'] = data['question2'].apply(text_cleaning)
tokenizer = Tokenizer(num_words=max_nb_words, oov_token='oov_token_placeholder')
tokenizer.fit_on_texts(list(data['question1'].values) + list(data['question2'].values))
sequences_1 = tokenizer.texts_to_sequences(data['question1'].values)
sequences_2 = tokenizer.texts_to_sequences(data['question2'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
x1 = pad_sequences(sequences_1, maxlen=max_seq_len)
x2 = pad_sequences(sequences_2, maxlen=max_seq_len)
y = data['is_duplicate'].values
########################################
# retrieval embeddings
########################################
print('Indexing word vectors')
word2vec = {}
fin = io.open(file_emb, 'r', encoding='utf-8', newline='\n', errors='ignore')
for line in fin:
tokens = line.rstrip().split(' ')
word2vec[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
print('Found %s word vectors of word2vec' % len(word2vec.keys()))
print('Preparing embedding matrix')
nb_words = min(max_nb_words, len(word_index))
emb = np.zeros((nb_words + 1, emb_dim))
miss_cnt = 0
for word, i in word_index.items():
if i >= nb_words:
break
if word in word2vec.keys():
emb[i] = word2vec[word]
else:
emb[i] = (np.random.rand(emb_dim) - 0.5) * 0.1
miss_cnt += 1
print('Null word embeddings: %d' % miss_cnt)
########################################
# sample train/val/test data
########################################
questions = list(data['question1'].values) + list(data['question2'].values)
le = preprocessing.LabelEncoder()
le.fit(questions)
q1_id = le.transform(data['question1'].values)
q2_id = le.transform(data['question2'].values)
pair_number = q1_id.shape[0]
sen_number = np.max((q1_id.max(), q2_id.max())) + 1
num_data = len(tr) + len(val) + len(tst)
sick_idx = np.arange(num_data, num_data + len(sick))
msr_idx = np.arange(num_data + len(sick), num_data + len(sick) + len(msr))
if split_ratio == -1:
train_idx = np.arange(len(tr))
val_idx = np.arange(len(tr), len(tr) + len(val))
test_idx = np.arange(len(tr) + len(val), len(tr) + len(val) + len(tst))
else:
perm = np.random.permutation(num_data)
val_split = (1 - split_ratio) / 2
train_idx = perm[:int(num_data * split_ratio)]
val_idx = perm[int(num_data * split_ratio): int(num_data * (split_ratio + val_split))]
test_idx = perm[int(num_data * (split_ratio + val_split)):]
train_sent_set = set(q1_id[train_idx]) | set(q2_id[train_idx])
val_overlap_idx = [i for i, idx in enumerate(val_idx) if
(q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
test_overlap_idx = [i for i, idx in enumerate(test_idx) if
(q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
val_no_overlap_idx = [i for i, idx in enumerate(val_idx) if
not (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
test_no_overlap_idx = [i for i, idx in enumerate(test_idx) if
not (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
print("Valid Overlap Distribution: %.5lf%%"
% (y[val_idx][val_overlap_idx].sum() / len(val_overlap_idx) * 100.0))
print("Test Overlap Distribution: %.5lf%%" %
(y[test_idx][test_overlap_idx].sum() / len(test_overlap_idx) * 100.0))
print("Valid No Overlap Distribution: %.5lf%%" %
(y[val_idx][val_no_overlap_idx].sum() / len(val_no_overlap_idx) * 100.0))
print("Test No Overlap Distribution: %.5lf%%" %
(y[test_idx][test_no_overlap_idx].sum() / len(test_no_overlap_idx) * 100.0))
sent_test_same = list(
set(list(data['question1'].values[train_idx]) + list(data['question2'].values[train_idx])))
sequences_test_same = tokenizer.texts_to_sequences(sent_test_same)
x_test_same = pad_sequences(sequences_test_same, maxlen=max_seq_len)
y_test_same = np.ones(len(x_test_same))
test_same_idx = range(len(x1), len(x1) + len(x_test_same))
x1 = np.concatenate([x1, x_test_same])
x2 = np.concatenate([x2, x_test_same])
y = np.concatenate([y, y_test_same])
########################################
# process leaky feature
########################################
adj = coo_matrix((np.ones(len(q1_id) * 2), (np.concatenate(
[q1_id, q2_id]), np.concatenate([q2_id, q1_id]))), (sen_number, sen_number))
leaky_features = np.zeros([len(q1_id), 8])
degree = np.array(adj.sum(axis=1))
leaky_features[:, 0] = degree[q1_id][:, 0]
leaky_features[:, 1] = degree[q2_id][:, 0]
tmp = adj * adj
degree1 = np.array(tmp.sum(axis=1))
leaky_features[:, 2] = np.array([tmp[q1_id[i], q2_id[i]] for i in range(len(q1_id))])
leaky_features[:, 3] = degree1[q1_id][:, 0]
leaky_features[:, 4] = degree1[q2_id][:, 0]
tmp = adj * adj * adj
degree2 = np.array(tmp.sum(axis=1))
leaky_features[:, 5] = np.array([tmp[q1_id[i], q2_id[i]] for i in range(len(q1_id))])
leaky_features[:, 6] = degree1[q1_id][:, 0]
leaky_features[:, 7] = degree1[q2_id][:, 0]
leaky_features = leaky_features[:, :3]
leaky_features = scale(leaky_features)
########################################
# save data to disk
########################################
np.savez(file_data, x1=x1, x2=x2, y=y, emb=emb, word_index=word_index)
np.savez(file_split, train_idx=train_idx, val_idx=val_idx, test_idx=test_idx,
val_overlap_idx=val_overlap_idx, val_no_overlap_idx=val_no_overlap_idx,
test_overlap_idx=test_overlap_idx, test_no_overlap_idx=test_no_overlap_idx,
sick_idx=sick_idx, msr_idx=msr_idx, test_same_idx=test_same_idx)
np.savez(file_leaky, leaky_features=leaky_features)
| 38.306604 | 104 | 0.6562 | import io
import os
import re
import keras
import random
import numpy as np
import pandas as pd
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from scipy.sparse import coo_matrix
from sklearn import preprocessing
from sklearn.preprocessing import scale
########################################
# parameters
########################################
random.seed(1234)
np.random.seed(1234)
split_ratio = -1
max_nb_words = 50000
max_seq_len = 35
emb_dim = 300
dir_base = './data/'
file_emb = dir_base + 'wordvec.txt'
file_train = dir_base + 'train.tsv'
file_val = dir_base + 'dev.tsv'
file_test = dir_base + 'test.tsv'
file_sick = dir_base + 'sick.txt'
file_msr = dir_base + 'msr.txt'
file_sample_weight = dir_base + 'density.npy'
dir_processed = "./processed_data/"
if not os.path.isdir(dir_processed):
os.mkdir(dir_processed)
stamp_data = str(split_ratio)
file_data = dir_processed + 'data_%s.npz' % str(split_ratio)
file_split = dir_processed + 'split_%s.npz' % str(split_ratio)
file_leaky = dir_processed + 'leakage_features_%s.npz' % str(split_ratio)
########################################
# read-data
########################################
tr = pd.read_csv(file_train, delimiter='\t', header=None)
tr.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
tr = tr[['is_duplicate', 'question1', 'question2']]
val = pd.read_csv(file_val, delimiter='\t', header=None)
val.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
val = val[['is_duplicate', 'question1', 'question2']]
tst = pd.read_csv(file_test, delimiter='\t', header=None)
tst.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
tst = tst[['is_duplicate', 'question1', 'question2']]
sick = pd.read_csv(file_sick, delimiter='\t', usecols=['sentence_A', 'sentence_B', 'relatedness_score'])
sick.columns = ['question1', 'question2', 'is_duplicate']
sick['is_duplicate'] = sick['is_duplicate'].apply(lambda x: 1 if x > 3.6 else 0)
msr = pd.read_csv(file_msr, delimiter='\t', usecols=['#1 String', '#2 String', 'Quality'])
msr.columns = ['is_duplicate', 'question1', 'question2']
data = pd.concat([tr, val, tst, sick, msr], sort=False).fillna('')
########################################
# pre-processing
########################################
print('Pre-processing')
def text_cleaning(text):
    """Normalize one sentence: lowercase, keep only a-z/0-9, squeeze whitespace.

    Every non-alphanumeric character is turned into a single space, then
    consecutive spaces are collapsed, so punctuation acts as a token boundary.
    """
    alnum_only = re.sub('[^A-Za-z0-9]', ' ', text.lower())
    return ' '.join(alnum_only.split())
# Clean both question columns, then fit a single shared vocabulary over them.
data['question1'] = data['question1'].apply(text_cleaning)
data['question2'] = data['question2'].apply(text_cleaning)
# Out-of-vocabulary words map to a dedicated placeholder token.
tokenizer = Tokenizer(num_words=max_nb_words, oov_token='oov_token_placeholder')
tokenizer.fit_on_texts(list(data['question1'].values) + list(data['question2'].values))
sequences_1 = tokenizer.texts_to_sequences(data['question1'].values)
sequences_2 = tokenizer.texts_to_sequences(data['question2'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
# Pad/truncate every sequence to a fixed length for batching.
x1 = pad_sequences(sequences_1, maxlen=max_seq_len)
x2 = pad_sequences(sequences_2, maxlen=max_seq_len)
y = data['is_duplicate'].values
########################################
# retrieval embeddings
########################################
print('Indexing word vectors')
# Parse the word-vector file: one "word v1 v2 ... v300" line per entry.
word2vec = {}
fin = io.open(file_emb, 'r', encoding='utf-8', newline='\n', errors='ignore')
for line in fin:
    tokens = line.rstrip().split(' ')
    word2vec[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
print('Found %s word vectors of word2vec' % len(word2vec.keys()))
print('Preparing embedding matrix')
# Row 0 stays all-zero (padding); rows 1..nb_words-1 hold word vectors.
nb_words = min(max_nb_words, len(word_index))
emb = np.zeros((nb_words + 1, emb_dim))
miss_cnt = 0
for word, i in word_index.items():
    # NOTE(review): `break` assumes word_index iterates in ascending index
    # (i.e. frequency) order, which Keras' Tokenizer provides on Python 3.7+
    # dicts — confirm, otherwise this should be `continue`.
    if i >= nb_words:
        break
    if word in word2vec.keys():
        emb[i] = word2vec[word]
    else:
        # Unknown word: small random init in [-0.05, 0.05).
        emb[i] = (np.random.rand(emb_dim) - 0.5) * 0.1
        miss_cnt += 1
# "Null" here means "not found in the pre-trained vectors" (randomly
# initialized above), not literally zero rows.
print('Null word embeddings: %d' % miss_cnt)
########################################
# sample train/val/test data
########################################
# Assign an integer sentence id to every distinct question so pairs can be
# reasoned about as a graph (used for overlap analysis and leaky features).
questions = list(data['question1'].values) + list(data['question2'].values)
le = preprocessing.LabelEncoder()
le.fit(questions)
q1_id = le.transform(data['question1'].values)
q2_id = le.transform(data['question2'].values)
pair_number = q1_id.shape[0]
sen_number = np.max((q1_id.max(), q2_id.max())) + 1
# sick/msr rows live after the three main splits in `data`.
num_data = len(tr) + len(val) + len(tst)
sick_idx = np.arange(num_data, num_data + len(sick))
msr_idx = np.arange(num_data + len(sick), num_data + len(sick) + len(msr))
if split_ratio == -1:
    # Keep the original file-given train/dev/test partition.
    train_idx = np.arange(len(tr))
    val_idx = np.arange(len(tr), len(tr) + len(val))
    test_idx = np.arange(len(tr) + len(val), len(tr) + len(val) + len(tst))
else:
    # Random re-split: `split_ratio` for train, the remainder halved
    # between validation and test.
    perm = np.random.permutation(num_data)
    val_split = (1 - split_ratio) / 2
    train_idx = perm[:int(num_data * split_ratio)]
    val_idx = perm[int(num_data * split_ratio): int(num_data * (split_ratio + val_split))]
    test_idx = perm[int(num_data * (split_ratio + val_split)):]
# A val/test pair "overlaps" training if either of its sentences also
# appears somewhere in the training pairs.
train_sent_set = set(q1_id[train_idx]) | set(q2_id[train_idx])
val_overlap_idx = [i for i, idx in enumerate(val_idx) if
                   (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
test_overlap_idx = [i for i, idx in enumerate(test_idx) if
                    (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
val_no_overlap_idx = [i for i, idx in enumerate(val_idx) if
                      not (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
test_no_overlap_idx = [i for i, idx in enumerate(test_idx) if
                       not (q1_id[idx] in train_sent_set or q2_id[idx] in train_sent_set)]
# Report the positive-label rate inside each overlap bucket.
print("Valid Overlap Distribution: %.5lf%%"
      % (y[val_idx][val_overlap_idx].sum() / len(val_overlap_idx) * 100.0))
print("Test Overlap Distribution: %.5lf%%" %
      (y[test_idx][test_overlap_idx].sum() / len(test_overlap_idx) * 100.0))
print("Valid No Overlap Distribution: %.5lf%%" %
      (y[val_idx][val_no_overlap_idx].sum() / len(val_no_overlap_idx) * 100.0))
print("Test No Overlap Distribution: %.5lf%%" %
      (y[test_idx][test_no_overlap_idx].sum() / len(test_no_overlap_idx) * 100.0))
# Build extra identity pairs (s, s) with label 1 from every distinct training
# sentence and append them after the real data; their row indices are kept
# in test_same_idx.
sent_test_same = list(
    set(list(data['question1'].values[train_idx]) + list(data['question2'].values[train_idx])))
sequences_test_same = tokenizer.texts_to_sequences(sent_test_same)
x_test_same = pad_sequences(sequences_test_same, maxlen=max_seq_len)
y_test_same = np.ones(len(x_test_same))
test_same_idx = range(len(x1), len(x1) + len(x_test_same))
x1 = np.concatenate([x1, x_test_same])
x2 = np.concatenate([x2, x_test_same])
y = np.concatenate([y, y_test_same])
########################################
# process leaky feature
########################################
# Build the undirected question co-occurrence graph: adj[i, j] counts how
# often sentences i and j appear together in a pair (edges added both ways).
adj = coo_matrix((np.ones(len(q1_id) * 2), (np.concatenate(
    [q1_id, q2_id]), np.concatenate([q2_id, q1_id]))), (sen_number, sen_number))
# Graph-based "leakage" features per pair:
#   0/1: degree of q1 / q2 (1-hop)
#   2:   number of shared neighbours (entry of adj^2)
#   3/4: 2-hop degrees of q1 / q2
#   5:   number of length-3 paths between q1 and q2 (entry of adj^3)
#   6/7: 3-hop degrees of q1 / q2
leaky_features = np.zeros([len(q1_id), 8])
degree = np.array(adj.sum(axis=1))
leaky_features[:, 0] = degree[q1_id][:, 0]
leaky_features[:, 1] = degree[q2_id][:, 0]
tmp = adj * adj
degree1 = np.array(tmp.sum(axis=1))
leaky_features[:, 2] = np.array([tmp[q1_id[i], q2_id[i]] for i in range(len(q1_id))])
leaky_features[:, 3] = degree1[q1_id][:, 0]
leaky_features[:, 4] = degree1[q2_id][:, 0]
tmp = adj * adj * adj
degree2 = np.array(tmp.sum(axis=1))
leaky_features[:, 5] = np.array([tmp[q1_id[i], q2_id[i]] for i in range(len(q1_id))])
# Bug fix: columns 6/7 previously reused the 2-hop degrees (degree1) even
# though degree2 had just been computed for them; use the 3-hop degrees.
# (Latent today because only the first 3 columns survive the slice below.)
leaky_features[:, 6] = degree2[q1_id][:, 0]
leaky_features[:, 7] = degree2[q2_id][:, 0]
# Only the 1-hop degrees and shared-neighbour count are kept, standardized.
leaky_features = leaky_features[:, :3]
leaky_features = scale(leaky_features)
########################################
# save data to disk
########################################
# Persist the processed tensors, the split indices, and the leakage features
# as separate .npz archives so downstream training can load them directly.
np.savez(file_data, x1=x1, x2=x2, y=y, emb=emb, word_index=word_index)
np.savez(file_split, train_idx=train_idx, val_idx=val_idx, test_idx=test_idx,
         val_overlap_idx=val_overlap_idx, val_no_overlap_idx=val_no_overlap_idx,
         test_overlap_idx=test_overlap_idx, test_no_overlap_idx=test_no_overlap_idx,
         sick_idx=sick_idx, msr_idx=msr_idx, test_same_idx=test_same_idx)
np.savez(file_leaky, leaky_features=leaky_features)
| 106 | 0 | 23 |
b560bec3a92f50570551226c592eaf431c50d9ec | 129 | py | Python | scitbx/linalg/ext.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | scitbx/linalg/ext.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | scitbx/linalg/ext.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
import boost.python
boost.python.import_ext("scitbx_linalg_ext")
from scitbx_linalg_ext import *
| 25.8 | 44 | 0.852713 | from __future__ import division
import boost.python
boost.python.import_ext("scitbx_linalg_ext")
from scitbx_linalg_ext import *
| 0 | 0 | 0 |
22fde268235b9c592bd1cda429f564e9783b91e2 | 5,809 | py | Python | algos/ddpg/ddpg.py | DensoITLab/spinningup_in_pytorch | 612d8c4c6593c8c5ecb5a939bf43085daac9e552 | [
"MIT"
] | 11 | 2018-12-19T14:44:35.000Z | 2020-10-02T05:39:41.000Z | algos/ddpg/ddpg.py | DensoITLab/spinningup_in_pytorch | 612d8c4c6593c8c5ecb5a939bf43085daac9e552 | [
"MIT"
] | null | null | null | algos/ddpg/ddpg.py | DensoITLab/spinningup_in_pytorch | 612d8c4c6593c8c5ecb5a939bf43085daac9e552 | [
"MIT"
] | 1 | 2018-12-30T08:37:51.000Z | 2018-12-30T08:37:51.000Z | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import gym, time
import numpy as np
from spinup.utils.logx import EpochLogger
from core import actor_critic as ac
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='ddpg')
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ddpg(args.env, actor_critic_function=ac,
hidden_size=[args.hid]*args.l, gamma=args.gamma, epochs=args.epochs,
logger_kwargs=logger_kwargs)
| 37 | 110 | 0.620072 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import gym, time
import numpy as np
from spinup.utils.logx import EpochLogger
from core import actor_critic as ac
class ReplayBuffer:
    """Fixed-capacity FIFO experience replay buffer for DDPG.

    Transitions are held in parallel Python lists managed as a ring buffer:
    once full, the oldest transition is overwritten in place. The original
    implementation evicted with repeated list.pop(0), which is O(n) per
    stored transition; the ring-buffer overwrite is O(1).
    """
    def __init__(self, size):
        # size: number of transitions currently held; max_size: capacity;
        # ptr: index of the next slot to (over)write.
        self.size, self.max_size = 0, size
        self.ptr = 0
        self.obs1_buf = []
        self.obs2_buf = []
        self.acts_buf = []
        self.rews_buf = []
        self.done_buf = []
    def store(self, obs, act, rew, next_obs, done):
        """Add one transition, evicting the oldest when at capacity."""
        if self.size < self.max_size:
            self.obs1_buf.append(obs)
            self.obs2_buf.append(next_obs)
            self.acts_buf.append(act)
            self.rews_buf.append(rew)
            self.done_buf.append(int(done))
            self.size += 1
        else:
            # Ring-buffer overwrite of the oldest slot: O(1) eviction.
            self.obs1_buf[self.ptr] = obs
            self.obs2_buf[self.ptr] = next_obs
            self.acts_buf[self.ptr] = act
            self.rews_buf[self.ptr] = rew
            self.done_buf[self.ptr] = int(done)
        self.ptr = (self.ptr + 1) % self.max_size
    def sample_batch(self, batch_size=32):
        """Uniformly sample `batch_size` transitions (with replacement).

        Returns [obs1, obs2, acts, rews, done] as torch.FloatTensors.
        """
        idxs = np.random.randint(low=0, high=self.size, size=(batch_size,))
        obs1 = torch.FloatTensor([self.obs1_buf[i] for i in idxs])
        obs2 = torch.FloatTensor([self.obs2_buf[i] for i in idxs])
        acts = torch.FloatTensor([self.acts_buf[i] for i in idxs])
        rews = torch.FloatTensor([self.rews_buf[i] for i in idxs])
        done = torch.FloatTensor([self.done_buf[i] for i in idxs])
        return [obs1, obs2, acts, rews, done]
def ddpg(env_name, actor_critic_function, hidden_size,
         steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99,
         polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100, start_steps=10000,
         act_noise=0.1, max_ep_len=1000, logger_kwargs=dict()):
    """Train a DDPG agent on the gym environment `env_name`.

    actor_critic_function builds the actor-critic container from
    (act_dim, obs_dim, hidden_size, act_limit); updates are performed at the
    end of each episode, one gradient step per environment step taken.
    NOTE(review): `logger_kwargs=dict()` is a mutable default argument and
    `test_env` is created but never used in this function — verify upstream.
    """
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    replay_buffer = ReplayBuffer(replay_size)
    env, test_env = gym.make(env_name), gym.make(env_name)
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    act_limit = int(env.action_space.high[0])
    actor_critic = actor_critic_function(act_dim, obs_dim, hidden_size, act_limit)
    q_optimizer = optim.Adam(actor_critic.q.parameters(), q_lr)
    policy_optimizer = optim.Adam(actor_critic.policy.parameters(), pi_lr)
    start_time = time.time()
    obs, ret, done, ep_ret, ep_len = env.reset(), 0, False, 0, 0
    total_steps = steps_per_epoch * epochs
    for t in range(total_steps):
        # Only start rendering after a while to keep early training fast.
        if t > 50000:
            env.render()
        # Uniform random exploration for the first `start_steps` steps,
        # then the (noisy) deterministic policy.
        if t > start_steps:
            obs_tens = torch.from_numpy(obs).float().reshape(1,-1)
            act = actor_critic.get_action(obs_tens, act_noise).detach().numpy().reshape(-1)
        else:
            act = env.action_space.sample()
        obs2, ret, done, _ = env.step(act)
        ep_ret += ret
        ep_len += 1
        # Time-limit termination is not a true environment "done".
        done = False if ep_len==max_ep_len else done
        replay_buffer.store(obs, act, ret, obs2, done)
        obs = obs2
        if done or (ep_len == max_ep_len):
            # One critic + actor + target update per step of the finished episode.
            for _ in range(ep_len):
                obs1_tens, obs2_tens, acts_tens, rews_tens, done_tens = replay_buffer.sample_batch(batch_size)
                # compute Q(s, a)
                q = actor_critic.q_function(obs1_tens, action=acts_tens)
                # compute r + gamma * (1 - d) * Q(s', mu_targ(s'))
                q_targ = actor_critic.compute_target(obs2_tens, gamma, rews_tens, done_tens)
                # compute (Q(s, a) - y(r, s', d))^2
                q_loss = (q.squeeze()-q_targ).pow(2).mean()
                q_optimizer.zero_grad()
                q_loss.backward()
                q_optimizer.step()
                logger.store(LossQ=q_loss.item(), QVals=q.detach().numpy())
                # compute Q(s, mu(s))
                policy_loss = -actor_critic.q_function(obs1_tens, detach=False).mean()
                policy_optimizer.zero_grad()
                policy_loss.backward()
                policy_optimizer.step()
                logger.store(LossPi=policy_loss.item())
                # compute rho * targ_p + (1 - rho) * main_p
                actor_critic.update_target(polyak)
            logger.store(EpRet=ep_ret, EpLen=ep_len)
            obs, ret, done, ep_ret, ep_len = env.reset(), 0, False, 0, 0
        # End-of-epoch bookkeeping/logging.
        if t > 0 and t % steps_per_epoch == 0:
            epoch = t // steps_per_epoch
            # test_agent()
            logger.log_tabular('Epoch', epoch)
            logger.log_tabular('EpRet', with_min_and_max=True)
            # logger.log_tabular('TestEpRet', with_min_and_max=True)
            logger.log_tabular('EpLen', average_only=True)
            # logger.log_tabular('TestEpLen', average_only=True)
            logger.log_tabular('TotalEnvInteracts', t)
            logger.log_tabular('QVals', with_min_and_max=True)
            logger.log_tabular('LossPi', average_only=True)
            logger.log_tabular('LossQ', average_only=True)
            logger.log_tabular('Time', time.time()-start_time)
            logger.dump_tabular()
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters and launch training.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='Pendulum-v0')
    parser.add_argument('--hid', type=int, default=300)
    parser.add_argument('--l', type=int, default=1)
    parser.add_argument('--gamma', type=float, default=0.99)
    # NOTE(review): --seed is only used for the logger directory name below;
    # it is never forwarded to ddpg() or the RNGs.
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--exp_name', type=str, default='ddpg')
    args = parser.parse_args()
    from spinup.utils.run_utils import setup_logger_kwargs
    logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
    # hidden_size is a list of `--l` layers, each `--hid` units wide.
    ddpg(args.env, actor_critic_function=ac,
         hidden_size=[args.hid]*args.l, gamma=args.gamma, epochs=args.epochs,
         logger_kwargs=logger_kwargs)
| 4,651 | -2 | 126 |
8c6f1c937eb970444aa15d0a570cc030109ae981 | 713 | py | Python | test/test_k3mime.py | WRY-learning/k3mime | ce75632ce0454b563aad505f2474b8f2c42b74ec | [
"MIT"
] | 1 | 2021-09-09T06:43:59.000Z | 2021-09-09T06:43:59.000Z | test/test_k3mime.py | WRY-learning/k3mime | ce75632ce0454b563aad505f2474b8f2c42b74ec | [
"MIT"
] | 3 | 2021-09-01T14:26:15.000Z | 2022-03-23T07:00:07.000Z | test/test_k3mime.py | WRY-learning/k3mime | ce75632ce0454b563aad505f2474b8f2c42b74ec | [
"MIT"
] | 3 | 2021-08-24T03:17:53.000Z | 2021-08-31T12:17:18.000Z | import unittest
import k3mime
import k3ut
dd = k3ut.dd
| 24.586207 | 73 | 0.565217 | import unittest
import k3mime
import k3ut
dd = k3ut.dd
class TestMime(unittest.TestCase):
    def test_get_by_filename(self):
        """get_by_filename maps known suffixes and falls back to octet-stream."""
        table = (
            ('', 'application/octet-stream'),
            ('123', 'application/octet-stream'),
            ('file.123', 'application/vnd.lotus-1-2-3'),
            ('file.123.not_exist_suffix_aa', 'application/octet-stream'),
            ('file.json', 'application/json'),
            ('file.not_exist_suffix_aa', 'application/octet-stream'),
        )
        for filename, want in table:
            dd('inp, expected:', filename, ' ', want)
            got = k3mime.get_by_filename(filename)
            dd('rst:', got)
            self.assertEqual(want, got)
| 591 | 13 | 50 |
99a6cff4201c5443d058ff9feb61047d8cb14fcf | 45 | py | Python | PythonSolutions/PythonEvaluation.py | MohamedMetwalli5/HackerRank_solutions | be13ba740214a966d41e6be1d643297c75499e24 | [
"Apache-2.0"
] | 37 | 2021-04-04T20:08:00.000Z | 2022-01-05T19:42:42.000Z | PythonSolutions/PythonEvaluation.py | ahmedsaleh1998/HackerRank_solutions | be13ba740214a966d41e6be1d643297c75499e24 | [
"Apache-2.0"
] | null | null | null | PythonSolutions/PythonEvaluation.py | ahmedsaleh1998/HackerRank_solutions | be13ba740214a966d41e6be1d643297c75499e24 | [
"Apache-2.0"
] | 15 | 2021-04-04T20:12:47.000Z | 2022-01-08T23:34:16.000Z | s = input().split("print")[1]
print(eval(s))
| 15 | 29 | 0.6 | s = input().split("print")[1]
print(eval(s))
| 0 | 0 | 0 |
8bec13719d03783afc53a55d900ffcaa42bcfe7e | 3,169 | py | Python | kuku/tests/test_values.py | xarg/kuku | bbd906347929b768753d88ec26cf579ec17f2e0a | [
"Apache-2.0"
] | 42 | 2018-09-27T23:11:27.000Z | 2022-02-27T19:23:16.000Z | kuku/tests/test_values.py | xarg/kuku | bbd906347929b768753d88ec26cf579ec17f2e0a | [
"Apache-2.0"
] | 3 | 2018-10-12T00:56:27.000Z | 2020-01-08T00:40:58.000Z | kuku/tests/test_values.py | xarg/kuku | bbd906347929b768753d88ec26cf579ec17f2e0a | [
"Apache-2.0"
] | 8 | 2018-09-27T23:44:22.000Z | 2021-04-20T04:06:57.000Z | import os
import pytest
import yaml
from kuku.types import Context
from kuku.values import resolve
HERE = os.path.abspath(os.path.dirname(__file__))
VALUES_STAGING_FILE = os.path.join(HERE, "fixtures/values/values-staging.yaml")
VALUES_PRODUCTION_FILE = os.path.join(HERE, "fixtures/values/values-production.yaml")
@pytest.mark.parametrize(
"values, expected",
[
(["k=v"], {"k": "v"}),
(["k1=v1,k2=v2"], {"k1": "v1", "k2": "v2"}),
(["k1=v1,k2=v2", "k3=v3"], {"k1": "v1", "k2": "v2", "k3": "v3"}),
(["k="], {"k": ""}),
],
)
@pytest.mark.parametrize(
"values, expected",
[
(["a.b=v1", "a.c=v2"], {"a": {"b": "v1", "c": "v2"}}),
(["a.b.c=v1", "a.b.d=v2"], {"a": {"b": {"c": "v1", "d": "v2"}}}),
],
)
@pytest.mark.parametrize(
"values, expected",
[
(["a.0.b=v1", "a.0.c=v2"], {"a": [{"c": "v2"}]}),
(["a.0=v1", "a.0=v2"], {"a": ["v2"]}),
(["a.0.b=v1", "a.1.c=v2"], {"a": [{"b": "v1"}, {"c": "v2"}]}),
],
)
@pytest.mark.parametrize(
"values", [["k"], ["=v"], ["="], ["k=1,=2"], ["a.b.c=1,=2"], ["a.0.c=1,=2"]]
)
| 29.342593 | 85 | 0.580309 | import os
import pytest
import yaml
from kuku.types import Context
from kuku.values import resolve
HERE = os.path.abspath(os.path.dirname(__file__))
VALUES_STAGING_FILE = os.path.join(HERE, "fixtures/values/values-staging.yaml")
VALUES_PRODUCTION_FILE = os.path.join(HERE, "fixtures/values/values-production.yaml")
@pytest.mark.parametrize(
    "values, expected",
    [
        (["k=v"], {"k": "v"}),
        (["k1=v1,k2=v2"], {"k1": "v1", "k2": "v2"}),
        (["k1=v1,k2=v2", "k3=v3"], {"k1": "v1", "k2": "v2", "k3": "v3"}),
        (["k="], {"k": ""}),
    ],
)
def test_valid_values(values, expected):
    """resolve() parses flat key=value pairs, including comma-joined ones."""
    assert resolve(values, []) == expected
@pytest.mark.parametrize(
    "values, expected",
    [
        (["a.b=v1", "a.c=v2"], {"a": {"b": "v1", "c": "v2"}}),
        (["a.b.c=v1", "a.b.d=v2"], {"a": {"b": {"c": "v1", "d": "v2"}}}),
    ],
)
def test_nested_dicts(values, expected):
    """Dotted keys are expanded into (deeply) nested dictionaries."""
    assert resolve(values, []) == expected
@pytest.mark.parametrize(
    "values, expected",
    [
        (["a.0.b=v1", "a.0.c=v2"], {"a": [{"c": "v2"}]}),
        (["a.0=v1", "a.0=v2"], {"a": ["v2"]}),
        (["a.0.b=v1", "a.1.c=v2"], {"a": [{"b": "v1"}, {"c": "v2"}]}),
    ],
)
def test_nested_lists(values, expected):
    """Numeric path segments address list elements; later writes win."""
    assert resolve(values, []) == expected
def test_nested_lists_invalid_index():
    """Addressing index 1 of a list whose index 0 was never set is an error."""
    with pytest.raises(ValueError, match=".* list 'a' has not been given a value."):
        resolve(["a.1=v1"], [])
def test_nested_lists_with_value_file(tmp_path):
    """CLI list indices may replace or append to a file-provided list,
    but an index past the end (neither replace nor append) raises."""
    p = tmp_path / "test.yaml"
    p.write_text(yaml.dump({"a": ["b", "c"]}))
    # We replace
    resolved = resolve(["a.0=new"], [str(p)])
    assert resolved == {"a": ["new", "c"]}
    # We append
    resolved = resolve(["a.2=d"], [str(p)])
    assert resolved == {"a": ["b", "c", "d"]}
    # The index '3' is out of bound (neither a replacement nor an append)
    with pytest.raises(ValueError):
        resolve(["a.3=new"], [str(p)])
@pytest.mark.parametrize(
    "values", [["k"], ["=v"], ["="], ["k=1,=2"], ["a.b.c=1,=2"], ["a.0.c=1,=2"]]
)
def test_resolve_invalid_values(values):
    """Malformed assignments (missing key or '=') are rejected with ValueError."""
    with pytest.raises(ValueError):
        resolve(values, [])
def test_cli_values_value_files():
    """CLI-provided values override values loaded from YAML files,
    with the last assignment winning, including for nested keys."""
    values = resolve([], [VALUES_STAGING_FILE])
    with open(VALUES_STAGING_FILE) as fd:
        # safe_load: calling yaml.load without an explicit Loader is
        # deprecated since PyYAML 5.1 and a TypeError on PyYAML >= 6.
        assert values == yaml.safe_load(fd)
    # putting the same file twice should not change the resulting values
    assert resolve([], [VALUES_STAGING_FILE, VALUES_STAGING_FILE]) == values
    # cli values override values from files
    assert resolve(["env=production"], [VALUES_STAGING_FILE])["env"] == "production"
    # multiple values override (last wins)
    assert resolve(["env=1", "env=2"], [VALUES_STAGING_FILE])["env"] == "2"
    # nested override
    assert (
        resolve(["nodeSelector.node-label=pool-2"], [VALUES_STAGING_FILE])[
            "nodeSelector"
        ]["node-label"]
        == "pool-2"
    )
def test_value_files_override():
    """When several value files are given, later files override earlier ones."""
    values = resolve([], [VALUES_STAGING_FILE])
    assert values["nodeSelector"]["node-label"] == "pool-1"
    values = resolve([], [VALUES_STAGING_FILE, VALUES_PRODUCTION_FILE])
    assert values["nodeSelector"]["node-label"] == "pool-2"
| 1,855 | 0 | 180 |
28e5a8f5ef3bb78d202349eba060265dbbccfad2 | 954 | py | Python | collection/models.py | Zadigo/mycommerce | 145031ebb359389e680a820577a4b6b2d382646d | [
"MIT"
] | null | null | null | collection/models.py | Zadigo/mycommerce | 145031ebb359389e680a820577a4b6b2d382646d | [
"MIT"
] | null | null | null | collection/models.py | Zadigo/mycommerce | 145031ebb359389e680a820577a4b6b2d382646d | [
"MIT"
] | null | null | null | from django.db import models
from shop.models import Product
from collection.choices import CollectionCategoryChoices
| 23.268293 | 59 | 0.643606 | from django.db import models
from shop.models import Product
from collection.choices import CollectionCategoryChoices
class Collection(models.Model):
    """A named, categorized grouping of shop products."""
    # Display name; unique so collections can be looked up by name.
    name = models.CharField(
        max_length=100,
        unique=True
    )
    # One of the fixed CollectionCategoryChoices values.
    category = models.CharField(
        max_length=100,
        choices=CollectionCategoryChoices.choices,
        default=CollectionCategoryChoices.SHORTS
    )
    description = models.TextField(
        max_length=500,
        blank=True,
        null=True
    )
    # Products belonging to this collection (optional, many-to-many).
    products = models.ManyToManyField(
        Product,
        blank=True
    )
    illustration = models.ImageField(blank=True, null=True)
    # NOTE(review): stored as a single CharField — presumably a delimited
    # tag string; confirm the expected format with callers.
    tags = models.CharField(
        max_length=100,
        blank=True,
        null=True
    )
    slug = models.SlugField()
    created_on = models.DateField(auto_now_add=True)
    def __str__(self):
        return self.name
    @property
    def get_view_name(self):
        """Lower-cased name, used as an identifier in views."""
        return self.name.lower()
| 58 | 753 | 23 |
e1b75e2806d583dd24cf0e084c6039d73059ce55 | 720 | py | Python | cronmap/__init__.py | zbuc/cronmap | 01960e1101138d719294dedceecab6b05db00390 | [
"MIT"
] | null | null | null | cronmap/__init__.py | zbuc/cronmap | 01960e1101138d719294dedceecab6b05db00390 | [
"MIT"
] | 15 | 2016-02-20T04:26:20.000Z | 2016-02-21T19:00:20.000Z | cronmap/__init__.py | zbuc/cronmap | 01960e1101138d719294dedceecab6b05db00390 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import sys
import time
import datetime
import subprocess
| 27.692308 | 76 | 0.576389 | from __future__ import absolute_import
import sys
import time
import datetime
import subprocess
class Cronmap(object):
    """Wrapper that periodically invokes nmap with configured arguments.

    NOTE: Python 2 code (print statement in _run_scan).
    """
    def __init__(self, options):
        # NOTE(review): the commented-out lines below would populate
        # self.ips / self.nmap_args / self.delay etc.; as written only
        # self.options is set, so _run_scan's use of self.debug,
        # self.nmap_args and self.ips will raise AttributeError — this
        # class looks unfinished; confirm before use.
        # self.ips = args.ips
        # if args.nmap_args:
        #     self.nmap_args = [a[0] for a in args.nmap_args]
        # else:
        #     self.nmap_args = ['-sV', '-Pn', '-p0-65535']
        # self.delay = args.delay
        # self.outfile = args.outfile
        # self.email = args.email
        self.options = options
    def _run_scan(self):
        # Run nmap synchronously and return its raw stdout.
        if self.debug:
            print "Invoking nmap " + ' '.join(self.nmap_args) + ' ' +\
                ' '.join(self.ips)
        return subprocess.check_output(["nmap"] + self.nmap_args + self.ips)
| 545 | 1 | 76 |
ebdcd4d34a28490c58f969c001cf7eb536f960b7 | 774 | py | Python | Minor Projects/live_stream.py | chandthash/nppy | 228116d4efa6d28a9cdab245c6c8045844e96211 | [
"MIT"
] | null | null | null | Minor Projects/live_stream.py | chandthash/nppy | 228116d4efa6d28a9cdab245c6c8045844e96211 | [
"MIT"
] | null | null | null | Minor Projects/live_stream.py | chandthash/nppy | 228116d4efa6d28a9cdab245c6c8045844e96211 | [
"MIT"
] | null | null | null | try:
import cv2
except (NameError, ImportError, ModuleNotFoundError):
print('cv2 package installed not found')
def live_stream():
    '''Stream live video from the default capture device until Esc is pressed.

    Opens the camera via DirectShow, shows each frame mirrored in a resizable
    window, and stops on Esc or when the camera stops delivering frames.
    '''
    cam = cv2.VideoCapture(cv2.CAP_DSHOW)
    while True:
        ret, frame = cam.read()  # ret is False when no frame could be read
        if not ret:
            # Camera missing/unplugged: stop cleanly instead of crashing
            # on a None frame in cv2.flip below.
            break
        frame = cv2.flip(frame, 180)   # flipCode > 0: mirror around the y-axis
        cv2.namedWindow('LIVE STREAM', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('LIVE STREAM', (800, 600))
        cv2.imshow('LIVE STREAM', frame)
        if cv2.waitKey(1) == 27:   # Press esc to quit everything
            break
    cam.release()   # Free the capture device
    cv2.destroyAllWindows()   # Close all OpenCV windows
if __name__ == '__main__':
    live_stream()
| 25.8 | 70 | 0.614987 | try:
import cv2
except (NameError, ImportError, ModuleNotFoundError):
print('cv2 package installed not found')
def live_stream():
'''Stream your live video'''
cam = cv2.VideoCapture(cv2.CAP_DSHOW)
while True:
ret, frame = cam.read() # Getting frame
frame = cv2.flip(frame, 180) # Rotating frame to 180 degree
cv2.namedWindow('LIVE STREAM', cv2.WINDOW_NORMAL)
cv2.resizeWindow('LIVE STREAM', (800, 600))
cv2.imshow('LIVE STREAM', frame)
if cv2.waitKey(1) == 27: # Press esc to quit everything
break
cam.release() # Destroying camera
cv2.destroyAllWindows() # Destroying all your active windows
if __name__ == '__main__':
live_stream()
| 0 | 0 | 0 |
a03d3bb0f01e5d09c37a1f3391b77f3bc777d551 | 325 | py | Python | georiviere/river/migrations/0010_remove_stream_width.py | georiviere/Georiviere-admin | 4ac532f84a7a8fef3e01384fad63e8e288d397c0 | [
"BSD-2-Clause"
] | 7 | 2021-11-05T14:52:25.000Z | 2022-03-24T21:18:02.000Z | georiviere/river/migrations/0010_remove_stream_width.py | georiviere/Georiviere-admin | 4ac532f84a7a8fef3e01384fad63e8e288d397c0 | [
"BSD-2-Clause"
] | 57 | 2021-11-02T10:27:34.000Z | 2022-03-31T14:08:32.000Z | georiviere/river/migrations/0010_remove_stream_width.py | georiviere/Georiviere-admin | 4ac532f84a7a8fef3e01384fad63e8e288d397c0 | [
"BSD-2-Clause"
] | 1 | 2021-12-05T14:55:42.000Z | 2021-12-05T14:55:42.000Z | # Generated by Django 3.1.6 on 2021-09-01 10:43
from django.db import migrations
| 18.055556 | 47 | 0.587692 | # Generated by Django 3.1.6 on 2021-09-01 10:43
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the obsolete `width` field from the
    # Stream model. Do not edit operations by hand after it has been applied.
    dependencies = [
        ('river', '0009_auto_20210421_1407'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='stream',
            name='width',
        ),
    ]
| 0 | 219 | 23 |
29027ef15dd260cd7b042bc5bae9c9d0a37408ca | 6,508 | py | Python | lisa/server/web/weblisa/api/accounts.py | Seraf/LISA | 7233a0f024c7dac412cbf79d617be4e4f48b2b7d | [
"MIT"
] | 29 | 2015-01-01T20:28:34.000Z | 2018-09-28T00:54:23.000Z | lisa/server/web/weblisa/api/accounts.py | Seraf/LISA | 7233a0f024c7dac412cbf79d617be4e4f48b2b7d | [
"MIT"
] | 6 | 2015-04-06T06:41:21.000Z | 2017-02-19T19:01:20.000Z | lisa/server/web/weblisa/api/accounts.py | Seraf/LISA | 7233a0f024c7dac412cbf79d617be4e4f48b2b7d | [
"MIT"
] | 15 | 2015-03-16T02:39:08.000Z | 2021-11-25T16:25:42.000Z | from ...interface.models import LisaUser
from django.contrib.auth import login, logout
from mongoengine.queryset import DoesNotExist
from tastypie_mongoengine import resources as mongoresources
from tastypie.http import HttpUnauthorized, HttpForbidden, HttpAccepted
from tastypie import fields
from tastypie.utils import trailing_slash
from tastypie.authentication import MultiAuthentication
from django.conf.urls import *
from .mixins import PublicEndpointResourceMixin, CustomApiKeyAuthentication
from tastypie import authorization
from mongoengine import document
| 41.452229 | 156 | 0.593577 | from ...interface.models import LisaUser
from django.contrib.auth import login, logout
from mongoengine.queryset import DoesNotExist
from tastypie_mongoengine import resources as mongoresources
from tastypie.http import HttpUnauthorized, HttpForbidden, HttpAccepted
from tastypie import fields
from tastypie.utils import trailing_slash
from tastypie.authentication import MultiAuthentication
from django.conf.urls import *
from .mixins import PublicEndpointResourceMixin, CustomApiKeyAuthentication
from tastypie import authorization
from mongoengine import document
class ProfileResource(mongoresources.MongoEngineResource):
    """Read-only API resource exposing the authenticated user's profile."""
    class Meta:
        queryset = LisaUser.objects.all()
        authentication = MultiAuthentication(CustomApiKeyAuthentication())
        allowed_methods = ['get', ]
        resource_name = 'profile'
class UserResource(PublicEndpointResourceMixin, mongoresources.MongoEngineResource):
    """Tastypie resource for LisaUser with login/logout and API-key endpoints.

    `login` is exposed publicly (no auth) via PublicEndpointResourceMixin;
    everything else requires API-key authentication.
    """
    features = fields.DictField(blank=True, null=True, readonly=True)
    apikey = fields.CharField(blank=True, null=True, readonly=True)
    user_permissions = fields.ListField(blank=True, null=True, readonly=True)
    class Meta:
        queryset = LisaUser.objects.all()
        authentication = MultiAuthentication(CustomApiKeyAuthentication())
        authorization = authorization.Authorization()
        """fields = ['username', 'first_name', 'last_name', 'apikey',
        'api_key_created', 'email', 'date_joined',
        'is_active', 'is_superuser', 'is_staff', 'id',
        'features', 'user_permissions'
        ]"""
        # FIXME :Problem with the put : why I receive an _id field on mongodb save with user and not plugin ?
        allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
        login_allowed_methods = ['post', ]
        # Swagger-style documentation of the extra (non-CRUD) endpoints.
        extra_actions = [
            {
                'name': 'login',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'username': {
                        'type': 'string',
                        'required': True,
                        'description': 'Unique username required.',
                        'paramType': 'body'
                    },
                    'password': {
                        'type': 'string',
                        'required': True,
                        'description': 'password required',
                        'paramType': 'body'
                    }
                }
            },
            {
                'name': 'logout',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'regenerateapikey',
                'summary': 'Regenerate the API Key for a user',
                'http_method': 'GET',
                'fields': {}
            }
        ]
    def dehydrate_user_permissions(self, bundle):
        # NOTE(review): the permission-gathering logic is commented out, so
        # this dehydrate hook currently returns None for every user.
        user = bundle.obj
        #user_app_permissions = user.user_permissions.all()
        #user_object_permissions = UserObjectPermission.objects.filter(user=user).distinct()
        #return list(user_app_permissions.values_list('codename', flat=True)) + list(user_object_permissions.values_list('permission__codename', flat=True))
    def dehydrate_apikey(self, bundle):
        # Expose the user's API key in serialized output when one exists.
        user = bundle.obj
        if hasattr(user, 'api_key') and user.api_key:
            return user.api_key
        return None
    def prepend_urls(self):
        # Route the three extra endpoints (regenerateapikey, login, logout).
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/regenerateapikey%s" % (self._meta.resource_name,
                                                                                  trailing_slash()),
                self.wrap_view('regenerate_api_key'), name='api_user_regenerate_api_key'),
            url(r"^(?P<resource_name>%s)/login%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('dispatch_login'), name='api_user_login'),
            url(r"^(?P<resource_name>%s)/logout%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('dispatch_logout'), name='api_user_logout'),
        ]
    def regenerate_api_key(self, request, **kwargs):
        # Replace the API key of the user identified by the URL pk.
        # NOTE(review): if the queryset is empty, `status` is never bound and
        # create_response below raises NameError — confirm pk is validated.
        self.method_check(request, allowed=['post', 'get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        queryset = LisaUser.objects(pk=kwargs['pk'])
        for user in queryset:
            user.apikey = user.set_api_key()
            user.save()
            status = {'username': user.username, 'apikey': user.apikey}
        self.log_throttled_access(request)
        return self.create_response(request, status, HttpAccepted)
    def dispatch_login(self, request, **kwargs):
        """
        A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
        a single resource.
        Relies on ``Resource.dispatch`` for the heavy-lifting.
        """
        return self.dispatch_public('login', request, **kwargs)
    def post_login(self, request, **kwargs):
        # Authenticate against MongoEngine and open a 1-hour Django session.
        data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        username = data.get('username', '')
        password = data.get('password', '')
        try:
            user = LisaUser.objects.get(username=username)
            user.backend = 'mongoengine.django.auth.MongoEngineBackend'
            if user.check_password(password):
                login(request, user)
                request.session.set_expiry(60 * 60 * 1) # 1 hour timeout
                return self.get_detail(request, pk=user.id)
            else:
                # NOTE(review): reason 'disabled' is returned for a wrong
                # password, not a disabled account — possibly misleading.
                return self.create_response(request, {'success': False, 'reason': 'disabled', }, HttpForbidden)
        except DoesNotExist:
            return self.create_response(request, {'success': False, 'reason': 'incorrect'}, HttpUnauthorized)
    def dispatch_logout(self, request, **kwargs):
        """
        A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
        a single resource.
        Relies on ``Resource.dispatch`` for the heavy-lifting.
        """
        return self.dispatch('logout', request, **kwargs)
    def get_logout(self, request, **kwargs):
        # Terminate the current session if one exists.
        if request.user and request.user.is_authenticated():
            logout(request)
            return self.create_response(request, {'success': True})
        else:
            # Not logged in
            return self.create_response(request, {'success': False}, HttpUnauthorized)
21ae507ab3e37b53d6b415a10fdf586676908328 | 1,016 | py | Python | scripts/Analysis/OMIT/g0_calculation.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | scripts/Analysis/OMIT/g0_calculation.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | scripts/Analysis/OMIT/g0_calculation.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import hbar, pi
from lmfit import Model, fit_report
#plt.style.use('presentation.mplstyle')
attenuation = 52 #dB
k = 2*pi*513e3
ke = 0.7*k
w0 = 2*pi*6.310792e9
#######################
w = w0 - (2*pi*5.2656*1e6)
pth = 'D:\\data\\2018-09-08\\12-06-01_omit_pump_pw_sw_mode1\\'
file = 'gamma_vs_pw.dat'
###################
power, cp, gamm = np.loadtxt(pth+file, unpack=True)
photons = photon(power-attenuation)
plt.figure(figsize = (5,3))
plt.plot(photons,cp, '-ro')
plt.xlabel('number of photons', fontsize=12)
plt.ylabel(r'Cooperativity', fontsize = 12)
plt.grid()
# plt.plot(photons, coop, '.')
#plt.xscale('log')
# plt.title(r'Cooperativity vs Number of Cavity Photons', fontsize=20)
plt.tight_layout()
plt.savefig(pth+'Cp_number_ph.png', transparent=True)
#plt.show() | 24.190476 | 71 | 0.634843 | import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import hbar, pi
from lmfit import Model, fit_report
#plt.style.use('presentation.mplstyle')
def photon(power):
    """Intracavity photon number for a drive of ``power`` dBm.

    Relies on the module-level cavity parameters ``k``, ``ke``, ``w`` and
    ``w0`` defined below in this script.
    """
    watts = 1e-3 * 10 ** (power / 10.)
    detuning = w - w0
    return (watts * ke) / (hbar * w * ((k / 2) ** 2 + detuning ** 2))
def line(x, m, c):
    """Straight line ``y = m * x + c`` (used as an lmfit model function)."""
    return c + m * x
attenuation = 52 #dB
k = 2*pi*513e3
ke = 0.7*k
w0 = 2*pi*6.310792e9
#######################
w = w0 - (2*pi*5.2656*1e6)
pth = 'D:\\data\\2018-09-08\\12-06-01_omit_pump_pw_sw_mode1\\'
file = 'gamma_vs_pw.dat'
###################
power, cp, gamm = np.loadtxt(pth+file, unpack=True)
photons = photon(power-attenuation)
plt.figure(figsize = (5,3))
plt.plot(photons,cp, '-ro')
plt.xlabel('number of photons', fontsize=12)
plt.ylabel(r'Cooperativity', fontsize = 12)
plt.grid()
# plt.plot(photons, coop, '.')
#plt.xscale('log')
# plt.title(r'Cooperativity vs Number of Cavity Photons', fontsize=20)
plt.tight_layout()
plt.savefig(pth+'Cp_number_ph.png', transparent=True)
#plt.show() | 84 | 0 | 50 |
5faf0ea368494a182a4b6eb92a03ae4ee6a7d15a | 2,797 | py | Python | src/gimme_secrets/azure/decrypt_encrypt.py | tomarv2/gimme-secrets | ba8255671afcc0e6b684386b52b64f3986c42ef1 | [
"Apache-2.0"
] | null | null | null | src/gimme_secrets/azure/decrypt_encrypt.py | tomarv2/gimme-secrets | ba8255671afcc0e6b684386b52b64f3986c42ef1 | [
"Apache-2.0"
] | null | null | null | src/gimme_secrets/azure/decrypt_encrypt.py | tomarv2/gimme-secrets | ba8255671afcc0e6b684386b52b64f3986c42ef1 | [
"Apache-2.0"
] | null | null | null | import logging
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
logger = logging.getLogger(__name__)
get_secret = DecryptEncrypt()
| 46.616667 | 118 | 0.688237 | import logging
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
logger = logging.getLogger(__name__)
class DecryptEncrypt:
    """Encrypt/decrypt secrets with an RSA key held in Azure Key Vault."""

    def __init__(self):
        pass

    def encrypt_secret(self, secret, vault_name, keyid, subscription_id, region):
        """Encrypt ``secret`` with key ``keyid`` from vault ``vault_name``.

        Prints the resulting ciphertext and key/algorithm info; returns None.
        """
        # NOTE(review): logging the plaintext secret at INFO level is a
        # security risk — confirm this is intended before production use.
        logger.info("secret to encrypt: {0}".format(secret))
        logger.info("keyid: {0}".format(keyid))
        logger.info("Azure subscription id: {0}".format(subscription_id))
        logger.info("Azure region: {0}".format(region))
        # https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/identity/azure-identity#defaultazurecredential
        vault_url = "https://{0}.vault.azure.net/".format(vault_name)
        # BUG FIX: the format string had no placeholder ("vault url: "), so
        # the URL was silently dropped from the log message.
        logger.info("vault url: {0}".format(vault_url))
        byte_literal_value = secret.encode()  # convert string to byte literal
        credential = DefaultAzureCredential()
        key_client = KeyClient(vault_url=vault_url, credential=credential)
        key = key_client.get_key(keyid)
        crypto_client = CryptographyClient(key, credential=credential)
        # the result holds the ciphertext and identifies the encryption key and algorithm used
        result = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, byte_literal_value)
        ciphertext = result.ciphertext
        print("-" * 50)
        print("ciphertext: {0}".format(ciphertext))
        print("result: {0}".format(result.key_id))
        print(result.algorithm)
        print("-" * 50)

    def decrypt_secret(self, cipher_text, vault_name, keyid, subscription_id, region):
        """Decrypt ``cipher_text`` with key ``keyid`` from vault ``vault_name``.

        ``cipher_text`` is the string form of the ciphertext bytes (as printed
        by :meth:`encrypt_secret`); it is round-tripped through
        ``unicode-escape``/``ISO-8859-1`` to recover the raw bytes.
        Prints the recovered plaintext; returns None.
        """
        logger.info("secret to decrypt: {0}".format(cipher_text))
        logger.info("keyid: {0}".format(keyid))
        logger.info("Azure subscription id: {0}".format(subscription_id))
        logger.info("Azure region: {0}".format(region))
        vault_url = "https://{0}.vault.azure.net/".format(vault_name)
        # Consistency: label the URL the same way as in encrypt_secret.
        logger.info("vault url: {0}".format(vault_url))
        # Convert the escaped string back into the original ciphertext bytes.
        byte_literal_value = cipher_text.encode()
        byte_literal_value = byte_literal_value.decode('unicode-escape').encode('ISO-8859-1')
        credential = DefaultAzureCredential()
        key_client = KeyClient(vault_url=vault_url, credential=credential)
        key = key_client.get_key(keyid)
        crypto_client = CryptographyClient(key, credential=credential)
        decrypted = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, byte_literal_value)
        print("decrypted: ", decrypted.plaintext)
get_secret = DecryptEncrypt()
| 2,438 | 0 | 103 |
0a216e03788bec536004842a6203d8d5c5839110 | 4,872 | py | Python | tsflex/features/logger.py | predict-idlab/tsflex | 73aae174ec292da7c1117f94a08b5ee14a57a17e | [
"MIT"
] | 138 | 2021-06-29T15:51:16.000Z | 2022-03-29T07:05:47.000Z | tsflex/features/logger.py | matdemeue/tsflex | 919c25c28ddbbb055425434852be0018329d5531 | [
"MIT"
] | 60 | 2021-06-24T16:28:50.000Z | 2022-03-28T14:52:24.000Z | tsflex/features/logger.py | matdemeue/tsflex | 919c25c28ddbbb055425434852be0018329d5531 | [
"MIT"
] | 11 | 2021-11-13T09:54:34.000Z | 2022-03-02T15:16:39.000Z | """Contains the used variables and functions to provide logging functionality.
See Also
--------
FeatureCollection: its `logging_file_path` of the `calculate` method.
"""
__author__ = "Jeroen Van Der Donckt"
import logging
import pandas as pd
import re
from ..utils.logging import logging_file_to_df, remove_inner_brackets
from ..utils.time import timedelta_to_str
# Package specific logger
logger = logging.getLogger("feature_calculation_logger")
logger.setLevel(logging.DEBUG)
# Create logger which writes WARNING messages or higher to sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.WARNING)
logger.addHandler(console)
def _parse_message(message: str) -> list:
"""Parse the message of the logged info."""
regex = r"\[(.*?)\]"
matches = re.findall(regex, remove_inner_brackets(message))
assert len(matches) == 4
func = matches[0]
key = matches[1].replace("'", "")
window, stride = matches[2].split(",")[0], matches[2].split(",")[1]
duration_s = float(matches[3].rstrip(" seconds"))
return [func, key, window, stride, duration_s]
def _parse_logging_execution_to_df(logging_file_path: str) -> pd.DataFrame:
"""Parse the logged messages into a dataframe that contains execution info.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Note
----
This function only works when the `logging_file_path` used in a
`FeatureCollection` its `calculate` method is passed.
Returns
-------
pd.DataFrame
A DataFrame with the features its function, input series names and
calculation duration.
"""
df = logging_file_to_df(logging_file_path)
df[["function", "series_names", "window", "stride", "duration"]] = pd.DataFrame(
list(df["message"].apply(_parse_message)),
index=df.index,
)
df["window"] = pd.to_timedelta(df["window"]).apply(timedelta_to_str)
df["stride"] = pd.to_timedelta(df["stride"]).apply(timedelta_to_str)
return df.drop(columns=["name", "log_level", "message"])
def get_feature_logs(logging_file_path: str) -> pd.DataFrame:
"""Get execution (time) info for each feature of a `FeatureCollection`.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Returns
-------
pd.DataFrame
A DataFrame with the features its function, input series names and
calculation duration.
"""
df = _parse_logging_execution_to_df(logging_file_path)
df["duration"] = pd.to_timedelta(df["duration"], unit="s")
return df
def get_function_stats(logging_file_path: str) -> pd.DataFrame:
    """Get execution (time) statistics for each function.

    Parameters
    ----------
    logging_file_path: str
        The file path where the logged messages are stored. This is the file path that
        is passed to the `FeatureCollection` its `calculate` method.

    Returns
    -------
    pd.DataFrame
        A DataFrame with for each function (i.e., `function-(window,stride)`)
        combination the mean (time), std (time), sum (time), and number of executions.

    """
    df = _parse_logging_execution_to_df(logging_file_path)
    # Get the sorted functions in a list to use as key for sorting the groups
    sorted_funcs = (
        df.groupby(["function"])
        .agg({"duration": ["mean"]})
        .sort_values(by=("duration", "mean"), ascending=True)
        .index.to_list()
    )

    # BUG FIX: ``key_func`` was referenced below but never defined, which made
    # this function raise a NameError. It maps the "function" index level onto
    # its rank in ``sorted_funcs`` (ordering groups by mean duration) and
    # leaves the "window"/"stride" levels untouched.
    def key_func(idx_level):
        if all(idx in sorted_funcs for idx in idx_level):
            return [sorted_funcs.index(idx) for idx in idx_level]
        return idx_level

    return (
        df.groupby(["function", "window", "stride"])
        .agg({"duration": ["mean", "std", "sum", "count"]})
        .sort_index(key=key_func, ascending=False)
    )
def get_series_names_stats(logging_file_path: str) -> pd.DataFrame:
"""Get execution (time) statistics for each `key-(window,stride)` combination.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored. This is the file path that
is passed to the `FeatureCollection` its `calculate` method.
Returns
-------
pd.DataFrame
A DataFrame with for each function the mean (time), std (time), sum (time), and
number of executions.
"""
df = _parse_logging_execution_to_df(logging_file_path)
return (
df.groupby(["series_names", "window", "stride"])
.agg({"duration": ["sum", "mean", "std", "count"]})
.sort_values(by=("duration", "sum"), ascending=False)
)
| 32.48 | 87 | 0.663793 | """Contains the used variables and functions to provide logging functionality.
See Also
--------
FeatureCollection: its `logging_file_path` of the `calculate` method.
"""
__author__ = "Jeroen Van Der Donckt"
import logging
import pandas as pd
import re
from ..utils.logging import logging_file_to_df, remove_inner_brackets
from ..utils.time import timedelta_to_str
# Package specific logger
logger = logging.getLogger("feature_calculation_logger")
logger.setLevel(logging.DEBUG)
# Create logger which writes WARNING messages or higher to sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.WARNING)
logger.addHandler(console)
def _parse_message(message: str) -> list:
    """Split a log message into [function, key, window, stride, duration_s]."""
    bracket_pattern = r"\[(.*?)\]"
    fields = re.findall(bracket_pattern, remove_inner_brackets(message))
    assert len(fields) == 4
    window_part = fields[2].split(",")[0]
    stride_part = fields[2].split(",")[1]
    return [
        fields[0],
        fields[1].replace("'", ""),
        window_part,
        stride_part,
        float(fields[3].rstrip(" seconds")),
    ]
def _parse_logging_execution_to_df(logging_file_path: str) -> pd.DataFrame:
    """Parse the logged messages into a dataframe that contains execution info.

    Parameters
    ----------
    logging_file_path: str
        The file path where the logged messages are stored. This is the file path that
        is passed to the `FeatureCollection` its `calculate` method.

    Note
    ----
    This function only works when the `logging_file_path` used in a
    `FeatureCollection` its `calculate` method is passed.

    Returns
    -------
    pd.DataFrame
        A DataFrame with the features its function, input series names and
        calculation duration.

    """
    df = logging_file_to_df(logging_file_path)
    # Explode each raw "message" string into its five structured fields
    # (aligned on the original index so rows stay matched).
    df[["function", "series_names", "window", "stride", "duration"]] = pd.DataFrame(
        list(df["message"].apply(_parse_message)),
        index=df.index,
    )
    # Normalize window/stride to their canonical timedelta-string form.
    df["window"] = pd.to_timedelta(df["window"]).apply(timedelta_to_str)
    df["stride"] = pd.to_timedelta(df["stride"]).apply(timedelta_to_str)
    # Drop the raw logging columns; only the parsed fields are useful downstream.
    return df.drop(columns=["name", "log_level", "message"])
def get_feature_logs(logging_file_path: str) -> pd.DataFrame:
    """Get execution (time) info for each feature of a `FeatureCollection`.

    Parameters
    ----------
    logging_file_path: str
        The file path where the logged messages are stored. This is the file
        path that is passed to the `FeatureCollection` its `calculate` method.

    Returns
    -------
    pd.DataFrame
        A DataFrame with the features its function, input series names and
        calculation duration.

    """
    logs = _parse_logging_execution_to_df(logging_file_path)
    # Convert the float seconds into proper timedeltas for easier analysis.
    logs["duration"] = pd.to_timedelta(logs["duration"], unit="s")
    return logs
def get_function_stats(logging_file_path: str) -> pd.DataFrame:
    """Get execution (time) statistics for each function.

    Parameters
    ----------
    logging_file_path: str
        The file path where the logged messages are stored. This is the file path that
        is passed to the `FeatureCollection` its `calculate` method.

    Returns
    -------
    pd.DataFrame
        A DataFrame with for each function (i.e., `function-(window,stride)`)
        combination the mean (time), std (time), sum (time), and number of executions.

    """
    df = _parse_logging_execution_to_df(logging_file_path)
    # Get the sorted functions in a list to use as key for sorting the groups
    sorted_funcs = (
        df.groupby(["function"])
        .agg({"duration": ["mean"]})
        .sort_values(by=("duration", "mean"), ascending=True)
        .index.to_list()
    )

    def key_func(idx_level):
        # Map the "function" index level onto its rank in ``sorted_funcs`` so
        # ``sort_index`` orders groups by mean duration; other index levels
        # ("window", "stride") contain non-function values and are returned
        # unchanged so they sort naturally.
        if all(idx in sorted_funcs for idx in idx_level):
            return [sorted_funcs.index(idx) for idx in idx_level]
        return idx_level

    return (
        df.groupby(["function", "window", "stride"])
        .agg({"duration": ["mean", "std", "sum", "count"]})
        .sort_index(key=key_func, ascending=False)
    )
def get_series_names_stats(logging_file_path: str) -> pd.DataFrame:
    """Get execution (time) statistics for each `key-(window,stride)` combination.

    Parameters
    ----------
    logging_file_path: str
        The file path where the logged messages are stored. This is the file
        path that is passed to the `FeatureCollection` its `calculate` method.

    Returns
    -------
    pd.DataFrame
        A DataFrame with for each function the mean (time), std (time),
        sum (time), and number of executions.

    """
    exec_df = _parse_logging_execution_to_df(logging_file_path)
    grouped = exec_df.groupby(["series_names", "window", "stride"])
    stats = grouped.agg({"duration": ["sum", "mean", "std", "count"]})
    return stats.sort_values(by=("duration", "sum"), ascending=False)
| 152 | 0 | 27 |
8ba917b5faeec47368ea3dc38ac2533f2c67148e | 4,317 | py | Python | src/mcookbook/exchanges/abc.py | s0undt3ch/mommas-cookbook | ccca526eee9241f12674cad8c1e1da1a900cef82 | [
"Apache-2.0"
] | 2 | 2022-01-02T23:47:32.000Z | 2022-01-07T11:14:15.000Z | src/mcookbook/exchanges/abc.py | UfSoft/mommas-cookbook | ccca526eee9241f12674cad8c1e1da1a900cef82 | [
"Apache-2.0"
] | 1 | 2022-01-17T12:47:37.000Z | 2022-01-17T12:47:37.000Z | src/mcookbook/exchanges/abc.py | s0undt3ch/mommas-cookbook | ccca526eee9241f12674cad8c1e1da1a900cef82 | [
"Apache-2.0"
] | 1 | 2022-01-10T18:49:36.000Z | 2022-01-10T18:49:36.000Z | """
Base exchange class implementation.
"""
from __future__ import annotations
import logging
import pprint
from typing import Any
import ccxt
from ccxt.async_support import Exchange as CCXTExchange
from pydantic import BaseModel
from pydantic import PrivateAttr
from mcookbook.config.live import LiveConfig
from mcookbook.exceptions import OperationalException
from mcookbook.pairlist.manager import PairListManager
from mcookbook.utils import merge_dictionaries
log = logging.getLogger(__name__)
class Exchange(BaseModel):
"""
Base Exchange class.
"""
_name: str = PrivateAttr()
_market: str = PrivateAttr()
config: LiveConfig
_api: type[CCXTExchange] = PrivateAttr()
_markets: dict[str, dict[str, Any]] = PrivateAttr(default_factory=dict)
_pairlist_manager: PairListManager = PrivateAttr()
@classmethod
def resolved(cls, config: LiveConfig) -> Exchange:
"""
Resolve the passed ``name`` and ``market`` to class implementation.
"""
name = config.exchange.name
market = config.exchange.market
for subclass in cls.__subclasses__():
subclass_name = subclass._name # pylint: disable=protected-access
subclass_market = subclass._market # pylint: disable=protected-access
if subclass_name == name and market == subclass_market:
instance = subclass.parse_obj({"config": config.dict()})
instance._pairlist_manager = PairListManager.construct(config=config)
instance._pairlist_manager._exchange = instance
for handler in config.pairlists:
handler._exchange = instance
instance._pairlist_manager._pairlist_handlers = config.pairlists
return instance
raise OperationalException(
f"Cloud not find an implementation for the {name}(market={market}) exchange."
)
@property
def api(self) -> CCXTExchange:
"""
Instantiate and return a CCXT exchange class.
"""
try:
return self._api
except AttributeError:
log.info("Using CCXT %s", ccxt.__version__)
ccxt_config = self.config.exchange.get_ccxt_config()
exchange_ccxt_config = self._get_ccxt_config() # pylint: disable=assignment-from-none
if exchange_ccxt_config:
merge_dictionaries(ccxt_config, exchange_ccxt_config)
headers = self._get_ccxt_headers() # pylint: disable=assignment-from-none
if headers:
merge_dictionaries(ccxt_config, {"headers": headers})
log.info(
"Instantiating API for the '%s' exchange with the following configuration:\n%s",
self.config.exchange.name,
pprint.pformat(ccxt_config),
)
# Reveal secrets
for key in ("apiKey", "secret", "password", "uid"):
if key not in ccxt_config:
continue
ccxt_config[key] = ccxt_config[key].get_secret_value()
try:
self._api = getattr(ccxt.async_support, self.config.exchange.name)(ccxt_config)
except (KeyError, AttributeError) as exc:
raise OperationalException(
f"Exchange {self.config.exchange.name} is not supported"
) from exc
except ccxt.BaseError as exc:
raise OperationalException(f"Initialization of ccxt failed. Reason: {exc}") from exc
return self._api
async def get_markets(self) -> dict[str, Any]:
"""
Load the exchange markets.
"""
if not self._markets:
log.info("Loading markets")
self._markets = await self.api.load_markets()
return self._markets
@property
def markets(self) -> dict[str, Any]:
"""
Return the loaded markets.
"""
return self._markets
@property
def pairlist_manager(self) -> PairListManager:
"""
Return the pair list manager.
"""
return self._pairlist_manager
| 35.097561 | 100 | 0.624276 | """
Base exchange class implementation.
"""
from __future__ import annotations
import logging
import pprint
from typing import Any
import ccxt
from ccxt.async_support import Exchange as CCXTExchange
from pydantic import BaseModel
from pydantic import PrivateAttr
from mcookbook.config.live import LiveConfig
from mcookbook.exceptions import OperationalException
from mcookbook.pairlist.manager import PairListManager
from mcookbook.utils import merge_dictionaries
log = logging.getLogger(__name__)
class Exchange(BaseModel):
"""
Base Exchange class.
"""
_name: str = PrivateAttr()
_market: str = PrivateAttr()
config: LiveConfig
_api: type[CCXTExchange] = PrivateAttr()
_markets: dict[str, dict[str, Any]] = PrivateAttr(default_factory=dict)
_pairlist_manager: PairListManager = PrivateAttr()
def _get_ccxt_headers(self) -> dict[str, str] | None:
return None
def _get_ccxt_config(self) -> dict[str, Any] | None:
return None
@classmethod
def resolved(cls, config: LiveConfig) -> Exchange:
"""
Resolve the passed ``name`` and ``market`` to class implementation.
"""
name = config.exchange.name
market = config.exchange.market
for subclass in cls.__subclasses__():
subclass_name = subclass._name # pylint: disable=protected-access
subclass_market = subclass._market # pylint: disable=protected-access
if subclass_name == name and market == subclass_market:
instance = subclass.parse_obj({"config": config.dict()})
instance._pairlist_manager = PairListManager.construct(config=config)
instance._pairlist_manager._exchange = instance
for handler in config.pairlists:
handler._exchange = instance
instance._pairlist_manager._pairlist_handlers = config.pairlists
return instance
raise OperationalException(
f"Cloud not find an implementation for the {name}(market={market}) exchange."
)
@property
def api(self) -> CCXTExchange:
"""
Instantiate and return a CCXT exchange class.
"""
try:
return self._api
except AttributeError:
log.info("Using CCXT %s", ccxt.__version__)
ccxt_config = self.config.exchange.get_ccxt_config()
exchange_ccxt_config = self._get_ccxt_config() # pylint: disable=assignment-from-none
if exchange_ccxt_config:
merge_dictionaries(ccxt_config, exchange_ccxt_config)
headers = self._get_ccxt_headers() # pylint: disable=assignment-from-none
if headers:
merge_dictionaries(ccxt_config, {"headers": headers})
log.info(
"Instantiating API for the '%s' exchange with the following configuration:\n%s",
self.config.exchange.name,
pprint.pformat(ccxt_config),
)
# Reveal secrets
for key in ("apiKey", "secret", "password", "uid"):
if key not in ccxt_config:
continue
ccxt_config[key] = ccxt_config[key].get_secret_value()
try:
self._api = getattr(ccxt.async_support, self.config.exchange.name)(ccxt_config)
except (KeyError, AttributeError) as exc:
raise OperationalException(
f"Exchange {self.config.exchange.name} is not supported"
) from exc
except ccxt.BaseError as exc:
raise OperationalException(f"Initialization of ccxt failed. Reason: {exc}") from exc
return self._api
async def get_markets(self) -> dict[str, Any]:
"""
Load the exchange markets.
"""
if not self._markets:
log.info("Loading markets")
self._markets = await self.api.load_markets()
return self._markets
@property
def markets(self) -> dict[str, Any]:
"""
Return the loaded markets.
"""
return self._markets
@property
def pairlist_manager(self) -> PairListManager:
"""
Return the pair list manager.
"""
return self._pairlist_manager
| 103 | 0 | 54 |
f1d06f11dfa6ba4ce17841c8e24ef727e4beae28 | 381 | py | Python | src/sailing_robot/src/sailing_robot/taskbase.py | sjdsm/sailing-robot | 15789066bb6dead147fc24d763ea384454588cb0 | [
"MIT"
] | 87 | 2016-02-04T08:44:00.000Z | 2022-03-19T19:53:48.000Z | src/sailing_robot/src/sailing_robot/taskbase.py | LaurieChen/sailing-robot | 840fb10d18026ea0f2ea546691c9bf958b8842d3 | [
"MIT"
] | 269 | 2016-01-29T08:19:59.000Z | 2020-02-13T12:33:26.000Z | src/sailing_robot/src/sailing_robot/taskbase.py | LaurieChen/sailing-robot | 840fb10d18026ea0f2ea546691c9bf958b8842d3 | [
"MIT"
] | 45 | 2016-02-11T22:59:53.000Z | 2020-12-10T02:58:50.000Z | """Base class for task classes.
This has almost no implementation; the debugging methods are injected by
tasks_ros. This allows us to test task classes outside ROS.
"""
| 21.166667 | 72 | 0.656168 | """Base class for task classes.
This has almost no implementation; the debugging methods are injected by
tasks_ros. This allows us to test task classes outside ROS.
"""
class TaskBase(object):
    """Minimal no-op task base; the real debug methods are injected by tasks_ros."""

    # Debug topic names a task may publish on — presumably populated per
    # subclass; confirm against tasks_ros.
    debug_topics = []

    def debug_pub(self, topic, value):
        # No-op outside ROS; tasks_ros replaces this with a real publisher.
        pass

    def log(self, level, msg, *values):
        # Fallback logger: printf-style formatting to stdout (level ignored).
        print(msg % values)

    def init_ros(self):
        # Hook for ROS-specific setup; nothing to do outside ROS.
        pass
| 79 | 109 | 23 |
13b2a9e16032f8c90217d064f8d76e49405125f7 | 8,150 | py | Python | src/backend/UserManager.py | PNBenfica/Tipsters | 499f1e32448353f59908c36df552a657217c946d | [
"Apache-2.0"
] | 2 | 2020-06-17T11:57:09.000Z | 2020-12-08T02:56:28.000Z | src/backend/UserManager.py | PNBenfica/Tipsters | 499f1e32448353f59908c36df552a657217c946d | [
"Apache-2.0"
] | null | null | null | src/backend/UserManager.py | PNBenfica/Tipsters | 499f1e32448353f59908c36df552a657217c946d | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import endpoints
from google.appengine.api import taskqueue
from Crypto.Hash import SHA256
from google.appengine.ext import ndb
from domain import Bet, PostStatus
from models import User, UserForm, UserMiniForm, TrendsMessage, TrendUserMessage, RankingsMessage, UserRankingProfileMessage, UserStatsMessage
from datetime import datetime
from Utils import average, random_list_element
import PostManager
from domain import NotificationType
# user is an user object
| 34.978541 | 217 | 0.711043 | from __future__ import division
import endpoints
from google.appengine.api import taskqueue
from Crypto.Hash import SHA256
from google.appengine.ext import ndb
from domain import Bet, PostStatus
from models import User, UserForm, UserMiniForm, TrendsMessage, TrendUserMessage, RankingsMessage, UserRankingProfileMessage, UserStatsMessage
from datetime import datetime
from Utils import average, random_list_element
import PostManager
from domain import NotificationType
def register_user(name, email, pwd):
    """Create and persist a new ``User`` entity keyed by ``name``.

    The password is stored as a SHA-256 hex digest and the avatar defaults
    to the stock image.

    Raises:
        endpoints.ConflictException: if the username or the email is
            already taken.
    """
    if _userAlreadyExist(name):
        raise endpoints.ConflictException("Username already exists")
    if _emailAlreadyExist(email):
        raise endpoints.ConflictException("Email already being used")
    hashPwd = _hashPassword(pwd)
    User(id=name, email=email, pwd=hashPwd, avatar="img/default_user.png").put()
def _hashPassword(pwd):
return SHA256.new(pwd).hexdigest()
def checkPassword(user, pwd):
return user.pwd == _hashPassword(pwd)
def _get_user_key(username):
return ndb.Key(User, username)
def getUser(username):
user = _get_user_key(username).get()
if not user:
raise endpoints.NotFoundException(username)
else:
return user
def getUserAvatar(user):
    """Return the user's avatar path, falling back to the default image.

    Only ``None`` triggers the fallback; any other value (including an
    empty string) is returned as-is.
    """
    return user.avatar if user.avatar is not None else "img/default_user.png"
def getUserProfile(currentUser, requestedUsername):
requestedUser = getUser(requestedUsername)
followers = map(_toUserMiniForm ,_getFollowers(requestedUser))
following = map(_toUserMiniForm ,_getFollowing(requestedUser))
is_following = _is_following(currentUser, requestedUsername)
stats = _get_user_stats(requestedUser)
return UserForm(name=requestedUsername, email=requestedUser.email, avatar=getUserAvatar(requestedUser), followers=followers, following=following, stats=stats, is_following=is_following, about=requestedUser.about)
# user is an user object
def follow_user(user, userToFollowName):
    """Toggle whether ``user`` follows ``userToFollowName``.

    Despite the name this is a toggle: if ``user`` already follows the
    target it unfollows instead. Both users' key lists are updated and
    persisted, then a follow/unfollow notification task is enqueued.

    NOTE(review): the two ``put()`` calls are not wrapped in a transaction —
    a concurrent toggle could desync ``followingKeys``/``followersKeys``;
    confirm whether this needs ``@ndb.transactional``.
    """
    userToFollow = getUser(userToFollowName)
    is_following = _is_following(user, userToFollowName)
    if is_following:
        user.followingKeys.remove(userToFollowName)
        userToFollow.followersKeys.remove(user.key.id())
    else:
        user.followingKeys.append(userToFollowName)
        userToFollow.followersKeys.append(user.key.id())
    user.put()
    userToFollow.put()
    # ``not is_following`` == the new state: True means "now following".
    send_follow_notification(user.key.id(), userToFollowName, not is_following)
def _is_following(user, userToFollowName):
return userToFollowName in user.followingKeys
def send_follow_notification(user_source, user_target, follow):
if follow:
params = { 'type' : NotificationType.FOLLOW, 'source' : user_source, 'target' : user_target }
else:
params = { 'type' : NotificationType.UNFOLLOW, 'source' : user_source, 'target' : user_target }
taskqueue.add(url='/tasks/send_notification', params=params)
def _userAlreadyExist(username):
return ndb.Key(User, username).get()
def _emailAlreadyExist(email):
return User.query(ndb.GenericProperty("email") == email).get()
def getUserByToken(token):
return User.query(ndb.GenericProperty("authToken") == token).get()
def updateAuthToken(user, token):
user.authToken = token
user.authTokenDate = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
user.put()
def _getFollowers(user):
followersKeys = [ndb.Key(User, followerName) for followerName in user.followersKeys]
return ndb.get_multi(followersKeys)
def _getNumberFollowers(user):
return len(user.followersKeys)
def _getFollowing(user):
followingKeys = [ndb.Key(User, followingName) for followingName in user.followingKeys]
return ndb.get_multi(followingKeys)
def _toUserMiniForm(user):
return UserMiniForm(name=user.key.id(), avatar=getUserAvatar(user))
def get_user_mini_form(username):
user = getUser(username)
return _toUserMiniForm(user)
def getTrends(user):
trendUsers = map(_toTrendUserMesssage, _getTrendUsers(user))
return TrendsMessage(users = trendUsers)
def _getTrendUsers(user):
query = User.query()
for followingName in user.followingKeys:
query = query.filter(User.key != ndb.Key(User, followingName))
return query.filter(User.key != user.key).fetch(limit=2)
def _toTrendUserMesssage(user):
return TrendUserMessage(tipster=_toUserMiniForm(user), description=_random_description())
def _random_description():
descriptions = ["ROI: 2.5%", "Is on a 5 green tips streak", "ROI: 5.34%", "Has 50% win percentage", "He is so good", "Follow him follow him"]
return random_list_element(descriptions)
def getRankings():
users = _getAllUsers()
users_ranking_profiles = map(_get_user_ranking_profile, users)
users_ranking_profiles = filter(lambda user: user.stats.nTips > 0, users_ranking_profiles)
users_ranking_profiles_messages = map(_to_ranking_profile_message, users_ranking_profiles)
return RankingsMessage(users = users_ranking_profiles_messages)
def _getAllUsers():
return User.query()
def _get_user_ranking_profile(user):
tipster = _toUserMiniForm(user)
stats = _get_user_stats(user)
return UserRankingProfileMessage(tipster = tipster, stats = stats)
def _get_user_stats(user):
posts = PostManager._getUserPosts(user)
_determine_posts_status(posts)
posts = _filter_open_posts(posts)
posts_status = map(lambda post: post.status, posts)
nTips = len(posts)
nFollowers = _getNumberFollowers(user)
streak = _user_tips_streak(posts_status)
winPercentage = _user_win_percentage(posts_status, nTips)
ROI = _user_ROI(posts)
avgWinOdds = _user_avg_win_odds(posts)
return UserStatsMessage(nTips=nTips, nFollowers=nFollowers, streak=streak, ROI=ROI, winPercentage=winPercentage, avgWinOdds=avgWinOdds)
def _determine_posts_status(posts):
for post in posts:
post.status = _get_post_status(post)
def _get_post_status(post):
choices_status = map(_get_choice_status, post.tips)
post_status = ""
if PostStatus.LOST in choices_status:
post_status = PostStatus.LOST
elif PostStatus.PENDENT in choices_status:
post_status = PostStatus.PENDENT
else:
post_status = PostStatus.WON
return post_status
def _get_choice_status(tip_key):
tip = tip_key.get()
bet_key = tip_key.parent()
bet = bet_key.get()
choice = Bet(bet).getChoice(tip.choiceId)
return choice["status"]
def _filter_open_posts(posts):
return filter(lambda post: post.status not in [PostStatus.PENDENT, PostStatus.VOID], posts)
def _user_tips_streak(tips_status):
streak = 0
for status in tips_status:
if status == "Won":
streak += 1
else:
break
return streak
def _user_win_percentage(tips_status, nTips):
wonTips = filter(lambda status: status == PostStatus.WON, tips_status)
if (len(wonTips) == 0 or nTips == 0):
return float(0)
return len(wonTips) / float(nTips)
def _user_avg_win_odds(posts):
won_posts_odds = _get_won_posts_odds(posts)
return average(won_posts_odds)
def _user_ROI(posts):
unit = 100
total_investment = unit * len(posts)
if total_investment==0: return
profit = calculate_profit(posts, unit)
return profit / total_investment * 100
def calculate_profit(posts, unit):
profit = 0
for post in posts:
if post.status == PostStatus.LOST:
profit -= unit
elif post.status == PostStatus.WON:
profit += post.totalOdd * unit - unit
return profit
def _get_won_posts_odds(posts):
won_posts = _get_won_posts(posts)
won_posts_odds = map(lambda post: post.totalOdd, won_posts)
return won_posts_odds
def _get_won_posts(posts):
return filter(lambda post: post.status == PostStatus.WON, posts)
def _to_ranking_profile_message(user_ranking_profile):
return user_ranking_profile
| 6,651 | 0 | 980 |
3de0a3de5d6dc93e7301e5fba2c9a67513c680f6 | 1,787 | py | Python | lib/surface/compute/groups/describe.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/groups/describe.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/groups/describe.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | 1 | 2020-07-25T12:23:41.000Z | 2020-07-25T12:23:41.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing groups."""
from googlecloudsdk.api_lib.compute import base_classes
class Describe(base_classes.BaseAsyncMutator):
"""Describe a Google Compute Engine group.
*{command}* displays all data associated with a Google Compute
Engine group in a project.
"""
@staticmethod
@property
@property
@property
@property
def CreateRequests(self, args):
"""Returns a list of requests necessary for describing groups."""
group_ref = self.CreateAccountsReference(
args.name, resource_type='groups')
request = self.messages.ClouduseraccountsGroupsGetRequest(
project=self.project,
groupName=group_ref.Name())
return [request]
Describe.detailed_help = {
'EXAMPLES': """\
To describe a user, run:
$ {command} example-user
""",
}
| 26.279412 | 74 | 0.706771 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing groups."""
from googlecloudsdk.api_lib.compute import base_classes
class Describe(base_classes.BaseAsyncMutator):
"""Describe a Google Compute Engine group.
*{command}* displays all data associated with a Google Compute
Engine group in a project.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'name',
metavar='NAME',
help='The name of the group to describe.')
@property
def service(self):
return self.clouduseraccounts.groups
@property
def method(self):
return 'Get'
@property
def resource_type(self):
return 'groups'
@property
def messages(self):
return self.clouduseraccounts.MESSAGES_MODULE
def CreateRequests(self, args):
"""Returns a list of requests necessary for describing groups."""
group_ref = self.CreateAccountsReference(
args.name, resource_type='groups')
request = self.messages.ClouduseraccountsGroupsGetRequest(
project=self.project,
groupName=group_ref.Name())
return [request]
Describe.detailed_help = {
'EXAMPLES': """\
To describe a user, run:
$ {command} example-user
""",
}
| 234 | 0 | 120 |
96b76c01022d8b35cf13d18eef78a5f20afc3be9 | 1,398 | py | Python | adiumsh/utils.py | shichao-an/adium-sh | fc0fc20e12a4d787f21d40cba7c730755f1e17f3 | [
"BSD-2-Clause"
] | 1 | 2016-06-08T22:48:56.000Z | 2016-06-08T22:48:56.000Z | adiumsh/utils.py | shichao-an/adium-sh | fc0fc20e12a4d787f21d40cba7c730755f1e17f3 | [
"BSD-2-Clause"
] | null | null | null | adiumsh/utils.py | shichao-an/adium-sh | fc0fc20e12a4d787f21d40cba7c730755f1e17f3 | [
"BSD-2-Clause"
] | 1 | 2018-03-05T01:24:59.000Z | 2018-03-05T01:24:59.000Z | import os
import psutil
try:
import configparser
except ImportError:
import ConfigParser as configparser
from xml.sax import saxutils
def get_old_entries(dirpath, at_least=5):
"""
Get a list of n least recently modified entries of dirpath except the most
recently modified entry if any
:param at_least: `dirpath` must contain at least `at_least` entries,
or return empty list
"""
entries = listdir_fullpath(dirpath)
if len(entries) < at_least:
return []
entries.sort(key=os.path.getmtime)
return entries[1:]
| 24.526316 | 78 | 0.674535 | import os
import psutil
try:
import configparser
except ImportError:
import ConfigParser as configparser
from xml.sax import saxutils
def is_process_running(process_name):
names = [proc.name() for proc in psutil.process_iter()]
return process_name in names
def get_process(process_name):
for proc in psutil.process_iter():
if proc.name() == process_name:
return proc
else:
return None
def get_config(path, section):
config = configparser.ConfigParser()
if os.path.exists(path):
config.read(path)
if config.has_section(section):
return dict(config.items(section))
def get_config_value(path, section, name):
config = get_config(path, section)
return config.get(name, None)
def unescape(text):
entities = {"'": "'", """: '"'}
return saxutils.unescape(text, entities)
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def get_old_entries(dirpath, at_least=5):
"""
Get a list of n least recently modified entries of dirpath except the most
recently modified entry if any
:param at_least: `dirpath` must contain at least `at_least` entries,
or return empty list
"""
entries = listdir_fullpath(dirpath)
if len(entries) < at_least:
return []
entries.sort(key=os.path.getmtime)
return entries[1:]
| 684 | 0 | 138 |
5e5a45322d68cdefedf69658d9245fb82cbe673c | 1,675 | py | Python | Part_3_advanced/m15_design_patterns/builder/homework_1_start/document_system/complex_form.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m15_design_patterns/builder/homework_1_start/document_system/complex_form.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m15_design_patterns/builder/homework_1_start/document_system/complex_form.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from enum import Enum
from typing import Optional
| 28.87931 | 97 | 0.602985 | from enum import Enum
from typing import Optional
class FormType(Enum):
UE = "UE"
USA = "USA"
class ComplexForm:
def __init__(
self,
personal_section_heading: str,
email_input: bool,
phone_input: bool,
details_section: bool,
details_section_heading: Optional[str],
form_type: FormType,
) -> None:
self.content = self._personal_section(personal_section_heading, email_input, phone_input)
if details_section:
if not details_section_heading:
details_section_heading = "Details"
self.content += self._details_section(details_section_heading)
if form_type is FormType.UE:
self.content += self._ue_regulatory_info()
else:
self.content += self._usa_regulatory_info()
def _personal_section(
self, personal_section_heading: str, email_input: bool, phone_input: bool
) -> str:
content = f"--{personal_section_heading}--\n"
content += "First name: < >\n"
content += "Last name: < >\n"
if email_input:
content += "Email: < >\n"
if phone_input:
content += "Phone: < >\n"
return content
def _details_section(self, details_section_heading: str) -> str:
content = f"--{details_section_heading}--\n"
content += "Age: < >\n"
content += "Address: < >\n"
return content
def _ue_regulatory_info(self) -> str:
return f"UE info \n"
def _usa_regulatory_info(self) -> str:
return f"Some required in forms in USA \n"
def render(self) -> str:
return self.content
| 1,389 | 27 | 207 |
a4acd99a15bb009121ad4d4bc9c0d2c0cf480de4 | 1,288 | py | Python | backend/app/main.py | CyberDAS-Dev/VK_marketplace | 3272bcb45c88d8c43e96a4e727600f7f1d4c86a4 | [
"MIT"
] | null | null | null | backend/app/main.py | CyberDAS-Dev/VK_marketplace | 3272bcb45c88d8c43e96a4e727600f7f1d4c86a4 | [
"MIT"
] | 11 | 2021-10-10T16:30:22.000Z | 2022-01-21T07:59:36.000Z | backend/app/main.py | CyberDAS-Dev/VK_marketplace | 3272bcb45c88d8c43e96a4e727600f7f1d4c86a4 | [
"MIT"
] | null | null | null | import os
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
from app.api.router import api_router
from app.core.config import settings
from app.core.logging import setup_logging
from app.middleware.logger import LoggerMiddleware
from app.utils.package_info import get_metadata
prefix = settings.BASE_PREFIX
app = FastAPI(
**get_metadata(),
openapi_url=f"{prefix}/openapi.json",
docs_url=f"{prefix}/docs",
redoc_url=f"{prefix}/redoc",
root_path=os.environ.get("ROOT_PATH", ""),
)
logger = setup_logging()
app.middleware("http")(LoggerMiddleware(logger=logger))
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=prefix)
# Находится не в роутере картинок из-за https://github.com/tiangolo/fastapi/issues/1469
app.mount(f"{prefix}/images", StaticFiles(directory=settings.IMAGE_DIR), name="images")
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", reload=True, port=8888)
| 28 | 87 | 0.744565 | import os
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
from app.api.router import api_router
from app.core.config import settings
from app.core.logging import setup_logging
from app.middleware.logger import LoggerMiddleware
from app.utils.package_info import get_metadata
prefix = settings.BASE_PREFIX
app = FastAPI(
**get_metadata(),
openapi_url=f"{prefix}/openapi.json",
docs_url=f"{prefix}/docs",
redoc_url=f"{prefix}/redoc",
root_path=os.environ.get("ROOT_PATH", ""),
)
logger = setup_logging()
app.middleware("http")(LoggerMiddleware(logger=logger))
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=prefix)
# Находится не в роутере картинок из-за https://github.com/tiangolo/fastapi/issues/1469
app.mount(f"{prefix}/images", StaticFiles(directory=settings.IMAGE_DIR), name="images")
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", reload=True, port=8888)
| 0 | 0 | 0 |
04bb87d99a73200f29b0aff7955f8776acf369a8 | 902 | py | Python | day7.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | day7.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | day7.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
puzzle_input = read_input("day7.txt")
print(f"Part 1: {part1(puzzle_input)}")
print(f"Part 2: {part2(puzzle_input)}")
| 27.333333 | 54 | 0.568736 | def read_input(file_name: str) -> [int]:
with open("inputFiles/" + file_name, "r") as file:
lines = file.read().splitlines()[0].split(",")
return [int(i) for i in lines]
def part1(input_value: [int]):
min_fuel = -1
for i in range(max(input_value)):
fuel = 0
for fish in input_value:
fuel += abs(i - fish)
if min_fuel == -1 or fuel < min_fuel:
min_fuel = fuel
return min_fuel
def part2(input_value: [int]):
min_fuel = -1
for i in range(max(input_value)):
fuel = 0
for fish in input_value:
fuel += sum(range(1, abs(i - fish) + 1))
if min_fuel == -1 or fuel < min_fuel:
min_fuel = fuel
return min_fuel
if __name__ == "__main__":
puzzle_input = read_input("day7.txt")
print(f"Part 1: {part1(puzzle_input)}")
print(f"Part 2: {part2(puzzle_input)}")
| 673 | 0 | 68 |
7175deae72a393a35d05f31b6a72f9c439d6637a | 823 | py | Python | setup.py | janushendersonassetallocation/loman | fc7cc939f92ad69d7b68aff35d70b8e9807911f4 | [
"BSD-3-Clause"
] | 48 | 2018-10-31T20:09:15.000Z | 2022-02-14T20:38:53.000Z | setup.py | janushendersonassetallocation/loman | fc7cc939f92ad69d7b68aff35d70b8e9807911f4 | [
"BSD-3-Clause"
] | 21 | 2017-04-07T02:41:28.000Z | 2018-01-24T15:59:51.000Z | setup.py | janushendersonassetallocation/loman | fc7cc939f92ad69d7b68aff35d70b8e9807911f4 | [
"BSD-3-Clause"
] | 3 | 2018-10-17T01:37:17.000Z | 2019-12-05T11:51:50.000Z | from setuptools import setup
setup(
name='loman',
version='0.3.0',
packages=['loman'],
url='https://github.com/janusassetallocation/loman',
license='BSD',
author='Ed Parcell',
author_email='edparcell@gmail.com',
description='Loman tracks state of computations, and the dependencies between them, allowing full and partial recalculations.',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=['six', 'dill', 'pydotplus', 'networkx', 'pandas', 'matplotlib'],
)
| 35.782609 | 131 | 0.634265 | from setuptools import setup
setup(
name='loman',
version='0.3.0',
packages=['loman'],
url='https://github.com/janusassetallocation/loman',
license='BSD',
author='Ed Parcell',
author_email='edparcell@gmail.com',
description='Loman tracks state of computations, and the dependencies between them, allowing full and partial recalculations.',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=['six', 'dill', 'pydotplus', 'networkx', 'pandas', 'matplotlib'],
)
| 0 | 0 | 0 |
793adde358f7abce99b0e2b29705fcb9b693e34e | 32,795 | py | Python | texar/modules/decoders/transformer_decoders.py | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 | [
"Apache-2.0"
] | null | null | null | texar/modules/decoders/transformer_decoders.py | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 | [
"Apache-2.0"
] | null | null | null | texar/modules/decoders/transformer_decoders.py | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transformer decoder.
"""
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from texar.core import layers
from texar.hyperparams import HParams
from texar.modules.decoders.decoder_base import DecoderBase, _make_output_layer
from texar.modules.decoders.decoder_helpers import EmbeddingHelper, Helper
from texar.modules.encoders.multihead_attention import (
Cache, MultiheadAttentionEncoder)
from texar.modules.encoders.transformer_encoder import (
default_transformer_poswise_net_hparams)
from texar.modules.networks.networks import FeedForwardNetwork
from texar.utils import transformer_attentions as attn
from texar.utils.beam_search import beam_search
from texar.utils.shapes import mask_sequences
from texar.utils.utils import sequence_mask
__all__ = [
'TransformerDecoderOutput',
'TransformerDecoder',
]
class TransformerDecoderOutput(NamedTuple):
r"""The output of :class:`TransformerDecoder`.
"""
logits: torch.Tensor
r"""A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``
containing the logits."""
sample_id: torch.LongTensor
r"""A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing
the sampled token indices."""
class TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):
r"""Transformer decoder that applies multi-head self-attention for
sequence decoding.
It is a stack of :class:`~texar.modules.encoders.MultiheadAttentionEncoder`,
:class:`~texar.modules.FeedForwardNetwork`, and residual connections.
Args:
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~texar.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# Same as in TransformerEncoder
"num_blocks": 6,
"dim": 512,
"use_gpt_config": False,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"poswise_feedforward": default_transformer_poswise_net_hparams,
"multihead_attention": {
'name': 'multihead_attention',
'num_units': 512,
'output_dim': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
"initializer": None,
"name": "transformer_decoder"
# Additional for TransformerDecoder
"embedding_tie": True,
"output_layer_bias": False,
"max_decoding_length": int(1e10),
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoder.
`"use_gpt_config"`: bool
Whether to follow the `eps` setting of OpenAI GPT.
`"embedding_dropout"`: float
Dropout rate of the input word and position embeddings.
`"residual_dropout"`: float
Dropout rate of the residual connections.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``dim``.
See :func:`~texar.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``output_dim`` in this module is equal to ``dim``.
See :func:`~texar.modules.MultiheadAttentionEncoder.default_hparams`
for details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.core.get_initializer` for details.
`"embedding_tie"`: bool
Whether to use the word embedding matrix as the output layer
that computes logits. If `False`, a new dense layer is created.
`"output_layer_bias"`: bool
Whether to use bias to the output layer.
`"max_decoding_length"`: int
The maximum allowed number of decoding steps.
Set to a very large number of avoid the length constraint.
Ignored if provided in :meth:`forward` or ``"train_greedy"``
decoding is used.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'use_gpt_config': False,
'embedding_tie': True,
'output_layer_bias': False,
'max_decoding_length': int(1e10),
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
'initializer': None,
'name': "transformer_decoder",
}
def _inputs_to_outputs(self, inputs: torch.Tensor,
cache: Cache) -> Tuple[torch.Tensor, Cache]:
r"""Returns the outputs of one decoding step (for example,
the predicted logits of the next token).
:attr:`inputs` should be of shape ``[batch_size, dim]``.
Returns:
A tuple of logits and updated cache. Logits are of shape
``[batch_size, vocab_size]``.
"""
outputs = self._self_attention_stack(
inputs.unsqueeze(1), memory=cache['memory'], cache=cache)
outputs = self._output_layer(outputs)
outputs = outputs.squeeze(1)
return outputs, cache
def forward(self, # type: ignore
inputs: Optional[torch.Tensor] = None,
sequence_length: Optional[torch.LongTensor] = None,
memory: Optional[torch.Tensor] = None,
memory_sequence_length: Optional[torch.LongTensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
context: Optional[torch.Tensor] = None,
context_sequence_length: Optional[torch.LongTensor] = None,
helper: Optional[Helper] = None,
decoding_strategy: str = 'train_greedy',
max_decoding_length: Optional[int] = None,
impute_finished: bool = False,
infer_mode: Optional[bool] = None,
beam_width: Optional[int] = None,
length_penalty: float = 0.,
**kwargs) \
-> Union[
TransformerDecoderOutput,
Tuple[TransformerDecoderOutput, torch.LongTensor],
Dict[str, torch.Tensor]]:
r"""Performs decoding.
The interface is very similar to that of RNN decoders
(:class:`texar.modules.RNNDecoderBase`). In particular,
the function provides **3 ways** to specify the decoding method, with
varying flexibility:
1. The :attr:`decoding_strategy` argument.
- **"train_greedy"**: decoding in teacher-forcing fashion (i.e.,
feeding ground truth to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Argument :attr:`inputs` is required for this strategy.
:attr:`sequence_length` is optional.
- **"infer_greedy"**: decoding in inference fashion (i.e., feeding
`generated` sample to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Arguments :attr:`(start_tokens, end_token)` are
required for this strategy, and argument
:attr:`max_decoding_length` is optional.
- **"infer_sample"**: decoding in inference fashion, and for each
step sample is obtained by `random sampling` from the logits.
Arguments :attr:`(start_tokens, end_token)` are required for this
strategy, and argument :attr:`max_decoding_length` is optional.
This argument is used only when arguments :attr:`helper` and
:attr:`beam_width` are both `None`.
2. The :attr:`helper` argument: An instance of subclass of
:class:`texar.modules.decoders.Helper`.
This provides a superset of decoding strategies than above.
The interface is the same as in RNN decoders.
Please refer to :meth:`texar.modules.RNNDecoderBase.forward` for
detailed usage and examples.
Note that, here, though using a
:class:`~texar.decoder.TrainingHelper` corresponding to the
``"train_greedy"`` strategy above, the implementation is *slower*
than directly setting ``decoding_strategy="train_greedy"`` (though
output results are the same).
Argument :attr:`max_decoding_length` is optional.
3. **Beam search**: set :attr:`beam_width` to use beam search decoding.
Arguments :attr:`(start_tokens, end_token)` are required,
and argument :attr:`max_decoding_length` is optional.
.. warning::
Beam search is not yet implemented. Setting :attr:`beam_width`
to any value greater than 1 would raise a
:exc:`NotImplementedError`
Args:
memory (optional): The memory to attend, e.g., the output of an RNN
encoder. A :tensor:`Tensor` of shape
``[batch_size, memory_max_time, dim]``.
memory_sequence_length (optional): A :tensor:`Tensor` of shape
``[batch_size]`` containing the sequence lengths for the batch
entries in memory. Used to create attention bias of
:attr:`memory_attention_bias` is not given. Ignored if
:attr:`memory_attention_bias` is provided.
memory_attention_bias (optional): A :tensor:`Tensor` of shape
``[batch_size, num_heads, memory_max_time, dim]``.
An attention bias typically sets the value of a padding
position to a large negative value for masking. If not given,
:attr:`memory_sequence_length` is used to automatically
create an attention bias.
inputs (optional): Input tensor for teacher forcing decoding, of
shape ``[batch_size, target_max_time, emb_dim]`` containing the
target sequence word embeddings. Used when
:attr:`decoding_strategy` is set to ``"train_greedy"``.
sequence_length (optional): A :tensor:`LongTensor` of shape
``[batch_size]``, containing the sequence length of
:attr:`inputs`. Tokens beyond the respective sequence length are
masked out.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``.
decoding_strategy (str): A string specifying the decoding
strategy, including ``"train_greedy"``, ``"infer_greedy"``,
``"infer_sample"``.
Different arguments are required based on the
strategy. See above for details. Ignored if
:attr:`beam_width` or :attr:`helper` is set.
beam_width (int): Set to use beam search. If given,
:attr:`decoding_strategy` is ignored.
length_penalty (float): Length penalty coefficient used in beam
search decoding. Refer to https://arxiv.org/abs/1609.08144
for more details.
It should be larger if longer sentences are desired.
context (optional): An :tensor:`LongTensor` of shape
``[batch_size, length]``, containing the starting tokens for
decoding. If context is set, ``start_tokens`` of the
:class:`~texar.modules.Helper` will be ignored.
context_sequence_length (optional): Specify the length of context.
max_decoding_length (int, optional): The maximum allowed number of
decoding steps.
If `None` (default), use ``"max_decoding_length"`` defined in
:attr:`hparams`. Ignored in ``"train_greedy"`` decoding.
impute_finished (bool): If `True`, then states for batch
entries which are marked as finished get copied through and
the corresponding outputs get zeroed out. This causes some
slowdown at each time step, but ensures that the final state
and outputs have the correct values and that backprop ignores
time steps that were marked as finished. Ignored in
``"train_greedy"`` decoding.
helper (optional): An instance of
:class:`texar.modules.decoders.Helper`
that defines the decoding strategy. If given,
``decoding_strategy`` and helper configurations in
:attr:`hparams` are ignored.
infer_mode (optional): If not `None`, overrides mode given by
:attr:`self.training`.
Returns:
- For **"train_greedy"** decoding, returns an instance of
:class:`~texar.modules.TransformerDecoderOutput` which contains
`sample_id` and `logits`.
- For **"infer_greedy"** and **"infer_sample"** decoding or
decoding with :attr:`helper`, returns
a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an
instance of :class:`~texar.modules.TransformerDecoderOutput` as
in `"train_greedy"`, and ``sequence_lengths`` is a
:tensor:`LongTensor` of shape ``[batch_size]`` containing the
length of each sample.
- For **beam search** decoding, returns a ``dict`` containing keys
``"sample_id"`` and ``"log_prob"``.
- ``"sample_id"`` is a :tensor:`LongTensor` of shape
``[batch_size, max_time, beam_width]`` containing generated
token indexes. ``sample_id[:,:,0]`` is the highest-probable
sample.
- ``"log_prob"`` is a :tensor:`Tensor` of shape
``[batch_size, beam_width]`` containing the log probability
of each sequence sample.
"""
if memory is not None:
if memory_attention_bias is None:
if memory_sequence_length is None:
raise ValueError(
"`memory_sequence_length` is required if "
"`memory_attention_bias` is not given.")
enc_padding = 1 - sequence_mask(
memory_sequence_length, memory.size(1),
dtype=torch.float32)
memory_attention_bias = attn.attention_bias_ignore_padding(
enc_padding)
# record the context, which will be used in step function
# for dynamic_decode
if context is not None:
if context_sequence_length is None:
raise ValueError("'context_sequence_length' must not be None"
"when 'context' is specified.")
self._state_context = context[:, 1:]
self._state_context_sequence_length = context_sequence_length - 1
else:
self._state_context = None
self._state_context_sequence_length = None
# Faster code path for teacher-forcing training
if (helper is None and beam_width is None and
decoding_strategy == 'train_greedy'):
if inputs is None:
raise ValueError("'input' must not be none "
"when using 'train_greedy' decoding strategy.")
if sequence_length is not None:
inputs = mask_sequences(inputs, sequence_length)
decoder_self_attention_bias = (
attn.attention_bias_lower_triangle(inputs.size(1)))
decoder_output = self._self_attention_stack(
inputs, memory, decoder_self_attention_bias,
memory_attention_bias, cache=None)
logits = self._output_layer(decoder_output)
sample_id = torch.argmax(logits, dim=-1)
return TransformerDecoderOutput(logits, sample_id)
# Inference code path.
if max_decoding_length is None:
max_decoding_length = self._hparams.max_decoding_length
self._state_max_decoding_length = max_decoding_length
if beam_width is None or beam_width == 1: # Inference-like decoding
# Prepare helper
if helper is None:
kwargs.update(decoding_strategy=decoding_strategy)
if context is not None:
kwargs.update(start_tokens=context[:, 0])
helper = self._create_or_get_helper(infer_mode, **kwargs)
assert isinstance(helper, EmbeddingHelper)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=False, batch_size=helper.batch_size)
if context is not None:
assert self._state_context is not None
pad_length = max_decoding_length - self._state_context.size(1)
if pad_length > 0:
self._state_context = torch.cat((
self._state_context,
self._state_context.new_zeros(
self._state_context.size(0), pad_length)
), dim=1)
outputs, cache, sequence_lengths = self.dynamic_decode(
helper, inputs=None, sequence_length=None,
initial_state=None, max_decoding_length=max_decoding_length,
impute_finished=impute_finished)
del cache # not used
if context is not None:
# Here the length of sample_id will be larger than that
# of logit by 1, because there will be a additional
# start_token in the returned sample_id.
# the start_id should be the first token of the
# given context
start_tokens = context[:, 0]
outputs = TransformerDecoderOutput(
logits=outputs.logits,
sample_id=torch.cat([
start_tokens.unsqueeze(1),
outputs.sample_id
], dim=1))
sequence_lengths = sequence_lengths + 1
return outputs, sequence_lengths
else: # Beam-search decoding
# Ignore `decoding_strategy` and # assume `helper` is not set.
if helper is not None:
raise ValueError("Must not set 'beam_width' and 'helper' "
"simultaneously.")
if context is not None:
start_tokens = context[:, 0]
else:
if 'start_tokens' not in kwargs:
raise ValueError(
"'start_tokens' must be specified when using"
"beam search decoding.")
start_tokens = kwargs['start_tokens']
_batch_size = start_tokens.size(0)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=True,
batch_size=_batch_size)
end_token: int = kwargs.get('end_token') # type: ignore
# The output format is different when running beam search.
sample_id, log_prob = self._beam_decode(
start_tokens,
end_token,
embedding_fn=kwargs['embedding'],
beam_width=beam_width,
length_penalty=length_penalty,
decode_length=max_decoding_length)
return {
'sample_id': sample_id,
'log_prob': log_prob
}
def _self_attention_stack(
self, inputs: torch.Tensor,
memory: Optional[torch.Tensor],
decoder_self_attention_bias: Optional[torch.Tensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None) -> torch.Tensor:
r"""Forward through the stacked multi-head attentions.
"""
inputs = self.embed_dropout(inputs)
if cache is not None:
if memory is not None:
memory_attention_bias = cache['memory_attention_bias']
else:
assert decoder_self_attention_bias is not None
x = inputs
for i in range(self._hparams.num_blocks):
layer_cache = cache['layers'][i] if cache is not None else None
selfatt_output = self.self_attns[i](
queries=self.self_attn_layer_norm[i](x),
memory=None,
memory_attention_bias=decoder_self_attention_bias,
cache=layer_cache)
x = x + self.residual_dropout(selfatt_output)
if memory is not None:
encdec_output = self.enc_dec_attns[i](
queries=self.end_dec_attn_layer_norm[i](x),
memory=memory,
memory_attention_bias=memory_attention_bias)
x = x + self.residual_dropout(encdec_output)
sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
x = x + self.residual_dropout(sub_output)
return self.final_layer_norm(x)
def _init_cache(self, memory: Optional[torch.Tensor],
memory_attention_bias: Optional[torch.Tensor],
beam_search_decoding: bool,
batch_size: int) -> Cache:
r"""Returns an initialized cache.
In order to support both inference-like decoding and beam-search
decoding, the elements of each layer must be initialized and extended
as different structure respectively. Specifically, for inference-like
decoding, a simple list is used; for beam-search decoding, a
:tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``
is maintained, where ``current_steps`` is the number of steps currently
decoded.
"""
device = next(self.parameters()).device
_create_fn = (_create_empty_tensor if beam_search_decoding
else _create_ta)
cache: Cache = {
'memory': memory,
'memory_attention_bias': memory_attention_bias,
'layers': [{
'keys': _create_fn(),
'values': _create_fn(),
} for _ in range(self._hparams.num_blocks)],
}
return cache
@property
def output_size(self) -> int:
r"""Output size of one step.
"""
return self._input_size
| 44.079301 | 80 | 0.596493 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transformer decoder.
"""
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from texar.core import layers
from texar.hyperparams import HParams
from texar.modules.decoders.decoder_base import DecoderBase, _make_output_layer
from texar.modules.decoders.decoder_helpers import EmbeddingHelper, Helper
from texar.modules.encoders.multihead_attention import (
Cache, MultiheadAttentionEncoder)
from texar.modules.encoders.transformer_encoder import (
default_transformer_poswise_net_hparams)
from texar.modules.networks.networks import FeedForwardNetwork
from texar.utils import transformer_attentions as attn
from texar.utils.beam_search import beam_search
from texar.utils.shapes import mask_sequences
from texar.utils.utils import sequence_mask
__all__ = [
'TransformerDecoderOutput',
'TransformerDecoder',
]
class TransformerDecoderOutput(NamedTuple):
    r"""The output of :class:`TransformerDecoder`.

    A ``NamedTuple`` of ``(logits, sample_id)`` returned by one decoding
    pass (or per decoding step).
    """

    logits: torch.Tensor
    r"""A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``
    containing the logits."""

    sample_id: torch.LongTensor
    r"""A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing
    the sampled token indices."""
class TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):
r"""Transformer decoder that applies multi-head self-attention for
sequence decoding.
It is a stack of :class:`~texar.modules.encoders.MultiheadAttentionEncoder`,
:class:`~texar.modules.FeedForwardNetwork`, and residual connections.
Args:
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~texar.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
def __init__(self,
             vocab_size: Optional[int] = None,
             output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,
             hparams: Optional[HParams] = None):
    super().__init__(0, vocab_size,  # dummy value for input_size
                     input_time_major=False,
                     output_time_major=False, hparams=hparams)
    self._input_size = self._hparams.dim

    # Build (or wrap) the projection from decoder states to vocab logits.
    self._output_layer, self._vocab_size = _make_output_layer(
        output_layer, vocab_size, self._input_size,
        self._hparams.output_layer_bias)

    # Per-block sub-layers: masked self-attention, encoder-decoder
    # attention, and a position-wise feed-forward network, each preceded
    # by its own LayerNorm (pre-norm arrangement, see
    # `_self_attention_stack`).
    self.self_attns = nn.ModuleList()
    self.self_attn_layer_norm = nn.ModuleList()
    self.enc_dec_attns = nn.ModuleList()
    self.end_dec_attn_layer_norm = nn.ModuleList()
    self.poswise_networks = nn.ModuleList()
    self.poswise_layer_norm = nn.ModuleList()

    # LayerNorm epsilon differs between the OpenAI GPT configuration
    # (1e-5) and the default (1e-12).
    if self._hparams.use_gpt_config:
        eps = 1e-5
    else:
        eps = 1e-12

    for _ in range(self._hparams.num_blocks):
        # Masked self-attention over previously decoded positions.
        attn_module = MultiheadAttentionEncoder(
            self._input_size, self._hparams.multihead_attention)
        if self._hparams.dim != attn_module.output_size:
            raise ValueError("The output dimension of "
                             "MultiheadEncoder should be equal "
                             "to the dim of TransformerDecoder")
        self.self_attns.append(attn_module)
        self.self_attn_layer_norm.append(
            nn.LayerNorm(self._input_size, eps=eps))

        # Encoder-decoder attention (used only when `memory` is provided).
        attn_module = MultiheadAttentionEncoder(
            self._input_size, self._hparams.multihead_attention)
        if self._hparams.dim != attn_module.output_size:
            raise ValueError("The output dimension of "
                             "MultiheadEncoder should be equal "
                             "to the dim of TransformerDecoder")
        self.enc_dec_attns.append(attn_module)
        self.end_dec_attn_layer_norm.append(
            nn.LayerNorm(self._input_size, eps=eps))

        poswise_network = FeedForwardNetwork(
            hparams=self._hparams.poswise_feedforward)
        if (poswise_network.hparams.layers[-1]['kwargs']['out_features']
                != self._hparams.dim):
            raise ValueError("The output dimension of "
                             "FeedForwardNetwork should be equal "
                             "to the dim of TransformerDecoder")
        self.poswise_networks.append(poswise_network)
        self.poswise_layer_norm.append(
            nn.LayerNorm(self._input_size, eps=eps))

    self.final_layer_norm = nn.LayerNorm(self._input_size, eps=eps)
    self.embed_dropout = nn.Dropout(self._hparams.embedding_dropout)
    self.residual_dropout = nn.Dropout(self._hparams.residual_dropout)

    if self._hparams.initializer:
        # TODO: This might be different to what TensorFlow does
        initialize = layers.get_initializer(self._hparams.initializer)
        assert initialize is not None
        # Do not re-initialize LayerNorm modules.
        for name, param in self.named_parameters():
            if name.split(".")[-1] == "weight" and "layer_norm" not in name:
                initialize(param)
@staticmethod
def default_hparams():
    r"""Returns a dictionary of hyperparameters with default values.

    .. code-block:: python

        {
            # Same as in TransformerEncoder
            "num_blocks": 6,
            "dim": 512,
            "use_gpt_config": False,
            "embedding_dropout": 0.1,
            "residual_dropout": 0.1,
            "poswise_feedforward": default_transformer_poswise_net_hparams,
            "multihead_attention": {
                'name': 'multihead_attention',
                'num_units': 512,
                'num_heads': 8,
                'dropout_rate': 0.1,
                'output_dim': 512,
                'use_bias': False,
            },
            "initializer": None,
            "name": "transformer_decoder"

            # Additional for TransformerDecoder
            "embedding_tie": True,
            "output_layer_bias": False,
            "max_decoding_length": int(1e10),
        }

    Here:

    `"num_blocks"`: int
        Number of stacked blocks.

    `"dim"`: int
        Hidden dimension of the encoder.

    `"use_gpt_config"`: bool
        Whether to follow the `eps` setting of OpenAI GPT.

    `"embedding_dropout"`: float
        Dropout rate of the input word and position embeddings.

    `"residual_dropout"`: float
        Dropout rate of the residual connections.

    `"poswise_feedforward"`: dict
        Hyperparameters for a feed-forward network used in residual
        connections.
        Make sure the dimension of the output tensor is equal to ``dim``.

        See :func:`~texar.modules.default_transformer_poswise_net_hparams`
        for details.

    `"multihead_attention"`: dict
        Hyperparameters for the multi-head attention strategy.
        Make sure the ``output_dim`` in this module is equal to ``dim``.

        See :func:`~texar.modules.MultiheadAttentionEncoder.default_hparams`
        for details.

    `"initializer"`: dict, optional
        Hyperparameters of the default initializer that initializes
        variables created in this module.
        See :func:`~texar.core.get_initializer` for details.

    `"embedding_tie"`: bool
        Whether to use the word embedding matrix as the output layer
        that computes logits. If `False`, a new dense layer is created.

    `"output_layer_bias"`: bool
        Whether to use bias to the output layer.

    `"max_decoding_length"`: int
        The maximum allowed number of decoding steps.
        Set to a very large number to avoid the length constraint.
        Ignored if provided in :meth:`forward` or ``"train_greedy"``
        decoding is used.

    `"name"`: str
        Name of the module.
    """
    dim = 512
    return {
        'num_blocks': 6,
        'dim': dim,
        'use_gpt_config': False,
        'embedding_tie': True,
        'output_layer_bias': False,
        'max_decoding_length': int(1e10),
        'embedding_dropout': 0.1,
        'residual_dropout': 0.1,
        'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
        'multihead_attention': {
            'name': 'multihead_attention',
            'num_units': 512,
            'num_heads': 8,
            'dropout_rate': 0.1,
            'output_dim': 512,
            'use_bias': False,
        },
        'initializer': None,
        'name': "transformer_decoder",
    }
def _inputs_to_outputs(self, inputs: torch.Tensor,
                       cache: Cache) -> Tuple[torch.Tensor, Cache]:
    r"""Runs one decoding step and returns the logits for the next token.

    :attr:`inputs` should be of shape ``[batch_size, dim]``; the returned
    logits are of shape ``[batch_size, vocab_size]``, together with the
    (updated) cache.
    """
    # Give the single step a time dimension, run it through the decoder
    # stack against the cached state, then project to the vocabulary and
    # drop the singleton time axis again.
    step_input = inputs.unsqueeze(1)
    stack_output = self._self_attention_stack(
        step_input, memory=cache['memory'], cache=cache)
    step_logits = self._output_layer(stack_output).squeeze(1)
    return step_logits, cache
def forward(self,  # type: ignore
            inputs: Optional[torch.Tensor] = None,
            sequence_length: Optional[torch.LongTensor] = None,
            memory: Optional[torch.Tensor] = None,
            memory_sequence_length: Optional[torch.LongTensor] = None,
            memory_attention_bias: Optional[torch.Tensor] = None,
            context: Optional[torch.Tensor] = None,
            context_sequence_length: Optional[torch.LongTensor] = None,
            helper: Optional[Helper] = None,
            decoding_strategy: str = 'train_greedy',
            max_decoding_length: Optional[int] = None,
            impute_finished: bool = False,
            infer_mode: Optional[bool] = None,
            beam_width: Optional[int] = None,
            length_penalty: float = 0.,
            **kwargs) \
        -> Union[
            TransformerDecoderOutput,
            Tuple[TransformerDecoderOutput, torch.LongTensor],
            Dict[str, torch.Tensor]]:
    r"""Performs decoding.

    The interface is very similar to that of RNN decoders
    (:class:`texar.modules.RNNDecoderBase`). In particular,
    the function provides **3 ways** to specify the decoding method, with
    varying flexibility:

    1. The :attr:`decoding_strategy` argument.

       - **"train_greedy"**: decoding in teacher-forcing fashion (i.e.,
         feeding ground truth to decode the next step), and for each step
         sample is obtained by taking the `argmax` of logits.
         Argument :attr:`inputs` is required for this strategy.
         :attr:`sequence_length` is optional.
       - **"infer_greedy"**: decoding in inference fashion (i.e., feeding
         `generated` sample to decode the next step), and for each step
         sample is obtained by taking the `argmax` of logits.
         Arguments :attr:`(start_tokens, end_token)` are
         required for this strategy, and argument
         :attr:`max_decoding_length` is optional.
       - **"infer_sample"**: decoding in inference fashion, and for each
         step sample is obtained by `random sampling` from the logits.
         Arguments :attr:`(start_tokens, end_token)` are required for this
         strategy, and argument :attr:`max_decoding_length` is optional.

      This argument is used only when arguments :attr:`helper` and
      :attr:`beam_width` are both `None`.

    2. The :attr:`helper` argument: An instance of subclass of
       :class:`texar.modules.decoders.Helper`.
       This provides a superset of decoding strategies than above.
       The interface is the same as in RNN decoders.
       Please refer to :meth:`texar.modules.RNNDecoderBase.forward` for
       detailed usage and examples.

       Note that, here, though using a
       :class:`~texar.decoder.TrainingHelper` corresponding to the
       ``"train_greedy"`` strategy above, the implementation is *slower*
       than directly setting ``decoding_strategy="train_greedy"`` (though
       output results are the same).

       Argument :attr:`max_decoding_length` is optional.

    3. **Beam search**: set :attr:`beam_width` to use beam search decoding.
       Arguments :attr:`(start_tokens, end_token)` are required,
       and argument :attr:`max_decoding_length` is optional.

    Args:
        memory (optional): The memory to attend, e.g., the output of an RNN
            encoder. A :tensor:`Tensor` of shape
            ``[batch_size, memory_max_time, dim]``.
        memory_sequence_length (optional): A :tensor:`Tensor` of shape
            ``[batch_size]`` containing the sequence lengths for the batch
            entries in memory. Used to create attention bias of
            :attr:`memory_attention_bias` is not given. Ignored if
            :attr:`memory_attention_bias` is provided.
        memory_attention_bias (optional): A :tensor:`Tensor` of shape
            ``[batch_size, num_heads, memory_max_time, dim]``.
            An attention bias typically sets the value of a padding
            position to a large negative value for masking. If not given,
            :attr:`memory_sequence_length` is used to automatically
            create an attention bias.
        inputs (optional): Input tensor for teacher forcing decoding, of
            shape ``[batch_size, target_max_time, emb_dim]`` containing the
            target sequence word embeddings. Used when
            :attr:`decoding_strategy` is set to ``"train_greedy"``.
        sequence_length (optional): A :tensor:`LongTensor` of shape
            ``[batch_size]``, containing the sequence length of
            :attr:`inputs`. Tokens beyond the respective sequence length are
            masked out.
            Used when :attr:`decoding_strategy` is set to
            ``"train_greedy"``.
        decoding_strategy (str): A string specifying the decoding
            strategy, including ``"train_greedy"``, ``"infer_greedy"``,
            ``"infer_sample"``.
            Different arguments are required based on the
            strategy. See above for details. Ignored if
            :attr:`beam_width` or :attr:`helper` is set.
        beam_width (int): Set to use beam search. If given,
            :attr:`decoding_strategy` is ignored.
        length_penalty (float): Length penalty coefficient used in beam
            search decoding. Refer to https://arxiv.org/abs/1609.08144
            for more details.
            It should be larger if longer sentences are desired.
        context (optional): An :tensor:`LongTensor` of shape
            ``[batch_size, length]``, containing the starting tokens for
            decoding. If context is set, ``start_tokens`` of the
            :class:`~texar.modules.Helper` will be ignored.
        context_sequence_length (optional): Specify the length of context.
        max_decoding_length (int, optional): The maximum allowed number of
            decoding steps.
            If `None` (default), use ``"max_decoding_length"`` defined in
            :attr:`hparams`. Ignored in ``"train_greedy"`` decoding.
        impute_finished (bool): If `True`, then states for batch
            entries which are marked as finished get copied through and
            the corresponding outputs get zeroed out. This causes some
            slowdown at each time step, but ensures that the final state
            and outputs have the correct values and that backprop ignores
            time steps that were marked as finished. Ignored in
            ``"train_greedy"`` decoding.
        helper (optional): An instance of
            :class:`texar.modules.decoders.Helper`
            that defines the decoding strategy. If given,
            ``decoding_strategy`` and helper configurations in
            :attr:`hparams` are ignored.
        infer_mode (optional): If not `None`, overrides mode given by
            :attr:`self.training`.

    Returns:

        - For **"train_greedy"** decoding, returns an instance of
          :class:`~texar.modules.TransformerDecoderOutput` which contains
          `sample_id` and `logits`.

        - For **"infer_greedy"** and **"infer_sample"** decoding or
          decoding with :attr:`helper`, returns
          a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an
          instance of :class:`~texar.modules.TransformerDecoderOutput` as
          in `"train_greedy"`, and ``sequence_lengths`` is a
          :tensor:`LongTensor` of shape ``[batch_size]`` containing the
          length of each sample.

        - For **beam search** decoding, returns a ``dict`` containing keys
          ``"sample_id"`` and ``"log_prob"``.

          - ``"sample_id"`` is a :tensor:`LongTensor` of shape
            ``[batch_size, max_time, beam_width]`` containing generated
            token indexes. ``sample_id[:,:,0]`` is the highest-probable
            sample.
          - ``"log_prob"`` is a :tensor:`Tensor` of shape
            ``[batch_size, beam_width]`` containing the log probability
            of each sequence sample.
    """
    if memory is not None:
        if memory_attention_bias is None:
            if memory_sequence_length is None:
                raise ValueError(
                    "`memory_sequence_length` is required if "
                    "`memory_attention_bias` is not given.")
            # Mask out memory padding positions with a large negative bias.
            enc_padding = 1 - sequence_mask(
                memory_sequence_length, memory.size(1),
                dtype=torch.float32)
            memory_attention_bias = attn.attention_bias_ignore_padding(
                enc_padding)

    # record the context, which will be used in step function
    # for dynamic_decode
    if context is not None:
        if context_sequence_length is None:
            # BUGFIX: the two adjacent string literals previously
            # concatenated without a space ("...Nonewhen...").
            raise ValueError("'context_sequence_length' must not be None "
                             "when 'context' is specified.")
        # Drop the first context token: it is used as the start token.
        self._state_context = context[:, 1:]
        self._state_context_sequence_length = context_sequence_length - 1
    else:
        self._state_context = None
        self._state_context_sequence_length = None

    # Faster code path for teacher-forcing training
    if (helper is None and beam_width is None and
            decoding_strategy == 'train_greedy'):
        if inputs is None:
            raise ValueError("'input' must not be none "
                             "when using 'train_greedy' decoding strategy.")
        if sequence_length is not None:
            inputs = mask_sequences(inputs, sequence_length)

        # Causal mask so each position only attends to earlier positions.
        decoder_self_attention_bias = (
            attn.attention_bias_lower_triangle(inputs.size(1)))

        decoder_output = self._self_attention_stack(
            inputs, memory, decoder_self_attention_bias,
            memory_attention_bias, cache=None)
        logits = self._output_layer(decoder_output)
        sample_id = torch.argmax(logits, dim=-1)

        return TransformerDecoderOutput(logits, sample_id)

    # Inference code path.
    if max_decoding_length is None:
        max_decoding_length = self._hparams.max_decoding_length
    self._state_max_decoding_length = max_decoding_length

    if beam_width is None or beam_width == 1:  # Inference-like decoding
        # Prepare helper
        if helper is None:
            kwargs.update(decoding_strategy=decoding_strategy)
            if context is not None:
                kwargs.update(start_tokens=context[:, 0])
            helper = self._create_or_get_helper(infer_mode, **kwargs)
        assert isinstance(helper, EmbeddingHelper)

        self._state_cache = self._init_cache(
            memory, memory_attention_bias,
            beam_search_decoding=False, batch_size=helper.batch_size)
        if context is not None:
            assert self._state_context is not None
            # Zero-pad the context up to the maximum decoding length so
            # `step` can index it at any time step.
            pad_length = max_decoding_length - self._state_context.size(1)
            if pad_length > 0:
                self._state_context = torch.cat((
                    self._state_context,
                    self._state_context.new_zeros(
                        self._state_context.size(0), pad_length)
                ), dim=1)

        outputs, cache, sequence_lengths = self.dynamic_decode(
            helper, inputs=None, sequence_length=None,
            initial_state=None, max_decoding_length=max_decoding_length,
            impute_finished=impute_finished)
        del cache  # not used

        if context is not None:
            # Here the length of sample_id will be larger than that
            # of logit by 1, because there will be an additional
            # start_token in the returned sample_id.
            # the start_id should be the first token of the
            # given context
            start_tokens = context[:, 0]
            outputs = TransformerDecoderOutput(
                logits=outputs.logits,
                sample_id=torch.cat([
                    start_tokens.unsqueeze(1),
                    outputs.sample_id
                ], dim=1))
            sequence_lengths = sequence_lengths + 1

        return outputs, sequence_lengths

    else:  # Beam-search decoding
        # Ignore `decoding_strategy`; assume `helper` is not set.
        if helper is not None:
            raise ValueError("Must not set 'beam_width' and 'helper' "
                             "simultaneously.")
        if context is not None:
            start_tokens = context[:, 0]
        else:
            if 'start_tokens' not in kwargs:
                # BUGFIX: the two adjacent string literals previously
                # concatenated without a space ("...usingbeam search...").
                raise ValueError(
                    "'start_tokens' must be specified when using "
                    "beam search decoding.")
            start_tokens = kwargs['start_tokens']
        _batch_size = start_tokens.size(0)
        self._state_cache = self._init_cache(
            memory, memory_attention_bias,
            beam_search_decoding=True,
            batch_size=_batch_size)
        end_token: int = kwargs.get('end_token')  # type: ignore

        # The output format is different when running beam search.
        sample_id, log_prob = self._beam_decode(
            start_tokens,
            end_token,
            embedding_fn=kwargs['embedding'],
            beam_width=beam_width,
            length_penalty=length_penalty,
            decode_length=max_decoding_length)

        return {
            'sample_id': sample_id,
            'log_prob': log_prob
        }
def _self_attention_stack(
        self, inputs: torch.Tensor,
        memory: Optional[torch.Tensor],
        decoder_self_attention_bias: Optional[torch.Tensor] = None,
        memory_attention_bias: Optional[torch.Tensor] = None,
        cache: Optional[Cache] = None) -> torch.Tensor:
    r"""Forward through the stacked multi-head attentions.

    Each block applies LayerNorm-then-self-attention, optionally
    LayerNorm-then-encoder-decoder attention (when `memory` is given),
    and LayerNorm-then-feed-forward, each sub-layer wrapped in a
    residual connection with dropout; a final LayerNorm is applied to
    the stack output.
    """
    inputs = self.embed_dropout(inputs)
    if cache is not None:
        # Incremental (step-by-step) decoding: reuse the memory attention
        # bias stored in the cache by `_init_cache`.
        if memory is not None:
            memory_attention_bias = cache['memory_attention_bias']
    else:
        # Full-sequence decoding requires an explicit causal mask.
        assert decoder_self_attention_bias is not None

    x = inputs
    for i in range(self._hparams.num_blocks):
        layer_cache = cache['layers'][i] if cache is not None else None

        selfatt_output = self.self_attns[i](
            queries=self.self_attn_layer_norm[i](x),
            memory=None,
            memory_attention_bias=decoder_self_attention_bias,
            cache=layer_cache)
        x = x + self.residual_dropout(selfatt_output)

        if memory is not None:
            encdec_output = self.enc_dec_attns[i](
                queries=self.end_dec_attn_layer_norm[i](x),
                memory=memory,
                memory_attention_bias=memory_attention_bias)
            x = x + self.residual_dropout(encdec_output)

        sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
        x = x + self.residual_dropout(sub_output)

    return self.final_layer_norm(x)
def _init_cache(self, memory: Optional[torch.Tensor],
                memory_attention_bias: Optional[torch.Tensor],
                beam_search_decoding: bool,
                batch_size: int) -> Cache:
    r"""Returns an initialized cache.

    In order to support both inference-like decoding and beam-search
    decoding, the elements of each layer must be initialized and extended
    as different structure respectively. Specifically, for inference-like
    decoding, a simple list is used; for beam-search decoding, a
    :tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``
    is maintained, where ``current_steps`` is the number of steps currently
    decoded.
    """
    device = next(self.parameters()).device

    def _create_ta():
        # Inference-like decoding: keys/values accumulate in a plain list.
        return []

    def _create_empty_tensor():
        # Beam-search decoding: start from an empty (zero-step) tensor
        # that grows along dim 1 as steps are decoded.
        ret = torch.zeros(
            batch_size, 0, self._hparams.multihead_attention.num_units,
            dtype=torch.float, device=device)
        return ret

    _create_fn = (_create_empty_tensor if beam_search_decoding
                  else _create_ta)

    cache: Cache = {
        'memory': memory,
        'memory_attention_bias': memory_attention_bias,
        'layers': [{
            'keys': _create_fn(),
            'values': _create_fn(),
        } for _ in range(self._hparams.num_blocks)],
    }
    return cache
def _beam_decode(self, start_tokens: torch.LongTensor, end_token: int,
                 embedding_fn: Callable[
                     [torch.LongTensor, torch.LongTensor], torch.Tensor],
                 decode_length: int = 256, beam_width: int = 5,
                 length_penalty: float = 0.6) \
        -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Runs beam search over the decoder using ``self._state_cache``.

    Returns a tuple ``(outputs, log_prob)`` where ``outputs`` holds the
    generated token ids permuted to shape
    ``[batch_size, seq_length, beam_width]`` (with the leading <BOS>
    removed) and ``log_prob`` the per-beam sequence log probabilities.
    """

    def _symbols_to_logits_fn(ids, cache):
        # `ids` holds all tokens decoded so far; only the last token is
        # embedded and fed through the network -- earlier steps are
        # served from `cache`.
        batch_size = ids.size(0)
        step = ids.size(-1) - 1
        times = ids.new_full((batch_size,), step)
        inputs = embedding_fn(ids[:, -1], times)
        return self._inputs_to_outputs(inputs, cache)

    assert self._vocab_size is not None

    outputs, log_prob = beam_search(
        _symbols_to_logits_fn,
        start_tokens,
        beam_width,
        decode_length,
        self._vocab_size,
        length_penalty,
        states=self._state_cache,
        eos_id=end_token)

    # Ignores <BOS>
    outputs = outputs[:, :, 1:]
    # shape = [batch_size, seq_length, beam_width]
    outputs = outputs.permute(0, 2, 1)
    return outputs, log_prob
@property
def output_size(self) -> int:
    r"""Output size of one step, equal to ``hparams.dim``
    (``self._input_size`` is set from ``self._hparams.dim`` in
    ``__init__``).
    """
    return self._input_size
def initialize(self, helper: Helper, inputs: Optional[torch.Tensor],
               sequence_length: Optional[torch.LongTensor],
               initial_state: Optional[Cache]) \
        -> Tuple[torch.ByteTensor, torch.Tensor, Cache]:
    r"""Prepares the first step of dynamic decoding.

    Delegates to the helper for the initial ``finished`` flags and
    inputs; when no ``initial_state`` is given, falls back to the cache
    built in :meth:`forward` (``self._state_cache``).
    """
    initial_finished, initial_inputs = helper.initialize(
        inputs, sequence_length)
    state = initial_state or self._state_cache
    return initial_finished, initial_inputs, state
def step(self, helper: Helper, time: int,
         inputs: torch.Tensor, state: Optional[Cache]) \
        -> Tuple[TransformerDecoderOutput, Cache,
                 torch.Tensor, torch.ByteTensor]:
    r"""Performs one step of dynamic decoding.

    Computes the next-token logits, samples via the helper, and (when a
    context was provided to :meth:`forward`) forces the sampled token to
    the context token while still within the context length.
    """
    assert state is not None
    outputs, state = self._inputs_to_outputs(inputs, state)
    sample_ids = helper.sample(time=time, outputs=outputs)
    if self._state_context is not None:
        assert self._state_context_sequence_length is not None
        # While still inside the provided context, emit the context token
        # instead of the model's own prediction.
        sample_ids = torch.where(
            self._state_context_sequence_length > time,
            self._state_context[:, time],
            sample_ids)

    if time + 1 == self._state_max_decoding_length:
        # Maximum decoding length reached, mark all batches as finished.
        # This requires special handling because performing lookup on
        # position embeddings with `time + 1` may result in IndexError.
        finished = torch.ones_like(sample_ids, dtype=torch.uint8)
        # Since `next_inputs` will not be used, simply create a null tensor.
        next_inputs = torch.empty(0)
    else:
        finished, next_inputs = helper.next_inputs(
            time=time, outputs=outputs, sample_ids=sample_ids)
    next_state = state
    outputs = TransformerDecoderOutput(
        logits=outputs,
        sample_id=sample_ids)
    return outputs, next_state, next_inputs, finished
def finalize(self,  # type: ignore
             outputs: TransformerDecoderOutput,
             final_state: Optional[Cache],
             sequence_lengths: torch.LongTensor) \
        -> Tuple[TransformerDecoderOutput, Optional[Cache]]:
    r"""Finalizes dynamic decoding and releases per-decode state.

    The ``_state_*`` attributes are only valid for the duration of one
    decode; deleting them here prevents stale state (and the cached
    tensors it holds) from surviving across calls.
    """
    # Clear state variables at end of decoding.
    del self._state_max_decoding_length
    del self._state_context
    del self._state_context_sequence_length
    del self._state_cache

    return super().finalize(outputs, final_state, sequence_lengths)
| 7,088 | 0 | 197 |
8dff8736f6ada9181135c7ce91dcaefe81a44cdd | 477 | py | Python | commands/logout.py | saiblo/saiblo-dev-tools | 143c7264b7ba157b313092a2263875733dafb5f4 | [
"MIT"
] | null | null | null | commands/logout.py | saiblo/saiblo-dev-tools | 143c7264b7ba157b313092a2263875733dafb5f4 | [
"MIT"
] | null | null | null | commands/logout.py | saiblo/saiblo-dev-tools | 143c7264b7ba157b313092a2263875733dafb5f4 | [
"MIT"
] | null | null | null | import os.path
from argparse import ArgumentParser, Namespace
from sys import stdout
from utils.config import COOKIE
| 20.73913 | 54 | 0.691824 | import os.path
from argparse import ArgumentParser, Namespace
from sys import stdout
from utils.config import COOKIE
def _logout() -> None:
    """Delete the stored cookie file to log the user out.

    Raises ``RuntimeError`` when the cookie path does not exist (the user
    never logged in) or exists but is not a regular file. On success the
    file is removed and a confirmation message is written to stdout.
    (Messages are user-facing runtime strings and are kept verbatim.)
    """
    if os.path.exists(COOKIE):
        if not os.path.isfile(COOKIE):
            raise RuntimeError(f'路径 `{COOKIE}` 已存在,但不是文件')
    else:
        raise RuntimeError('您尚未登录')
    os.remove(COOKIE)
    stdout.write('成功删除 Cookie\n')
def subcommand_hook(parser: ArgumentParser) -> None:
    """Register extra CLI arguments on ``parser`` for this subcommand.

    This subcommand needs no extra arguments, so the hook is a no-op
    (kept for interface uniformity with other command modules --
    presumably; confirm against the command loader).
    """
    return
def main(args: Namespace) -> None:
    """Entry point for the subcommand: delete the stored cookie.

    ``args`` is accepted for interface uniformity but unused here.
    """
    _logout()
| 327 | 0 | 69 |
653819d97357ad6ffcdcd23f5c56d527b36ece6e | 142 | py | Python | shop/products/urls.py | ahmadreza-smdi/ms-shop | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | [
"MIT"
] | 6 | 2019-11-23T17:16:17.000Z | 2021-09-20T13:12:55.000Z | shop/products/urls.py | abdulkarimFallatah/ms-shop | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | [
"MIT"
] | 5 | 2021-04-08T22:00:07.000Z | 2022-02-10T12:38:25.000Z | shop/products/urls.py | abdulkarimFallatah/ms-shop | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | [
"MIT"
] | 2 | 2019-11-24T20:33:16.000Z | 2021-09-09T00:29:52.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE(review): these patterns are unanchored regexes -- r'buy.'
    # matches 'buy' plus any single character anywhere in the path, and
    # r'' matches every URL (catch-all). Presumably r'^buy/' and an
    # explicit catch-all were intended; confirm before tightening.
    url(r'buy.',views.buy_product),
    url(r'',views.show_product),
]
| 15.777778 | 35 | 0.683099 | from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE(review): these patterns are unanchored regexes -- r'buy.'
    # matches 'buy' plus any single character anywhere in the path, and
    # r'' matches every URL (catch-all). Presumably r'^buy/' and an
    # explicit catch-all were intended; confirm before tightening.
    url(r'buy.',views.buy_product),
    url(r'',views.show_product),
]
| 0 | 0 | 0 |
76db92b5107a972a570bf69f2353586b52f75b43 | 24,883 | py | Python | desktop/libs/notebook/src/notebook/models.py | jubrad/hue | 51535fa06923d6c2fbe8ada47bf33793d938155e | [
"Apache-2.0"
] | null | null | null | desktop/libs/notebook/src/notebook/models.py | jubrad/hue | 51535fa06923d6c2fbe8ada47bf33793d938155e | [
"Apache-2.0"
] | null | null | null | desktop/libs/notebook/src/notebook/models.py | jubrad/hue | 51535fa06923d6c2fbe8ada47bf33793d938155e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import str, object
import datetime
import json
import logging
import math
import numbers
import sys
import uuid
from datetime import timedelta
from django.contrib.sessions.models import Session
from django.db.models import Count
from django.db.models.functions import Trunc
from django.urls import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.conf import has_connectors, TASK_SERVER
from desktop.lib.i18n import smart_unicode
from desktop.lib.paths import SAFE_CHARACTERS_URI
from desktop.models import Document2
from useradmin.models import User
from notebook.conf import EXAMPLES, get_ordered_interpreters
from notebook.connectors.base import Notebook, get_api as _get_api, get_interpreter
if sys.version_info[0] > 2:
from urllib.parse import quote as urllib_quote
else:
from urllib import quote as urllib_quote
LOG = logging.getLogger(__name__)
# Materialize and HTML escape results
def make_notebook(
    name='Browse', description='', editor_type='hive', statement='', status='ready',
    files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None, batch_submit=False,
    on_success_url=None, skip_historify=False, is_task=False, last_executed=-1, is_notebook=False, pub_sub_url=None, result_properties=None,
    namespace=None, compute=None, is_presentation_mode=False):
  '''
  Build a `Notebook` document wrapping a single editor snippet for `statement`.

  skip_historify: do not add the task to the query history. e.g. SQL Dashboard
  is_task / isManaged: true when being a managed by Hue operation (include_managed=True in document),
    e.g. exporting query result, dropping some tables
  result_properties: optional dict merged into the snippet's `result` section.
    (BUGFIX: the default was previously the mutable literal `{}`, shared
    across calls; it is now `None` and replaced with a fresh dict below.)
  '''
  from notebook.connectors.hiveserver2 import HS2Api

  if result_properties is None:
    result_properties = {}

  if has_connectors():
    # With connectors, `editor_type` is a connector id; resolve its dialect.
    interpreter = get_interpreter(connector_type=editor_type)
    editor_connector = editor_type
    editor_type = interpreter['dialect']
  else:
    editor_connector = editor_type

  editor = Notebook()
  if snippet_properties is None:
    snippet_properties = {}

  if editor_type == 'hive':
    sessions_properties = HS2Api.get_properties(editor_type)
    if files is not None:
      _update_property_value(sessions_properties, 'files', files)

    if functions is not None:
      _update_property_value(sessions_properties, 'functions', functions)

    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'impala':
    sessions_properties = HS2Api.get_properties(editor_type)
    # NOTE(review): this checks `settings` but updates 'files' with `files`
    # -- looks like a copy/paste slip; behavior kept as-is pending
    # confirmation against callers.
    if settings is not None:
      _update_property_value(sessions_properties, 'files', files)
  elif editor_type == 'java':
    sessions_properties = [] # Java options
  else:
    sessions_properties = []

  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'description': description,
    'sessions': [
      {
         'type': editor_connector,
         'properties': sessions_properties,
         'id': None
      }
    ],
    'selectedSnippet': editor_connector, # TODO: might need update in notebook.ko.js
    'type': 'notebook' if is_notebook else 'query-%s' % editor_type,
    'showHistory': True,
    'isSaved': is_saved,
    'onSuccessUrl': urllib_quote(on_success_url.encode('utf-8'), safe=SAFE_CHARACTERS_URI) if on_success_url else None,
    'pubSubUrl': pub_sub_url,
    'skipHistorify': skip_historify,
    'isPresentationModeDefault': is_presentation_mode,
    'isManaged': is_task,
    'snippets': [
      {
         'status': status,
         'id': str(uuid.uuid4()),
         'statement_raw': statement,
         'statement': statement,
         'type': editor_connector,
         'wasBatchExecuted': batch_submit,
         'lastExecuted': last_executed,
         'properties': {
            'files': [] if files is None else files,
            'functions': [] if functions is None else functions,
            'settings': [] if settings is None else settings
         },
         'name': name,
         'database': database,
         'namespace': namespace if namespace else {},
         'compute': compute if compute else {},
         'result': {'handle': {}},
         'variables': []
      }
    ] if not is_notebook else []
  }

  if has_connectors(): # To improve
    data['dialect'] = interpreter['dialect']
    # NOTE(review): the 'phoenix-' prefix is hard-coded here (comment hints
    # at 'flink-' too) -- looks like a work-in-progress mapping; confirm.
    data['type'] = 'phoenix-' + editor_connector # 'flink-' + editor_connector

  if snippet_properties:
    data['snippets'][0]['properties'].update(snippet_properties)
  if result_properties:
    data['snippets'][0]['result'].update(result_properties)

  editor.data = json.dumps(data)

  return editor
def _get_notebook_api(user, connector_id, interpreter=None):
  '''
  Helper utils until the API gets simplified.

  Builds a canned single-snippet notebook payload for `connector_id`,
  wraps `user` in a mock request, and returns the connector API object
  for that snippet. (`MockRequest` and `get_api` are defined elsewhere in
  this module -- not shown in this excerpt.)
  '''
  # %(connector_id)s below is substituted via the trailing `%` formatting;
  # the literal values (guids, secret, statement) are fixed test fixtures.
  notebook_json = """
    {
      "selectedSnippet": "hive",
      "showHistory": false,
      "description": "Test Query",
      "name": "Test Query",
      "sessions": [
          {
              "type": "hive",
              "properties": [],
              "id": null
          }
      ],
      "type": "hive",
      "id": null,
      "snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"%(connector_id)s","status":"running",\
"statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},\
"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table",\
"handle":{"log_context":null,"statements_count":1,\
"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,\
"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,\
"statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},\
"lastExecuted": 1462554843817,"database":"default"}],
      "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
    }
  """ % {
    'connector_id': connector_id,
  }

  snippet = json.loads(notebook_json)['snippets'][0]
  snippet['interpreter'] = interpreter

  request = MockRequest(user)

  return get_api(request, snippet)
def _update_property_value(properties, key, value):
"""
Update property dict in list of properties where prop has "key": key, set "value": value
"""
for prop in properties:
if prop['key'] == key:
prop.update({'value': value})
| 32.740789 | 138 | 0.669252 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import str, object
import datetime
import json
import logging
import math
import numbers
import sys
import uuid
from datetime import timedelta
from django.contrib.sessions.models import Session
from django.db.models import Count
from django.db.models.functions import Trunc
from django.urls import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.conf import has_connectors, TASK_SERVER
from desktop.lib.i18n import smart_unicode
from desktop.lib.paths import SAFE_CHARACTERS_URI
from desktop.models import Document2
from useradmin.models import User
from notebook.conf import EXAMPLES, get_ordered_interpreters
from notebook.connectors.base import Notebook, get_api as _get_api, get_interpreter
if sys.version_info[0] > 2:
from urllib.parse import quote as urllib_quote
else:
from urllib import quote as urllib_quote
LOG = logging.getLogger(__name__)
# Materialize and HTML escape results
def escape_rows(rows, nulls_only=False, encoding=None):
  """Materialize ``rows`` into a list of lists safe for rendering.

  - NaN/Infinity numbers are JSON-encoded (plain JSON has no literal for
    them); other numbers pass through unchanged.
  - None becomes the string 'NULL'.
  - Everything else is decoded to unicode (undecodable bytes replaced) and,
    unless ``nulls_only`` is set, HTML-escaped.
  """
  data = []
  try:
    for row in rows:
      escaped_row = []
      for field in row:
        if isinstance(field, numbers.Number):
          if math.isnan(field) or math.isinf(field):
            escaped_field = json.dumps(field)
          else:
            escaped_field = field
        elif field is None:
          escaped_field = 'NULL'
        else:
          # Prevent error when getting back non utf8 like charset=iso-8859-1
          escaped_field = smart_unicode(field, errors='replace', encoding=encoding)
          if not nulls_only:
            # NOTE(review): replacing ' ' with ' ' is a no-op — the
            # replacement target looks garbled; upstream Hue replaces spaces
            # with a non-breaking-space entity here. Confirm against upstream.
            escaped_field = escape(escaped_field).replace(' ', ' ')
        escaped_row.append(escaped_field)
      data.append(escaped_row)
  except RuntimeError:
    pass # pep-0479: expected Py3.8 generator raised StopIteration
  return data
def make_notebook(
    name='Browse', description='', editor_type='hive', statement='', status='ready',
    files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None, batch_submit=False,
    on_success_url=None, skip_historify=False, is_task=False, last_executed=-1, is_notebook=False, pub_sub_url=None, result_properties={},
    namespace=None, compute=None, is_presentation_mode=False):
  '''
  Build a single-snippet Notebook document (or an empty notebook shell when
  is_notebook=True) and return it with its data serialized to JSON.

  skip_historify: do not add the task to the query history. e.g. SQL Dashboard
  is_task / isManaged: true when being a managed by Hue operation (include_managed=True in document),
    e.g. exporting query result, dropping some tables
  '''
  from notebook.connectors.hiveserver2 import HS2Api
  if has_connectors():
    # Connector mode: editor_type is actually a connector id; resolve its dialect.
    interpreter = get_interpreter(connector_type=editor_type)
    editor_connector = editor_type
    editor_type = interpreter['dialect']
  else:
    editor_connector = editor_type
  editor = Notebook()
  if snippet_properties is None:
    snippet_properties = {}
  if editor_type == 'hive':
    sessions_properties = HS2Api.get_properties(editor_type)
    if files is not None:
      _update_property_value(sessions_properties, 'files', files)
    if functions is not None:
      _update_property_value(sessions_properties, 'functions', functions)
    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'impala':
    sessions_properties = HS2Api.get_properties(editor_type)
    if settings is not None:
      # NOTE(review): the condition tests `settings` but updates 'files' with
      # `files` — looks like a copy/paste slip from the hive branch; confirm
      # intended behavior before changing.
      _update_property_value(sessions_properties, 'files', files)
  elif editor_type == 'java':
    sessions_properties = [] # Java options
  else:
    sessions_properties = []
  # Notebook document skeleton; a single snippet is attached unless the caller
  # asked for a bare notebook (is_notebook=True).
  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'description': description,
    'sessions': [
      {
         'type': editor_connector,
         'properties': sessions_properties,
         'id': None
      }
    ],
    'selectedSnippet': editor_connector, # TODO: might need update in notebook.ko.js
    'type': 'notebook' if is_notebook else 'query-%s' % editor_type,
    'showHistory': True,
    'isSaved': is_saved,
    'onSuccessUrl': urllib_quote(on_success_url.encode('utf-8'), safe=SAFE_CHARACTERS_URI) if on_success_url else None,
    'pubSubUrl': pub_sub_url,
    'skipHistorify': skip_historify,
    'isPresentationModeDefault': is_presentation_mode,
    'isManaged': is_task,
    'snippets': [
      {
         'status': status,
         'id': str(uuid.uuid4()),
         'statement_raw': statement,
         'statement': statement,
         'type': editor_connector,
         'wasBatchExecuted': batch_submit,
         'lastExecuted': last_executed,
         'properties': {
            'files': [] if files is None else files,
            'functions': [] if functions is None else functions,
            'settings': [] if settings is None else settings
         },
         'name': name,
         'database': database,
         'namespace': namespace if namespace else {},
         'compute': compute if compute else {},
         'result': {'handle': {}},
         'variables': []
      }
    ] if not is_notebook else []
  }
  if has_connectors(): # To improve
    data['dialect'] = interpreter['dialect']
    # NOTE(review): hard-codes a 'phoenix-' type prefix for every connector —
    # the trailing comment suggests this is a temporary hack; confirm.
    data['type'] = 'phoenix-' + editor_connector # 'flink-' + editor_connector
  if snippet_properties:
    data['snippets'][0]['properties'].update(snippet_properties)
  if result_properties:
    data['snippets'][0]['result'].update(result_properties)
  editor.data = json.dumps(data)
  return editor
def make_notebook2(name='Browse', description='', is_saved=False, snippets=None):
  '''
  Build a multi-snippet Notebook document from a list of snippet dicts.

  Each input snippet may carry 'type', 'statement', 'status', 'database' and
  'properties'; missing property groups (files/functions/settings) are filled
  with empty defaults. Returns a Notebook whose ``data`` is the serialized
  notebook document. The input snippet dicts are mutated in place (their
  'properties' are normalized).
  '''
  from notebook.connectors.hiveserver2 import HS2Api
  editor = Notebook()
  _snippets = []
  for snippet in snippets:
    default_properties = {
      'files': [],
      'functions': [],
      'settings': []
    }
    default_properties.update(snippet['properties'])
    snippet['properties'] = default_properties
    _snippets.append(snippet)
  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'type': 'notebook',
    'description': description,
    'sessions': [
      {
         'type': _snippet['type'],
         # Bug fix: this previously read the loop-leaked `snippet` variable
         # from the normalization loop above, so every session was created
         # with the properties of the *last* snippet's type.
         'properties': HS2Api.get_properties(_snippet['type']),
         'id': None
      } for _snippet in _snippets # Non unique types currently
    ],
    'selectedSnippet': _snippets[0]['type'],
    'showHistory': False,
    'isSaved': is_saved,
    'snippets': [
      {
         'status': _snippet.get('status', 'ready'),
         'id': str(uuid.uuid4()),
         'statement_raw': _snippet.get('statement', ''),
         'statement': _snippet.get('statement', ''),
         'type': _snippet.get('type'),
         'properties': _snippet['properties'],
         'name': name,
         'database': _snippet.get('database'),
         'result': {'handle': {}},
         'variables': []
      } for _snippet in _snippets
    ]
  }
  editor.data = json.dumps(data)
  return editor
def _get_notebook_api(user, connector_id, interpreter=None):
  '''
  Helper utils until the API gets simplified.

  Builds a canned "select * from web_logs" snippet document for
  ``connector_id`` and returns the connector API for it, using a mocked
  request that carries ``user``.
  '''
  # Template snippet document; only the connector/dialect type is substituted
  # via %-formatting below. Do not alter the literal — it is parsed as JSON.
  notebook_json = """
    {
      "selectedSnippet": "hive",
      "showHistory": false,
      "description": "Test Query",
      "name": "Test Query",
      "sessions": [
          {
             "type": "hive",
             "properties": [],
             "id": null
          }
      ],
      "type": "hive",
      "id": null,
      "snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"%(connector_id)s","status":"running",\
      "statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},\
      "result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table",\
      "handle":{"log_context":null,"statements_count":1,\
      "end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,\
      "start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,\
      "statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},\
      "lastExecuted": 1462554843817,"database":"default"}],
      "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
    }
  """ % {
    'connector_id': connector_id,
  }
  snippet = json.loads(notebook_json)['snippets'][0]
  snippet['interpreter'] = interpreter  # attach the resolved interpreter, if any
  request = MockRequest(user)
  return get_api(request, snippet)
class MockedDjangoRequest(object):
  """Minimal stand-in for a Django request object.

  Carries only the attributes read by code under test: ``user``, ``GET``
  (defaults to a JSON-format query), ``POST``, ``REQUEST`` and ``method``.
  ``jt`` (JobTracker) is always None in the mock.
  """
  def __init__(self, user, get=None, post=None, method='POST'):
    self.user = user
    self.jt = None
    self.GET = {'format': 'json'} if get is None else get
    self.POST = {} if post is None else post
    self.REQUEST = {}
    self.method = method
def import_saved_beeswax_query(bquery, interpreter=None):
  """Convert a saved beeswax query design into an editor Notebook.

  The editor type comes from ``interpreter`` when given, otherwise it is
  derived from the legacy design type/data.
  """
  design = bquery.get_design()

  if interpreter:
    editor_type = interpreter['type']
  else:
    editor_type = _convert_type(bquery.type, bquery.data)

  return make_notebook(
      name=bquery.name,
      description=bquery.desc,
      editor_type=editor_type,
      statement=design.hql_query,
      status='ready',
      files=design.file_resources,
      functions=design.functions,
      settings=design.settings,
      is_saved=True,
      database=design.database
  )
def import_saved_pig_script(pig_script):
  """Convert a saved Pig script document into an editor Notebook.

  Hadoop properties and parameters are flattened to "name=value" strings;
  resource values are copied as-is. The default files/functions/settings
  snippet properties are stripped from the resulting document.
  """
  snippet_properties = {}
  snippet_properties['hadoopProperties'] = []
  if pig_script.dict.get('hadoopProperties'):
    for prop in pig_script.dict.get('hadoopProperties'):
      snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  snippet_properties['parameters'] = []
  if pig_script.dict.get('parameters'):
    for param in pig_script.dict.get('parameters'):
      snippet_properties['parameters'].append("%s=%s" % (param.get('name'), param.get('value')))
  snippet_properties['resources'] = []
  if pig_script.dict.get('resources'):
    for resource in pig_script.dict.get('resources'):
      snippet_properties['resources'].append(resource.get('value'))
  notebook = make_notebook(
      name=pig_script.dict.get('name'),
      editor_type='pig',
      statement=pig_script.dict.get('script'),
      status='ready',
      snippet_properties=snippet_properties,
      is_saved=True
  )
  # Remove files, functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('files')
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')
  notebook.data = json.dumps(data)
  return notebook
def import_saved_mapreduce_job(wf):
  """Convert a saved Oozie MapReduce workflow design into an editor Notebook.

  Copies the action node's files, archives, Hadoop properties and jar path
  into snippet properties; malformed JSON fields are logged and skipped.
  """
  snippet_properties = {}
  node = wf.start.get_child('to')
  # Bug fix: initialize the list before appending, matching the other
  # import_saved_* helpers. Previously the first append raised KeyError,
  # which the ValueError handler below did not catch.
  snippet_properties['files'] = []
  try:
    files = json.loads(node.files)
    for filepath in files:
      snippet_properties['files'].append({'type': 'file', 'path': filepath})
  except ValueError as e:
    LOG.warn('Failed to parse files for mapreduce job design "%s".' % wf.name)
  snippet_properties['archives'] = []
  try:
    archives = json.loads(node.archives)
    for filepath in archives:
      snippet_properties['archives'].append(filepath)
  except ValueError as e:
    LOG.warn('Failed to parse archives for mapreduce job design "%s".' % wf.name)
  snippet_properties['hadoopProperties'] = []
  try:
    properties = json.loads(node.job_properties)
    if properties:
      for prop in properties:
        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  except ValueError as e:
    LOG.warn('Failed to parse job properties for mapreduce job design "%s".' % wf.name)
  snippet_properties['app_jar'] = node.jar_path
  notebook = make_notebook(
      name=wf.name,
      description=wf.description,
      editor_type='mapreduce',
      statement='',
      status='ready',
      snippet_properties=snippet_properties,
      is_saved=True
  )
  # Remove functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')
  notebook.data = json.dumps(data)
  return notebook
def import_saved_shell_job(wf):
  """Convert a saved Oozie shell workflow design into an editor Notebook.

  Copies the action node's command, arguments/env vars, Hadoop properties,
  files, archives and capture_output flag into snippet properties; malformed
  JSON fields are logged and skipped.
  """
  snippet_properties = {}
  node = wf.start.get_child('to')
  snippet_properties['command_path'] = node.command
  snippet_properties['arguments'] = []
  snippet_properties['env_var'] = []
  try:
    params = json.loads(node.params)
    if params:
      for param in params:
        # 'argument' entries become CLI arguments; everything else is an env var.
        if param['type'] == 'argument':
          snippet_properties['arguments'].append(param['value'])
        else:
          snippet_properties['env_var'].append(param['value'])
  except ValueError as e:
    LOG.warn('Failed to parse parameters for shell job design "%s".' % wf.name)
  snippet_properties['hadoopProperties'] = []
  try:
    properties = json.loads(node.job_properties)
    if properties:
      for prop in properties:
        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  except ValueError as e:
    LOG.warn('Failed to parse job properties for shell job design "%s".' % wf.name)
  snippet_properties['files'] = []
  try:
    files = json.loads(node.files)
    for filepath in files:
      snippet_properties['files'].append({'type': 'file', 'path': filepath})
  except ValueError as e:
    LOG.warn('Failed to parse files for shell job design "%s".' % wf.name)
  snippet_properties['archives'] = []
  try:
    archives = json.loads(node.archives)
    for archive in archives:
      snippet_properties['archives'].append(archive['name'])
  except ValueError as e:
    LOG.warn('Failed to parse archives for shell job design "%s".' % wf.name)
  snippet_properties['capture_output'] = node.capture_output
  notebook = make_notebook(
      name=wf.name,
      description=wf.description,
      editor_type='shell',
      statement='',
      status='ready',
      snippet_properties=snippet_properties,
      is_saved=True
  )
  # Remove functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')
  notebook.data = json.dumps(data)
  return notebook
def import_saved_java_job(wf):
  """Convert a saved Oozie Java workflow design into an editor Notebook.

  Copies the action node's jar path, main class, args, JVM options, Hadoop
  properties, files, archives and capture_output flag into snippet
  properties; malformed JSON fields are logged and skipped.
  """
  snippet_properties = {}
  node = wf.start.get_child('to')
  snippet_properties['app_jar'] = node.jar_path
  snippet_properties['class'] = node.main_class
  snippet_properties['args'] = node.args if node.args else ''
  snippet_properties['java_opts'] = node.java_opts if node.java_opts else ''
  snippet_properties['hadoopProperties'] = []
  try:
    properties = json.loads(node.job_properties)
    if properties:
      for prop in properties:
        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  except ValueError as e:
    LOG.warn('Failed to parse job properties for Java job design "%s".' % wf.name)
  snippet_properties['files'] = []
  try:
    files = json.loads(node.files)
    for filepath in files:
      snippet_properties['files'].append({'type': 'file', 'path': filepath})
  except ValueError as e:
    LOG.warn('Failed to parse files for Java job design "%s".' % wf.name)
  snippet_properties['archives'] = []
  try:
    archives = json.loads(node.archives)
    for archive in archives:
      snippet_properties['archives'].append(archive['name'])
  except ValueError as e:
    LOG.warn('Failed to parse archives for Java job design "%s".' % wf.name)
  snippet_properties['capture_output'] = node.capture_output
  notebook = make_notebook(
      name=wf.name,
      description=wf.description,
      editor_type='java',
      statement='',
      status='ready',
      snippet_properties=snippet_properties,
      is_saved=True
  )
  # Remove functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')
  notebook.data = json.dumps(data)
  return notebook
def _convert_type(btype, bdata):
  """Map a legacy beeswax design type to a notebook editor type string.

  RDBMS designs carry their server name inside the design data; anything
  unrecognized (including HQL) falls back to 'hive'.
  """
  from beeswax.models import HQL, IMPALA, RDBMS, SPARK

  if btype == RDBMS:
    return json.loads(bdata)['query']['server']
  if btype == SPARK: # We should not import
    return 'spark'
  if btype == IMPALA:
    return 'impala'
  # HQL and any unknown legacy type default to hive.
  return 'hive'
def _update_property_value(properties, key, value):
"""
Update property dict in list of properties where prop has "key": key, set "value": value
"""
for prop in properties:
if prop['key'] == key:
prop.update({'value': value})
def _get_editor_type(editor_id):
  """Return the dialect suffix of a saved editor document's type,
  e.g. 'query-hive' -> 'hive'."""
  doc = Document2.objects.get(id=editor_id)
  return doc.type.rpartition('-')[2]
class ApiWrapper():
  """Proxy around a notebook connector API.

  When the task server is enabled, attribute lookups whose name matches a
  function in ``notebook.tasks`` are redirected there, with the request's
  POST payload and user id injected; all other lookups fall through to the
  real connector API.
  """
  def __init__(self, request, snippet):
    self.request = request
    self.api = _get_api(request, snippet)
  def __getattr__(self, name):
    if TASK_SERVER.ENABLED.get():
      from notebook import tasks as ntasks
      if hasattr(ntasks, name):
        attr = getattr(ntasks, name)
        def _method(*args, **kwargs):
          # Forward the original POST payload and user id so the task
          # executes with the caller's context.
          return attr(*args, **dict(kwargs, postdict=self.request.POST, user_id=self.request.user.id))
        return _method
    return getattr(self.api, name)
def get_api(request, snippet):
  # Route all connector calls through ApiWrapper so the task server can
  # transparently take over execution when TASK_SERVER is enabled.
  return ApiWrapper(request, snippet)
def upgrade_session_properties(request, notebook):
  """Rewrite any old-format session properties in ``notebook`` in place,
  using each session's connector API, and return the notebook."""
  # Upgrade session data if using old format
  data = notebook.get_data()

  for session in data.get('sessions', []):
    api = get_api(request, session)
    if 'type' not in session or not hasattr(api, 'upgrade_properties'):
      continue
    session['properties'] = api.upgrade_properties(session['type'], session.get('properties', None))

  notebook.data = json.dumps(data)
  return notebook
class Analytics(object):
  """Read-only usage statistics over Hue users, sessions and query documents."""
  @classmethod
  def admin_stats(cls):
    """Return a list of (label, value) pairs summarizing instance-wide
    activity over the last day, week, 30 days and 90 days."""
    stats = []
    one_day = datetime.date.today() - timedelta(days=1)
    one_week = datetime.date.today() - timedelta(weeks=1)
    one_month = datetime.date.today() - timedelta(days=30)
    three_months = datetime.date.today() - timedelta(days=90)
    stats.append(('Last modified', '1 day'))
    stats.append(('Users', User.objects.filter(last_login__gte=one_day).count()))
    stats.append(('Sessions', Session.objects.filter(expire_date__gte=one_day).count()))
    stats.append(('Executed queries', Document2.objects.filter(
        last_modified__gte=one_day, is_history=True, type__startswith='query-').count()
      )
    )
    stats.append(('\nLast modified', '1 week'))
    stats.append(('Users', User.objects.filter(last_login__gte=one_week).count()))
    stats.append(('Sessions', Session.objects.filter(expire_date__gte=one_week).count()))
    stats.append(('Executed queries', Document2.objects.filter(
        last_modified__gte=one_week, is_history=True, type__startswith='query-').count()
      )
    )
    stats.append(('Saved queries', Document2.objects.filter(
        last_modified__gte=one_week, is_history=False, type__startswith='query-').count()
      )
    )
    stats.append(('\nAll', ''))
    stats.append(('Active users 30 days', User.objects.filter(last_login__gte=one_month).count()))
    stats.append(('Sessions 30 days', Session.objects.filter(expire_date__gte=one_month).count()))
    stats.append(('Executed queries 30 days', Document2.objects.filter(
        last_modified__gte=one_month, is_history=True, type__startswith='query-').count()
      )
    )
    stats.append(('Active users 90 days', User.objects.filter(last_login__gte=three_months).count()))
    stats.append(('\nDialect executions', ''))
    queries = Document2.objects.filter(type__startswith='query-', is_trashed=False, is_managed=False)
    # Per-dialect execution counts over the last 30 days, most used first.
    last_month_qdialects = queries.filter(
        last_modified__gte=one_month
    ).values('type').annotate(c=Count('type')).values('type', 'c').order_by('-c')
    stats.append(('30 days', ', '.join(['%(type)s: %(c)s' % d for d in last_month_qdialects])))
    return stats
  @classmethod
  def user_stats(cls, user_id=None, user=None):
    """Return per-user stats dicts (name/value/description): execution and
    saved-query counts plus a 30-day daily execution histogram. Accepts
    either a user_id or an already-fetched user object."""
    stats = []
    one_month = datetime.date.today() - timedelta(days=30)
    user = User.objects.get(id=user_id) if user is None else user
    queries = Document2.objects.filter(owner__id=user_id, type__startswith='query-', is_trashed=False, is_managed=False)
    stats.append({
      'name': 'user',
      'value': '%s - %s' % (user_id, user.username), 'description': _('User info')
    })
    query_executions = queries.filter(is_history=True, type__startswith='query-')
    stats.append({
      'name': 'query_executions',
      'values': query_executions.count(),
      'description': _('Query executions count')
    })
    stats.append({
      'name': 'saved_queries_count',
      'value': queries.filter(is_history=False, type__startswith='query-').count(),
      'description': _('Saved queries count')
    })
    stats.append({
      'name': 'query_executions_30_days_count',
      'value': query_executions.filter(last_modified__gte=one_month).count(),
      'description': _('Query executions 30 days total')
    })
    # Group the last 30 days of activity by calendar day.
    last_month_daily = queries.filter(
        last_modified__gte=one_month).annotate(
        day=Trunc('last_modified', 'day')
    ).values('day').annotate(c=Count('day')).values('day', 'c').order_by('day')
    stats.append({
      'name': 'query_executions_30_days_histogram',
      'value': last_month_daily,
      'description': _('Daily executions 30 days')
    })
    return stats
  @classmethod
  def query_stats(cls, query_id=None, query=None):
    """Return per-query stats dicts: how often the saved query was executed
    (overall and by users other than the owner) plus a 30-day daily
    execution histogram. Accepts either a query_id or a query document."""
    stats = []
    one_month = datetime.date.today() - timedelta(days=30)
    query = Document2.objects.get(id=query_id) if query is None else query
    stats.append({
      'name': 'query',
      'value': '%s - %s' % (query_id, query.name),
      'description': _('Query info')
    })
    executions = query.dependents.filter(is_history=True, type__startswith='query-')
    stats.append({
      'name': 'execution_count',
      'value': executions.count(),
      'description': _('How many times executed')
    })
    stats.append({
      'name': 'execution_count_shared',
      'value': executions.exclude(owner=query.owner).count(),
      'description': _('Executions by others')
    })
    last_month_daily = executions.filter(
        last_modified__gte=one_month).annotate(
        day=Trunc('last_modified', 'day')
    ).values('day').annotate(c=Count('day')).values('day', 'c').order_by('day')
    stats.append({
      'name': 'executions_30_days_histogram',
      'value': last_month_daily,
      'description': _('Daily executions 30 days')
    })
    # Could count number of "forks" (but would need to start tracking parent of Saved As query cf. saveAsNotebook)
    return stats
class MockRequest():
  """Bare-bones request object carrying only a user and an empty POST dict."""
  def __init__(self, user):
    self.POST = {}
    self.user = user
def install_custom_examples():
  """Install the configured sample queries/tables for every hive/impala
  interpreter when EXAMPLES.AUTO_LOAD is enabled.

  Returns a list of (successes, errors) tuples, one per dialect, or None
  (implicitly) when auto-load is disabled.
  """
  if EXAMPLES.AUTO_LOAD.get():
    from desktop.auth.backend import rewrite_user
    from beeswax.management.commands import beeswax_install_examples
    from useradmin.models import get_default_user_group, install_sample_user, User
    # Examples are owned by the shared sample user, not a real account.
    user = rewrite_user(
      install_sample_user()
    )
    dialects = [
      interpreter['dialect']
      for interpreter in get_ordered_interpreters(user)
      # Only for hive/impala currently, would also need to port to Notebook install examples.
      if interpreter['dialect'] in ('hive', 'impala')
    ]
    queries = EXAMPLES.QUERIES.get()
    tables = EXAMPLES.TABLES.get() # No-op. Only for the saved query samples, not the tables currently.
    LOG.info('Installing custom examples queries: %(queries)s, tables: %(tables)s for dialects %(dialects)s '
        'belonging to user %(user)s' % {
          'queries': queries,
          'tables': tables,
          'dialects': dialects,
          'user': user
        }
    )
    result = []
    for dialect in dialects:
      interpreter = {'type': dialect, 'dialect': dialect}
      successes, errors = beeswax_install_examples.Command().handle(
        dialect=dialect,
        user=user,
        interpreter=interpreter,
        queries=queries,
        tables=tables,
        request=None
      )
      LOG.info('Dialect %(dialect)s installed samples: %(successes)s, %(errors)s,' % {
        'dialect': dialect,
        'successes': successes,
        'errors': errors,
      })
      result.append((successes, errors))
    return result
| 17,156 | 133 | 465 |
90acaa027c2e9c18792577ff8f691e83a9427cb9 | 1,216 | py | Python | web/transiq/utils/migrations/0016_auto_20180428_1842.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | web/transiq/utils/migrations/0016_auto_20180428_1842.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | 14 | 2020-06-05T23:06:45.000Z | 2022-03-12T00:00:18.000Z | web/transiq/utils/migrations/0016_auto_20180428_1842.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-28 18:42
from __future__ import unicode_literals
from django.db import migrations, models
| 33.777778 | 142 | 0.619243 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-28 18:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add traffic-person contact fields (two name/phone pairs) to AahoOffice."""

    dependencies = [
        ('utils', '0015_auto_20180417_1922'),
    ]

    operations = [
        migrations.AddField(
            model_name='aahooffice',
            name='t1_name',
            field=models.CharField(max_length=35, null=True, verbose_name='Traffic Person 1 Name'),
        ),
        migrations.AddField(
            model_name='aahooffice',
            name='t1_phone',
            field=models.CharField(help_text='enter 10 digit mobile number', max_length=10, null=True, verbose_name='Traffic Person 1 Phone'),
        ),
        migrations.AddField(
            model_name='aahooffice',
            name='t2_name',
            field=models.CharField(blank=True, max_length=35, null=True, verbose_name='Traffic Person 2 Name'),
        ),
        migrations.AddField(
            model_name='aahooffice',
            name='t2_phone',
            field=models.CharField(help_text='enter 10 digit mobile number', max_length=10, null=True, verbose_name='Traffic Person 2 Phone'),
        ),
    ]
| 0 | 1,037 | 23 |
0cac4234f0d7957259cb72e1f74e4a810ff9766f | 13,001 | py | Python | tests/controller_mocking/test_node_group.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | null | null | null | tests/controller_mocking/test_node_group.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | 1 | 2022-03-02T21:06:21.000Z | 2022-03-04T17:32:14.000Z | tests/controller_mocking/test_node_group.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | null | null | null | #
# MIT License
#
# (C) Copyright 2019, 2021-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Tests of the Mock System used for stand-alone development and unit
testing of the Compute Rolling Upgrade Agent.
"""
from crus.controllers.upgrade_agent.node_table import NodeTable
from crus.app import APP, HEADERS
from crus.controllers.mocking.shared import requests
BASE_URI = APP.config['NODE_GROUP_URI']
NODE_GROUPS_URI = BASE_URI
NODE_GROUP_URI = "%s/%%s" % BASE_URI
NODE_GROUP_MEMBERS_URI = "%s/%%s/members" % BASE_URI
NODE_GROUP_MEMBER_URI = "%s/%%s/members/%%s" % BASE_URI
HTTPS_VERIFY = APP.config['HTTPS_VERIFY']
def test_node_group():
    """Test creation of a node group with no members in it, then delete it.

    Full lifecycle: create an empty group, verify its contents, add node
    xnames as members, verify membership in both directions, remove the
    members, verify the group is empty again, delete the group, and verify
    a subsequent GET returns a 404 problem document.
    """
    # Create the group
    label = "test_group"
    create_uri = NODE_GROUPS_URI
    data = {
        'label': label,
        'description': "My test group",
        'tags': ['a tag'],
        'members': {'ids': []}
    }
    result = requests.post(create_uri, json=data,
                           headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['created']

    # Verify the initial contents
    get_uri = NODE_GROUP_URI % label
    result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['ok']
    result_data = result.json()
    assert 'label' in result_data
    assert result_data['label'] == data['label']
    assert 'description' in result_data
    assert result_data['description'] == data['description']
    assert 'members' in result_data
    assert result_data['members'] == data['members']

    # Add some XNAMEs to the group (first 50 known nodes)
    some_xnames = NodeTable.get_all_xnames()[:50]
    for xname in some_xnames:
        member_data = {
            'id': xname
        }
        add_member_uri = NODE_GROUP_MEMBERS_URI % label
        result = requests.post(add_member_uri, json=member_data,
                               headers=HEADERS, verify=HTTPS_VERIFY)
        assert result.status_code == requests.codes['created']

    # Verify that the members got added (set equality checked both ways)
    result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['ok']
    result_data = result.json()
    assert 'label' in result_data
    assert result_data['label'] == data['label']
    assert 'description' in result_data
    assert result_data['description'] == data['description']
    assert 'members' in result_data
    assert 'ids' in result_data['members']
    member_xnames = result_data['members']['ids']
    for xname in member_xnames:
        assert xname in some_xnames
    for xname in some_xnames:
        assert xname in member_xnames

    # Delete all the members that we added...
    for xname in some_xnames:
        delete_member_uri = NODE_GROUP_MEMBER_URI % (label, xname)
        result = requests.delete(delete_member_uri, headers=HEADERS,
                                 verify=HTTPS_VERIFY)
        assert result.status_code == requests.codes['ok']

    # Verify that the members got deleted (we should be back where we were
    # right after creation)
    result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['ok']
    result_data = result.json()
    assert 'label' in result_data
    assert result_data['label'] == data['label']
    assert 'description' in result_data
    assert result_data['description'] == data['description']
    assert 'members' in result_data
    assert result_data['members'] == data['members']

    # Delete the group
    delete_uri = NODE_GROUP_URI % label
    result = requests.delete(delete_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['ok']

    # Make sure it is gone (GET must return a 404 problem document)
    result = requests.get(get_uri, json=data,
                          headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['not_found']
    result_data = result.json()
    assert 'title' in result_data
    assert result_data['title'] == "Not Found"
    assert 'detail' in result_data
    assert 'status' in result_data
    assert result_data['status'] == result.status_code
# pylint: disable=invalid-name
def test_fail_create_node_group_no_input():
    """Posting to the node group collection with no request body must
    return a server error describing the JSON decode failure.
    """
    response = requests.post(NODE_GROUPS_URI, headers=HEADERS,
                             verify=HTTPS_VERIFY)
    assert response.status_code == requests.codes['server_error']
    body = response.json()
    assert body.get('title') == "Internal Server Error"
    assert body.get('detail') == (
        "error decoding JSON unexpected end of JSON input"
    )
    assert body.get('status') == response.status_code
# pylint: disable=invalid-name
def test_fail_create_node_group_no_label():
    """Creating a node group whose payload lacks the 'label' field must be
    rejected as a bad request with a problem document.
    """
    payload = {
        'description': "My test group",
        'members': {'ids': []}
    }
    response = requests.post(NODE_GROUPS_URI, json=payload,
                             headers=HEADERS, verify=HTTPS_VERIFY)
    assert response.status_code == requests.codes['bad']
    body = response.json()
    assert body.get('title') == "Bad Request"
    assert 'detail' in body
    assert body.get('status') == response.status_code
# pylint: disable=invalid-name
def test_fail_create_duplicate_group():
    """Test that trying to create the same group twice in a row fails.

    Creates a group, verifies it, posts the same payload again expecting a
    409 Conflict problem document, then cleans up and verifies deletion.
    """
    # Create the group
    label = "test_group"
    data = {
        'label': label,
        'description': "My test group",
        'members': {'ids': []}
    }
    result = requests.post(NODE_GROUPS_URI, json=data,
                           headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['created']

    # Verify the initial contents
    get_uri = NODE_GROUP_URI % label
    result = requests.get(get_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['ok']
    result_data = result.json()
    assert 'label' in result_data
    assert result_data['label'] == data['label']
    assert 'description' in result_data
    assert result_data['description'] == data['description']
    assert 'members' in result_data
    assert result_data['members'] == data['members']

    # Now try to create it again (must conflict with the existing label)
    result = requests.post(NODE_GROUPS_URI, json=data,
                           headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['conflict']
    result_data = result.json()
    assert 'title' in result_data
    assert result_data['title'] == "Conflict"
    assert 'detail' in result_data
    assert result_data['detail'] == \
        "operation would conflict with an existing group that "\
        "has the same label."
    assert 'status' in result_data
    assert result_data['status'] == result.status_code

    # Delete the group
    delete_uri = NODE_GROUP_URI % label
    result = requests.delete(delete_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['ok']

    # Make sure it is gone
    result = requests.get(get_uri, json=data,
                          headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['not_found']
# pylint: disable=invalid-name
def test_fail_delete_group_unknown():
    """Verify that DELETE on a nonexistent group label yields a 404 with
    a well-formed error body.
    """
    missing = requests.delete(NODE_GROUP_URI % "not_there",
                              headers=HEADERS, verify=HTTPS_VERIFY)
    assert missing.status_code == requests.codes['not_found']
    error = missing.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error.get('status') == missing.status_code
# pylint: disable=invalid-name
def test_fail_create_member_no_data():
    """Verify that POSTing a group member with an empty request body fails
    with a 500 whose detail reports the JSON decode failure.
    """
    members_uri = NODE_GROUP_MEMBERS_URI % "fake"
    result = requests.post(members_uri, headers=HEADERS,
                           verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['server_error']
    error = result.json()
    assert error.get('title') == "Internal Server Error"
    assert 'detail' in error
    assert error['detail'] == \
        "error decoding JSON unexpected end of JSON input"
    assert error.get('status') == result.status_code
# pylint: disable=invalid-name
def test_fail_create_member_no_id():
    """Verify that POSTing member data lacking an 'id' field is rejected
    as a 400 Bad Request with an "invalid xname ID" detail.
    """
    payload = {
        'filler': 1
    }
    members_uri = NODE_GROUP_MEMBERS_URI % "fake"
    result = requests.post(members_uri, json=payload,
                           headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['bad']
    error = result.json()
    assert error.get('title') == "Bad Request"
    assert 'detail' in error
    assert error['detail'] == \
        "invalid xname ID"
    assert error.get('status') == result.status_code
def test_fail_delete_unknown_member():
    """Verify that deleting a member not present in a group returns 404.

    Creates an empty group, attempts to delete a bogus member from it,
    checks the error body, then removes the group and confirms it is gone.
    """
    label = "test_group"
    payload = {
        'label': label,
        'description': "My test group",
        'members': {'ids': []}
    }
    group_uri = NODE_GROUP_URI % label

    # Create an empty group to operate on.
    created = requests.post(NODE_GROUPS_URI, json=payload,
                            headers=HEADERS, verify=HTTPS_VERIFY)
    assert created.status_code == requests.codes['created']

    # Sanity-check the stored contents.
    fetched = requests.get(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert fetched.status_code == requests.codes['ok']
    body = fetched.json()
    for key in ('label', 'description', 'members'):
        assert key in body
        assert body[key] == payload[key]

    # Deleting a member that was never added must 404.
    member_uri = NODE_GROUP_MEMBER_URI % (label, "not_there")
    missing = requests.delete(member_uri, headers=HEADERS,
                              verify=HTTPS_VERIFY)
    assert missing.status_code == requests.codes['not_found']
    error = missing.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error['detail'] == "group has no such member."
    assert error.get('status') == missing.status_code

    # Clean up the group and confirm the deletion.
    removed = requests.delete(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert removed.status_code == requests.codes['ok']
    gone = requests.get(group_uri, json=payload,
                        headers=HEADERS, verify=HTTPS_VERIFY)
    assert gone.status_code == requests.codes['not_found']
    error = gone.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error.get('status') == gone.status_code
| 37.90379 | 78 | 0.683332 | #
# MIT License
#
# (C) Copyright 2019, 2021-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Tests of the Mock System used for stand-alone development and unit
testing of the Compute Rolling Upgrade Agent.
"""
from crus.controllers.upgrade_agent.node_table import NodeTable
from crus.app import APP, HEADERS
from crus.controllers.mocking.shared import requests
# Endpoint URIs for the mocked node-group REST API, rooted at the configured
# NODE_GROUP_URI.  The "%%s" placeholders survive the first %-format and are
# filled in later with a group label (and, for members, a member xname).
BASE_URI = APP.config['NODE_GROUP_URI']
NODE_GROUPS_URI = BASE_URI
NODE_GROUP_URI = "%s/%%s" % BASE_URI
NODE_GROUP_MEMBERS_URI = "%s/%%s/members" % BASE_URI
NODE_GROUP_MEMBER_URI = "%s/%%s/members/%%s" % BASE_URI
HTTPS_VERIFY = APP.config['HTTPS_VERIFY']
def test_node_group():
    """Exercise the full life-cycle of a node group.

    Create a group, add 50 member xnames, verify the membership, remove
    the members again, verify the group is back to its initial state,
    then delete the group and confirm it no longer exists.
    """
    label = "test_group"
    payload = {
        'label': label,
        'description': "My test group",
        'tags': ['a tag'],
        'members': {'ids': []}
    }
    group_uri = NODE_GROUP_URI % label

    # Create the group and confirm it comes back exactly as posted
    # (tags are intentionally not round-trip checked, matching the API).
    created = requests.post(NODE_GROUPS_URI, json=payload,
                            headers=HEADERS, verify=HTTPS_VERIFY)
    assert created.status_code == requests.codes['created']
    fetched = requests.get(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert fetched.status_code == requests.codes['ok']
    body = fetched.json()
    for key in ('label', 'description', 'members'):
        assert key in body
        assert body[key] == payload[key]

    # Add the first 50 known xnames as members, one POST each.
    some_xnames = NodeTable.get_all_xnames()[:50]
    members_uri = NODE_GROUP_MEMBERS_URI % label
    for xname in some_xnames:
        added = requests.post(members_uri, json={'id': xname},
                              headers=HEADERS, verify=HTTPS_VERIFY)
        assert added.status_code == requests.codes['created']

    # The group's member list must now match the xnames we added
    # (membership is checked in both directions, order-independent).
    fetched = requests.get(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert fetched.status_code == requests.codes['ok']
    body = fetched.json()
    assert body.get('label') == payload['label']
    assert body.get('description') == payload['description']
    assert 'members' in body
    assert 'ids' in body['members']
    member_xnames = body['members']['ids']
    assert set(member_xnames) <= set(some_xnames)
    assert set(some_xnames) <= set(member_xnames)

    # Remove every member we added...
    for xname in some_xnames:
        removed = requests.delete(NODE_GROUP_MEMBER_URI % (label, xname),
                                  headers=HEADERS, verify=HTTPS_VERIFY)
        assert removed.status_code == requests.codes['ok']

    # ...which should leave the group exactly as it was at creation.
    fetched = requests.get(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert fetched.status_code == requests.codes['ok']
    body = fetched.json()
    for key in ('label', 'description', 'members'):
        assert key in body
        assert body[key] == payload[key]

    # Delete the group and confirm a subsequent GET yields 404.
    removed = requests.delete(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert removed.status_code == requests.codes['ok']
    gone = requests.get(group_uri, json=payload,
                        headers=HEADERS, verify=HTTPS_VERIFY)
    assert gone.status_code == requests.codes['not_found']
    error = gone.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error.get('status') == gone.status_code
# pylint: disable=invalid-name
def test_fail_create_node_group_no_input():
    """Verify that POSTing to the groups collection with no request body
    at all yields a 500 whose detail reports the JSON decode failure.
    """
    result = requests.post(NODE_GROUPS_URI, headers=HEADERS,
                           verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['server_error']
    error = result.json()
    assert error.get('title') == "Internal Server Error"
    assert 'detail' in error
    assert error['detail'] == \
        "error decoding JSON unexpected end of JSON input"
    assert error.get('status') == result.status_code
# pylint: disable=invalid-name
def test_fail_create_node_group_no_label():
    """Verify that group creation without a 'label' field in the input
    data fails with a 400 Bad Request.
    """
    payload = {
        'description': "My test group",
        'members': {'ids': []}
    }
    result = requests.post(NODE_GROUPS_URI, json=payload,
                           headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['bad']
    error = result.json()
    assert error.get('title') == "Bad Request"
    assert 'detail' in error
    assert error.get('status') == result.status_code
# pylint: disable=invalid-name
def test_fail_create_duplicate_group():
    """Verify that POSTing a group whose label already exists returns 409.

    Creates a group, confirms its stored contents, re-POSTs the identical
    payload (expecting a Conflict error body), then deletes the group and
    confirms the deletion took effect.
    """
    label = "test_group"
    payload = {
        'label': label,
        'description': "My test group",
        'members': {'ids': []}
    }
    group_uri = NODE_GROUP_URI % label

    # First creation must succeed.
    created = requests.post(NODE_GROUPS_URI, json=payload,
                            headers=HEADERS, verify=HTTPS_VERIFY)
    assert created.status_code == requests.codes['created']

    # The stored group must echo back exactly what we posted.
    fetched = requests.get(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert fetched.status_code == requests.codes['ok']
    body = fetched.json()
    for key in ('label', 'description', 'members'):
        assert key in body
        assert body[key] == payload[key]

    # A second identical POST must be rejected with a Conflict error.
    duplicate = requests.post(NODE_GROUPS_URI, json=payload,
                              headers=HEADERS, verify=HTTPS_VERIFY)
    assert duplicate.status_code == requests.codes['conflict']
    error = duplicate.json()
    assert error.get('title') == "Conflict"
    assert 'detail' in error
    assert error['detail'] == \
        "operation would conflict with an existing group that "\
        "has the same label."
    assert error.get('status') == duplicate.status_code

    # Clean up and make sure the group is really gone.
    removed = requests.delete(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert removed.status_code == requests.codes['ok']
    gone = requests.get(group_uri, json=payload,
                        headers=HEADERS, verify=HTTPS_VERIFY)
    assert gone.status_code == requests.codes['not_found']
# pylint: disable=invalid-name
def test_fail_delete_group_unknown():
    """Verify that DELETE on a nonexistent group label yields a 404 with
    a well-formed error body.
    """
    missing = requests.delete(NODE_GROUP_URI % "not_there",
                              headers=HEADERS, verify=HTTPS_VERIFY)
    assert missing.status_code == requests.codes['not_found']
    error = missing.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error.get('status') == missing.status_code
# pylint: disable=invalid-name
def test_fail_create_member_no_data():
    """Verify that POSTing a group member with an empty request body fails
    with a 500 whose detail reports the JSON decode failure.
    """
    members_uri = NODE_GROUP_MEMBERS_URI % "fake"
    result = requests.post(members_uri, headers=HEADERS,
                           verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['server_error']
    error = result.json()
    assert error.get('title') == "Internal Server Error"
    assert 'detail' in error
    assert error['detail'] == \
        "error decoding JSON unexpected end of JSON input"
    assert error.get('status') == result.status_code
# pylint: disable=invalid-name
def test_fail_create_member_no_id():
    """Verify that POSTing member data lacking an 'id' field is rejected
    as a 400 Bad Request with an "invalid xname ID" detail.
    """
    payload = {
        'filler': 1
    }
    members_uri = NODE_GROUP_MEMBERS_URI % "fake"
    result = requests.post(members_uri, json=payload,
                           headers=HEADERS, verify=HTTPS_VERIFY)
    assert result.status_code == requests.codes['bad']
    error = result.json()
    assert error.get('title') == "Bad Request"
    assert 'detail' in error
    assert error['detail'] == \
        "invalid xname ID"
    assert error.get('status') == result.status_code
def test_fail_delete_unknown_member():
    """Verify that deleting a member not present in a group returns 404.

    Creates an empty group, attempts to delete a bogus member from it,
    checks the error body, then removes the group and confirms it is gone.
    """
    label = "test_group"
    payload = {
        'label': label,
        'description': "My test group",
        'members': {'ids': []}
    }
    group_uri = NODE_GROUP_URI % label

    # Create an empty group to operate on.
    created = requests.post(NODE_GROUPS_URI, json=payload,
                            headers=HEADERS, verify=HTTPS_VERIFY)
    assert created.status_code == requests.codes['created']

    # Sanity-check the stored contents.
    fetched = requests.get(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert fetched.status_code == requests.codes['ok']
    body = fetched.json()
    for key in ('label', 'description', 'members'):
        assert key in body
        assert body[key] == payload[key]

    # Deleting a member that was never added must 404.
    member_uri = NODE_GROUP_MEMBER_URI % (label, "not_there")
    missing = requests.delete(member_uri, headers=HEADERS,
                              verify=HTTPS_VERIFY)
    assert missing.status_code == requests.codes['not_found']
    error = missing.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error['detail'] == "group has no such member."
    assert error.get('status') == missing.status_code

    # Clean up the group and confirm the deletion.
    removed = requests.delete(group_uri, headers=HEADERS, verify=HTTPS_VERIFY)
    assert removed.status_code == requests.codes['ok']
    gone = requests.get(group_uri, json=payload,
                        headers=HEADERS, verify=HTTPS_VERIFY)
    assert gone.status_code == requests.codes['not_found']
    error = gone.json()
    assert error.get('title') == "Not Found"
    assert 'detail' in error
    assert error.get('status') == gone.status_code
| 0 | 0 | 0 |
de99225d25d779b72e99d154c6f9d546ee0a3037 | 242 | py | Python | src/kvhelper/urls.py | The-New-Fork/api-juicychain | 9bee78a1ab5de53dc3a4284fa0f57c4edc746650 | [
"Apache-2.0"
] | null | null | null | src/kvhelper/urls.py | The-New-Fork/api-juicychain | 9bee78a1ab5de53dc3a4284fa0f57c4edc746650 | [
"Apache-2.0"
] | null | null | null | src/kvhelper/urls.py | The-New-Fork/api-juicychain | 9bee78a1ab5de53dc3a4284fa0f57c4edc746650 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register('getinfo', views.getinfo)
urlpatterns = [
path('kvupdate1', views.kvupdate1, name='kvupdate1')
]
| 22 | 56 | 0.743802 | from django.urls import path
from . import views
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register('getinfo', views.getinfo)
urlpatterns = [
path('kvupdate1', views.kvupdate1, name='kvupdate1')
]
| 0 | 0 | 0 |
d0e982280a38a4e3f4d7f1da23db03fcff439104 | 1,181 | py | Python | cluster/scripts/n2v/predict.py | juglab/VoidSeg_cluster | 71339f9bdd6df9feb26fa197d5dfc390c371910c | [
"BSD-2-Clause"
] | 1 | 2020-03-12T14:00:15.000Z | 2020-03-12T14:00:15.000Z | cluster/scripts/n2v/predict.py | juglab/VoidSeg_cluster | 71339f9bdd6df9feb26fa197d5dfc390c371910c | [
"BSD-2-Clause"
] | null | null | null | cluster/scripts/n2v/predict.py | juglab/VoidSeg_cluster | 71339f9bdd6df9feb26fa197d5dfc390c371910c | [
"BSD-2-Clause"
] | null | null | null | #Imports
from n2v.models import Config, CARE
import numpy as np
from n2v.utils import plot_some, plot_history
from n2v.utils.n2v_utils import manipulate_val_data
import urllib
import os
import zipfile
import json
from os.path import join
from skimage import io
with open('experiment.json', 'r') as f:
exp_params = json.load(f)
#Read training images and GT from StarVoid/dataset/...
test_files = np.load(exp_params["test_path"])
X = test_files['X_test']
train_files = np.load(exp_params["train_path"])
X_trn = train_files['X_train']
mean, std = np.mean(X_trn), np.std(X_trn)
X = normalize(X, mean, std)
model = CARE(None, name= exp_params['model_name'], basedir= exp_params['base_dir'])
# X = X[...,np.newaxis]
#predictions = []
# Denoise all images
for i in range(X.shape[0]):
pred = denormalize(model.predict(X[i][..., np.newaxis], axes='YXC',normalizer=None ), mean, std)
# predictions.append(pred)
io.imsave(join(exp_params['base_dir'], 'mask'+str(i).zfill(3)+'.tif'), pred)
#predictions = np.array(predictions)
| 24.604167 | 100 | 0.711262 | #Imports
from n2v.models import Config, CARE
import numpy as np
from n2v.utils import plot_some, plot_history
from n2v.utils.n2v_utils import manipulate_val_data
import urllib
import os
import zipfile
import json
from os.path import join
from skimage import io
def normalize(img, mean, std):
    """Return *img* standardized to zero mean and unit variance using the
    supplied statistics."""
    return (img - mean) / std
def denormalize(x, mean, std):
    """Undo normalize(): scale *x* by *std* and shift it back by *mean*."""
    scaled = x * std
    return scaled + mean
# Load run parameters (data paths, model name, output dir).
with open('experiment.json', 'r') as f:
    exp_params = json.load(f)
# Read the test images plus the training images whose mean/std define the
# normalization applied to the test set.
test_files = np.load(exp_params["test_path"])
X = test_files['X_test']
train_files = np.load(exp_params["train_path"])
X_trn = train_files['X_train']
mean, std = np.mean(X_trn), np.std(X_trn)
X = normalize(X, mean, std)
# Load the trained model from base_dir.
model = CARE(None, name= exp_params['model_name'], basedir= exp_params['base_dir'])
# X = X[...,np.newaxis]
#predictions = []
# Denoise every test image and save each prediction as maskNNN.tif,
# denormalized back to the original intensity range.
for i in range(X.shape[0]):
    pred = denormalize(model.predict(X[i][..., np.newaxis], axes='YXC',normalizer=None ), mean, std)
#    predictions.append(pred)
    io.imsave(join(exp_params['base_dir'], 'mask'+str(i).zfill(3)+'.tif'), pred)
#predictions = np.array(predictions)
| 94 | 0 | 46 |
c6a42659ffbd81eeb22f230e66d6f72c1dd4db73 | 725 | py | Python | services/users/app/app.py | yuuta1999/microservice-with-flask | 6ad64341edb42c7f145aabc1e38e2619df75d444 | [
"MIT"
] | 1 | 2019-07-12T07:38:16.000Z | 2019-07-12T07:38:16.000Z | services/users/app/app.py | yuuta1999/microservice-with-flask | 6ad64341edb42c7f145aabc1e38e2619df75d444 | [
"MIT"
] | 4 | 2021-03-09T09:19:49.000Z | 2022-02-26T12:14:12.000Z | services/users/app/app.py | yuuta1999/microservice-with-flask | 6ad64341edb42c7f145aabc1e38e2619df75d444 | [
"MIT"
] | 1 | 2020-03-31T17:36:11.000Z | 2020-03-31T17:36:11.000Z | # users/app/app.py
import os
from flask import Flask
from app.api.utils.extensions import (
db, bcrypt, jwt, cors
)
from app.api.utils.func import JSONEncoder
def create_app(app_info=None):
    """Create and configure the Flask application (factory pattern).

    Binds the shared extensions (CORS, SQLAlchemy, bcrypt, JWT) to the new
    app, registers the user blueprint under /v1/, installs the custom JSON
    encoder, and exposes a trivial index route at '/'.
    """
    app = Flask(__name__)
    app.config.from_object(os.environ.get('APP_SETTINGS'))

    cors.init_app(app, origins=app.config.get('CORS_ORIGINS'), supports_credentials=True)
    db.init_app(app)
    bcrypt.init_app(app)
    jwt.init_app(app)

    from .api.views import user_api as user_bp
    app.register_blueprint(user_bp, url_prefix='/v1/')

    app.json_encoder = JSONEncoder

    # BUG FIX: the @app.route decorator previously had no function beneath
    # it, which is a SyntaxError; restore the trivial index view.
    @app.route('/')
    def index():
        return 'Hello, World'

    return app
| 21.323529 | 89 | 0.692414 | # users/app/app.py
import os
from flask import Flask
from app.api.utils.extensions import (
db, bcrypt, jwt, cors
)
from app.api.utils.func import JSONEncoder
def create_app(app_info=None):
    """Application factory for the users service.

    Builds a Flask app from the APP_SETTINGS config object, binds the
    shared extensions, mounts the user blueprint under /v1/, installs the
    custom JSON encoder and a trivial '/' index route, and returns the app.
    """
    app = Flask(__name__)
    app.config.from_object(os.environ.get('APP_SETTINGS'))

    # Bind the shared extension objects to this app instance.
    cors.init_app(app, origins=app.config.get('CORS_ORIGINS'), supports_credentials=True)
    for extension in (db, bcrypt, jwt):
        extension.init_app(app)

    # Imported inside the factory — presumably to avoid a circular import
    # at module load time (confirm).
    from .api.views import user_api as user_bp
    app.register_blueprint(user_bp, url_prefix='/v1/')

    app.json_encoder = JSONEncoder

    @app.route('/')
    def index():
        return 'Hello, World'

    return app
2483bd002844af7edc662e681fe21df5fb364740 | 4,995 | py | Python | src/cogs/warframe.py | dallas-hyde/adhara-bot | e184ebd12eba0ed3217ac8944c0ce52773a6f655 | [
"MIT"
] | null | null | null | src/cogs/warframe.py | dallas-hyde/adhara-bot | e184ebd12eba0ed3217ac8944c0ce52773a6f655 | [
"MIT"
] | null | null | null | src/cogs/warframe.py | dallas-hyde/adhara-bot | e184ebd12eba0ed3217ac8944c0ce52773a6f655 | [
"MIT"
] | null | null | null | from discord.ext import commands
import requests
import json
| 44.598214 | 217 | 0.734735 | from discord.ext import commands
import requests
import json
class Warframe(commands.Cog):
    """Discord cog exposing Warframe world-state commands.

    Every command queries the public warframestat.us REST API for the
    requested platform (default "pc") and replies in-channel with a
    formatted summary, or an apology message if the request failed.
    """
    def __init__(self, bot):
        # The owning discord bot instance.
        self.bot = bot
    @commands.command(pass_context=True, aliases=["baro"])
    async def baro_kiteer(self, ctx, platform="pc"):
        """Report Baro Ki'Teer's location, visit window and inventory."""
        # Query the warframestat.us API for Baro Ki'teer's data on the supplied platform
        response = requests.get(("https://api.warframestat.us/{0}/voidTrader").format(platform))
        # If the response is valid, decode it and send a message with the relevant data
        if response:
            decoded_response = json.loads(response.content)
            # Get relevant void trader information here so the message line isn't as cluttered
            baro_location = decoded_response.get("location")
            baro_start_time = decoded_response.get("startString")
            baro_end_time = decoded_response.get("endString")
            # TODO: Format baro's inventory (currently sent as the raw list)
            baro_inventory = decoded_response.get("inventory")
            await ctx.send("Baro Ki'Teer:\n\tPlatform:\t`{0}`\n\tLocation:\t`{1}`\n\tStart Time:\t`{2}`\n\tEnd Time:\t`{3}`\n\tInventory:\t`{4}`".format(platform, baro_location, baro_start_time, baro_end_time, baro_inventory))
        # If the response was not valid, send an error message
        else:
            await ctx.send("There was a problem retrieving Baro Ki'Teer's data!\nPlease try again later. 😟")
    @commands.command(pass_context=True, aliases=["earth"])
    async def earth_time(self, ctx, platform="pc"):
        """Report the Earth day/night cycle and time remaining."""
        # Query warframestat.us API for Earth's time data on the supplied platform
        response = requests.get(("https://api.warframestat.us/{0}/earthCycle").format(platform))
        # If the response is valid, decode it and send the user a message with the relevant data
        if response:
            decoded_response = json.loads(response.content)
            # Get relevant data here so the message line isn't as cluttered
            earth_state = decoded_response.get("state").capitalize()
            earth_time_left = decoded_response.get("timeLeft")
            await ctx.send("Earth:\n\tTime:\t`{0}`\n\tTime Left:\t`{1}`".format(earth_state, earth_time_left))
        # If the response was not valid, send an error message
        else:
            await ctx.send("There was a problem retrieving Earth's data!\nPlease try again later. 😟")
    @commands.command(pass_context=True, aliases=["plains", "eidolon", "cetus"])
    async def plains_of_eidolon(self, ctx, platform="pc"):
        """Report the Cetus / Plains of Eidolon day/night cycle."""
        # Query warframestat.us API for Cetus's time data on the supplied platform
        response = requests.get(("https://api.warframestat.us/{0}/cetusCycle").format(platform))
        # If the response is valid, decode it and send the user a message with the relevant data
        if response:
            decoded_response = json.loads(response.content)
            # Get relevant data here so the message line isn't as cluttered
            cetus_state = decoded_response.get("state").capitalize()
            cetus_time_left = decoded_response.get("timeLeft")
            await ctx.send("Plains of Eidolon:\n\tTime:\t`{0}`\n\tTime Left:\t`{1}`".format(cetus_state, cetus_time_left))
        # If the response was not valid, send an error message
        else:
            await ctx.send("There was a problem retrieving Cetus' data!\nPlease try again later. 😟")
    @commands.command(pass_context=True, aliases=["orb", "vallis"])
    async def orb_vallis(self, ctx, platform="pc"):
        """Report the Orb Vallis warm/cold weather cycle."""
        # Query warframestat.us API for the Orb Vallis' weather data on the supplied platform
        response = requests.get(("https://api.warframestat.us/{0}/vallisCycle").format(platform))
        # If the response is valid, decode it and send the user a message with the relevant data
        if response:
            decoded_response = json.loads(response.content)
            # Get relevant data here so the message line isn't as cluttered
            vallis_state = decoded_response.get("state").capitalize()
            vallis_time_left = decoded_response.get("timeLeft")
            await ctx.send("Orb Vallis:\n\tWeather:\t`{0}`\n\tTime Left:\t`{1}`".format(vallis_state, vallis_time_left))
        # If the response was not valid, send an error message
        else:
            await ctx.send("There was a problem retrieving the Orb Vallis' data!\nPlease try again later. 😟")
    @commands.command(pass_context=True, aliases=["cambion", "drift", "deimos"])
    async def cambion_drift(self, ctx, platform="pc"):
        """Report the Cambion Drift (Deimos) cycle state."""
        # Query warframestat.us API for the Cambion Drift's state data on the supplied platform
        response = requests.get(("https://api.warframestat.us/{0}/cambionCycle").format(platform))
        # If the response is valid, decode it and send the user a message with the relevant data
        if response:
            decoded_response = json.loads(response.content)
            # TODO: Get the time remaining using the activation and expiry fields
            # NOTE(review): this reads the "active" field (not "state" like the
            # other commands) and .capitalize() assumes it is a string — confirm
            # against the cambionCycle API schema.
            cambion_state = decoded_response.get("active").capitalize()
            await ctx.send("Cambion Drift:\n\tState:\t`{0}`".format(cambion_state))
        # If the response was not valid, send an error message
        else:
            await ctx.send("There was a problem retrieving the Cambion Drift's data!\nPlease try again later. 😟")
cee8091aaca44fa4ac8f01bb33ebf04cd18d899f | 1,228 | py | Python | boardlaw/arena/live/analysis.py | jzf2101/boardlaw | 29126c2a6ab7f11154fb242c303d3b11f1566201 | [
"MIT"
] | 20 | 2021-01-20T17:15:18.000Z | 2022-01-25T21:51:29.000Z | boardlaw/arena/live/analysis.py | jzf2101/boardlaw | 29126c2a6ab7f11154fb242c303d3b11f1566201 | [
"MIT"
] | 17 | 2021-01-21T08:14:11.000Z | 2021-06-09T22:27:00.000Z | boardlaw/arena/live/analysis.py | jzf2101/boardlaw | 29126c2a6ab7f11154fb242c303d3b11f1566201 | [
"MIT"
] | 3 | 2021-02-15T05:18:41.000Z | 2021-06-30T14:11:26.000Z | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pavlov import runs, stats
from rebar import dotdict
from . import json
import activelo
| 27.909091 | 66 | 0.630293 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pavlov import runs, stats
from rebar import dotdict
from . import json
import activelo
def pandas(soln, names):
    """Wrap an activelo solution's raw arrays in labelled pandas objects.

    Returns a dotdict whose μ is a Series and whose Σ is a DataFrame,
    both indexed by *names*.
    """
    mu = pd.Series(soln.μ, names)
    cov = pd.DataFrame(soln.Σ, names, names)
    return dotdict.dotdict(μ=mu, Σ=cov)
def difference(soln, contrast, name=None):
    """Re-express ratings relative to the *contrast* agent.

    Returns the mean and standard deviation of each agent's rating minus
    the contrast agent's rating; if *name* is given, only that agent's
    (mean, std) pair is returned.
    """
    mu, cov = soln.μ, soln.Σ
    centred = mu - mu[contrast]
    # var(x - c) = var(x) + var(c) - 2 cov(x, c)
    var = np.diag(cov) + cov.loc[contrast, contrast] - 2*cov[contrast]
    if name:
        return centred[name], var[name]**.5
    return centred, var**.5
def mask(games, wins, filter):
    """Restrict both square matrices to agents whose index names match the
    regex *filter* (rows and columns are filtered identically).
    """
    keep = games.index.str.match(filter)
    return games.loc[keep, keep], wins.loc[keep, keep]
def elos(run, target=None, filter='.*'):
    """Fit Elo-style ratings for the agents of *run*.

    Parameters
    ----------
    run : anything resolvable by pavlov.runs.resolve
    target : None, int or str
        If given, ratings are reported as differences against that agent
        (by positional index or by name); otherwise absolute μ/σ.
    filter : str
        Regex applied to agent names before fitting.

    Returns
    -------
    pd.DataFrame with columns 'μ' (rating) and 'σ' (uncertainty).
    """
    run = runs.resolve(run)
    games, wins = json.symmetric(run)
    games, wins = mask(games, wins, filter)

    soln = activelo.solve(games.values, wins.values)
    soln = pandas(soln, games.index)

    if isinstance(target, int):
        μ, σ = difference(soln, soln.μ.index[target])
    elif isinstance(target, str):
        μ, σ = difference(soln, target)
    else:
        μ, σ = soln.μ, pd.Series(np.diag(soln.Σ)**.5, games.index)

    # BUG FIX: pd.concat's axis argument is keyword-only in pandas >= 2.0;
    # the old positional call `pd.concat(..., 1)` raises a TypeError there.
    return pd.concat({'μ': μ, 'σ': σ}, axis=1)
| 1,005 | 0 | 92 |
b9c0d1a87ab9ddf08f4e9867aa6cbedacb169489 | 38,077 | py | Python | feasibgs/catalogs.py | changhoonhahn/feasiBGS | b5f535f12cf64babc9e25bcec75edd45d8668f74 | [
"MIT"
] | 1 | 2021-02-24T15:02:34.000Z | 2021-02-24T15:02:34.000Z | feasibgs/catalogs.py | michaelJwilson/feasiBGS | 63975b1e60f6f93f3b5020ee51ca565f325b918d | [
"MIT"
] | 7 | 2020-10-23T16:02:01.000Z | 2020-11-04T18:53:20.000Z | feasibgs/catalogs.py | michaelJwilson/feasiBGS | 63975b1e60f6f93f3b5020ee51ca565f325b918d | [
"MIT"
] | 1 | 2020-11-12T00:19:41.000Z | 2020-11-12T00:19:41.000Z | '''
submodules to handle Catalogs used in the project
'''
import os
import numpy as np
import h5py
from astropy.io import fits
from astropy.table import Table as aTable
from astropy.cosmology import FlatLambdaCDM
# -- local --
from . import util as UT
class Catalog(object):
    ''' Base class for the catalog objects in this module; currently only
    provides a shared hdf5 writing helper.
    '''
    def _h5py_create_dataset(self, grp, key, data):
        ''' Write *data* into hdf5 group *grp* under the lower-cased *key*,
        coercing fits-derived arrays that the newer h5py / python3
        combination cannot store directly (string and boolean columns).
        '''
        name = key.lower()
        if isinstance(data, np.chararray) or isinstance(data[0], np.str_):
            # store strings with h5py's variable-length string dtype
            as_str = np.array(data, dtype=h5py.special_dtype(vlen=str))
            grp.create_dataset(name, data=as_str)
        elif isinstance(data[0], np.bool_):
            # rebuild as a plain boolean numpy array
            as_bool = np.zeros(len(data)).astype(bool)
            as_bool[data] = True
            grp.create_dataset(name, data=as_bool)
        else:
            grp.create_dataset(name, data=data)
        return None
class GAMA(Catalog):
''' class to build/read in photometric and spectroscopic overlap
of the GAMA DR2/DR3 data.
The GAMA DR2 data contains photometry and
spectroscopy from GAMA I, which covers three regions of 48 deg^2
area for a total of 144 deg^2.
The GAMA DR3 data contains photometry and spectroscopy from GAMA II,
which covers the 14x6.5 GAMA regions in NGP (G02 region is EXCLUDED).
'''
    def Read(self, field, data_release=3, silent=True):
        ''' Read the spherematched photometric + spectroscopic GAMA data
        for *field*, building the hdf5 file first (via _Build or
        _fieldSplit) if it does not exist yet.

        Parameters
        ----------
        field : str
            'all', 'g09', 'g12', or 'g15'
        data_release : int
            GAMA data release (default 3)
        silent : bool
            if False, print the column names of each data group

        Returns
        -------
        dict
            {'photo': {...}, 'spec': {...}, 'kcorr_z0.0': {...},
             'kcorr_z0.1': {...}}, one array per hdf5 column.
        '''
        _file = self._File(field, data_release=data_release)
        if not os.path.isfile(_file): # if file is not constructed
            if not silent: print('Building %s' % _file)
            if field == 'all': self._Build(data_release=data_release, silent=silent)
            else: self._fieldSplit(data_release=data_release, silent=silent)
        # read in data and compile onto a dictionary
        f = h5py.File(_file, 'r')
        grp_p = f['photo'] # photo data
        grp_s = f['spec'] # spec data
        grp_k0 = f['kcorr_z0.0'] # k-corrections at z = 0.0
        grp_k1 = f['kcorr_z0.1'] # k-corrections at z = 0.1
        if not silent:
            print('colums in GAMA photometry')
            print(sorted(grp_p.keys()))
            print('========================')
            print('colums in GAMA spectroscopy')
            print(sorted(grp_s.keys()))
            print('========================')
            print('colums in GAMA kcorrects')
            print(sorted(grp_k0.keys()))
            print('========================')
            print('%i objects' % len(grp_p['ra'][...]))
            print('========================')
        # copy every column of every group into a plain nested dict
        data = {}
        for dkey, grp in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'], [grp_p, grp_s, grp_k0, grp_k1]):
            data[dkey] = {}
            for key in grp.keys():
                data[dkey][key] = grp[key][...]
        return data
def _File(self, field, data_release=3):
''' hdf5 file name of spherematched photometric and spectroscopic
data from GAMA DR3.
notes
-----
* v2 flag was added when photometry catalog was changed from InputCatA.fits
to TilingCat.fits
'''
if field == 'all':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.v2.hdf5']) # output file
elif field == 'g09':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G09.v2.hdf5']) # output file
elif field == 'g12':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G12.v2.hdf5']) # output file
elif field == 'g15':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G15.v2.hdf5']) # output file
def _Build(self, data_release=3, silent=True):
    ''' Read in the photometric data and the spectroscopic data,
    spherematch them and write the intersecting data to hdf5 file.

    parameters
    ----------
    data_release : int
        GAMA data release; 2 and 3 are supported (default: 3)
    silent : bool
        if False, print progress and sample-size summaries
    '''
    if data_release == 3:
        # this includes *three* of the four gama fields G02 field has its own data
        # read in photometry (GAMA`s tiling catalog; http://www.gama-survey.org/dr3/schema/table.php?id=3)
        gama_p = fits.open(UT.dat_dir()+'gama/dr3/TilingCat.fits')[1].data
        # read in emission line measurements (http://www.gama-survey.org/dr3/schema/table.php?id=40)
        gama_s = fits.open(UT.dat_dir()+'gama/dr3/GaussFitSimple.fits')[1].data
        # read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
        gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z00.fits')
        # read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
        gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z01.fits')
    elif data_release == 2: # Data Release 2 (what I had before)
        # read in photometry (GAMA`s master input catalogue; http://www.gama-survey.org/dr2/schema/table.php?id=156)
        gama_p = fits.open(UT.dat_dir()+'gama/InputCatA.fits')[1].data
        # read in spectroscopy (http://www.gama-survey.org/dr2/schema/table.php?id=197)
        gama_s = fits.open(UT.dat_dir()+'gama/SpecLines.fits')[1].data
        # read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
        gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z00.fits')
        # read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
        gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z01.fits')
    if not silent:
        #print('colums in GAMA photometry')
        #print(sorted(gama_p.__dict__.keys()))
        print('%i GAMA photometry objects' % len(gama_p['ra']))
        print('========================')
        #print('colums in GAMA spectroscopy')
        #print(sorted(gama_s.__dict__.keys()))
        print('%i GAMA spectroscopy (emission line) objects' % len(gama_s['ra']))
        print('========================')
        #print('colums in GAMA k-correct')
        #print(sorted(gama_k0.__dict__.keys()))
        print('%i GAMA k-correct objects' % len(gama_k0['mass']))
        print('========================')
    # impose some common sense cuts to make sure there's SDSS photometry
    # these magnitudes are extinction corrected!
    # -9999. is GAMA's sentinel for missing photometry
    has_sdss_photo = (
        (gama_p['u_model'] > -9999.) &
        (gama_p['g_model'] > -9999.) &
        (gama_p['r_model'] > -9999.) &
        (gama_p['i_model'] > -9999.) &
        (gama_p['z_model'] > -9999.))
    # impose science catalog cuts
    # sc >= 4: r < 19.8, GAMA II main survey
    # sc >= 5: r < 19.8 and satisfies r-band star-galaxy separation
    # sc = 6: r < 19.4 and satisfies r-band star-galaxy separation
    # r = r_petro
    sciencecut = (gama_p['survey_class'] > 3)
    # match cataid with spectroscopic data
    has_spec = np.in1d(gama_p['cataid'], gama_s['cataid'])
    # match cataid with k-correct data
    # (z=0.0 and z=0.1 kcorrect tables must cover the same objects)
    assert np.array_equal(gama_k0['cataid'], gama_k1['cataid'])
    has_kcorr = np.in1d(gama_p['cataid'], gama_k0['cataid'])
    # combined sample cut
    sample_cut = (has_spec & sciencecut & has_kcorr & has_sdss_photo)
    if not silent:
        print('of %i GAMA photometry objects' % len(gama_p['cataid']))
        print('========================')
        print('%i have SDSS photometry data' % np.sum(has_sdss_photo))
        print('========================')
        print('%i have spectroscopic data' % np.sum(has_spec))
        print('========================')
        print('%i have k-correct data' % np.sum(has_kcorr))
        print('========================')
        print('%i have all of the above' % np.sum(sample_cut))
        print('========================')
    # match up with spectroscopic data
    # NOTE: searchsorted assumes gama_s['cataid'] is sorted; the assert
    # below guards against a mis-sorted catalog
    s_match = np.searchsorted(gama_s['cataid'], gama_p['cataid'][sample_cut])
    assert np.array_equal(gama_s['cataid'][s_match], gama_p['cataid'][sample_cut])
    # match up with k-correct data (same sortedness assumption as above)
    k_match = np.searchsorted(gama_k0['cataid'], gama_p['cataid'][sample_cut])
    assert np.array_equal(gama_k0['cataid'][k_match], gama_p['cataid'][sample_cut])
    # write everything into a hdf5 file
    f = h5py.File(self._File('all', data_release=data_release), 'w')
    # store photometry data in photometry group
    grp_p = f.create_group('photo')
    for key in gama_p.names:
        self._h5py_create_dataset(grp_p, key, gama_p[key][sample_cut])
    # store spectroscopic data in spectroscopic group
    grp_s = f.create_group('spec')
    for key in gama_s.names:
        self._h5py_create_dataset(grp_s, key, gama_s[key][s_match])
    # store kcorrect data in kcorrect groups
    grp_k0 = f.create_group('kcorr_z0.0')
    for key in gama_k0.names:
        self._h5py_create_dataset(grp_k0, key, gama_k0[key][k_match])
    grp_k1 = f.create_group('kcorr_z0.1')
    for key in gama_k1.names:
        self._h5py_create_dataset(grp_k1, key, gama_k1[key][k_match])
    f.close()
    return None
def _fieldSplit(self, data_release=3, silent=True):
    ''' Split the GAMA photo-spectroscopic data into the different
    GAMA regions. Different regions have different r-mag limits and
    etc so treating them separately is the most sensible!
    '''
    combined = self.Read('all', data_release=data_release, silent=True)
    # RA ranges of the G09, G12 and G15 regions
    ra_bounds = {'g09': (129.0, 141.0), 'g12': (174.0, 186.0), 'g15': (211.5, 223.5)}

    for field in ['g09', 'g12', 'g15']:
        ra_lo, ra_hi = ra_bounds[field]
        ra = combined['photo']['ra']
        in_ra = (ra >= ra_lo) & (ra <= ra_hi)
        if not silent: print('%i objects in %s field' % (np.sum(in_ra), field.upper()))
        # write each field into hdf5 files
        fh5 = h5py.File(self._File(field, data_release=data_release), 'w')
        for grp_name in combined.keys(): # photo, spec, kcorr_z0.0, kcorr_z0.1
            grp = fh5.create_group(grp_name)
            for col, arr in combined[grp_name].items():
                grp.create_dataset(col, data=arr[in_ra])
        fh5.close()
    return None
def _readKcorrect(self, fitsfile):
    ''' GAMA Kcorrect raises VerifyError if read in the usual fashion.
    '''
    hdulist = fits.open(fitsfile)
    # patch non-compliant headers in place before touching the data
    hdulist.verify('fix')
    return hdulist[1].data
class GamaLegacy(Catalog):
    ''' class to append imaging data from the Legacy survey DR7 for the objects
    in the GAMA DR3 photo+spec data (.GAMA object). The objects in the final
    catalog has GAMA photometry, GAMA spectroscopy, and Legacy-survey photometry
    '''
    def AbsMag(self, data, kcorr=0.1, H0=70, Om0=0.3, galext=False):
        ''' Calculate absolute magnitude in SDSS u, g, r, i, z bands with kcorrect
        at z=`kcorr` given the data dictionary from the `GamaLegacy.Read` method.
        H0 and Om0 specifies the cosmology for the distance modulus.
        '''
        # check data's structure
        for k in ['gama-photo', 'gama-spec','gama-kcorr-z0.0', 'gama-kcorr-z0.1']:
            if k not in data.keys():
                raise ValueError('input data does not have the approprite keys')
        # check kcorr
        if kcorr not in [0.0, 0.1]: raise ValueError('kcorr = 0.0, 0.1 only')
        bands_sdss = ['u','g','r','i','z']
        # apparent magnitude from GAMA photometry
        if not galext:
            mag_ugriz = np.array([data['gama-photo'][b+'_model'] for b in bands_sdss])
        else:
            mag_ugriz = np.array([data['gama-kcorr-z0.1'][b+'_model'] for b in bands_sdss])
        redshift = data['gama-spec']['z'] # redshift
        # distance modulus
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
        D_L = cosmo.luminosity_distance(redshift).value # Mpc
        DM = 5. * np.log10(1e5*D_L)
        # k-correct
        if kcorr == 0.0:
            kcorr = np.array([data['gama-kcorr-z0.0']['kcorr_'+b] for b in bands_sdss])
        elif kcorr == 0.1:
            kcorr = np.array([data['gama-kcorr-z0.1']['kcorr_'+b] for b in bands_sdss])
        absmag_ugriz = mag_ugriz - DM - kcorr
        return absmag_ugriz

    def Read(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Read in objects from legacy survey DR 5 that overlap with the
        GAMA photo+spectra objects
        '''
        fgleg = self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy)
        if not os.path.isfile(fgleg): # if file is not constructed
            if not silent: print('Building %s' % fgleg)
            self._Build(field, dr_gama=dr_gama, dr_legacy=dr_legacy, silent=silent)
        # read in data and compile onto a dictionary
        f = h5py.File(fgleg, 'r')
        grp_gp = f['gama-photo']
        grp_gs = f['gama-spec']
        grp_k0 = f['gama-kcorr-z0.0']
        grp_k1 = f['gama-kcorr-z0.1']
        grp_lp = f['legacy-photo']
        if not silent:
            print('colums in GAMA Photo Data:')
            print(sorted(grp_gp.keys()))
            print('colums in GAMA Spec Data:')
            print(sorted(grp_gs.keys()))
            print('colums in Legacy Data:')
            print(sorted(grp_lp.keys()))
            print('========================')
            print('%i objects' % len(grp_gp['ra'][...]))
        data = {}
        for dk, grp in zip(['gama-photo', 'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1', 'legacy-photo'],
                [grp_gp, grp_gs, grp_k0, grp_k1, grp_lp]):
            data[dk] = {}
            for key in grp.keys():
                data[dk][key] = grp[key][...]
        # keep a copy so `select` can be used afterwards
        self.catalog = data.copy()
        return data

    def select(self, index=None):
        ''' select objects in the catalog by their index
        '''
        if index is not None:
            if isinstance(index, list):
                index = np.array(index)
            elif isinstance(index, np.ndarray):
                pass
            else:
                raise ValueError("index can only be a list of array")
        select_data = {}
        for grp in self.catalog.keys():
            select_data[grp] = {}
            for key in self.catalog[grp].keys():
                select_data[grp][key] = self.catalog[grp][key][index]
        return select_data

    def write(self, catalog, fname):
        ''' Given dictionary with same structure as self.catalog
        write to hdf5 file
        '''
        f = h5py.File(fname, 'w')
        for g in catalog.keys():
            grp = f.create_group(g)
            for k in catalog[g].keys():
                grp.create_dataset(k, data=catalog[g][k])
        f.close()
        return None

    def _Build(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Get Legacy Survey photometry for objects in the GAMA DR`dr_gama`
        photo+spec objects from the sweep files. This is meant to run on nersc
        but you can also manually download the sweep files and specify the dir
        where the sweep files are located in.
        '''
        from pydl.pydlutils.spheregroup import spherematch
        if dr_legacy == 5:
            sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
            sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
            tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/'
            # dr5 has a single tractor directory; alias it as "south" so the
            # apflux fallback below cannot raise NameError
            tractor_s_dir=tractor_n_dir
        elif dr_legacy == 7:
            sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
            sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
            tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
            tractor_s_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
        elif dr_legacy == 8:
            sweep_n_dir = \
                '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/sweep/8.0/'
            sweep_s_dir = \
                '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0/'
            tractor_n_dir = \
                '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/tractor/'
            # fixed: previously pointed at the dr7 tree (copy-paste error)
            tractor_s_dir = \
                '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/tractor/'
        else:
            # previously an unsupported dr_legacy left the directories
            # undefined and crashed later with NameError
            raise ValueError('dr_legacy = 5, 7, 8 only')
        # read in the names of the sweep files
        fsweep = ''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat'])
        if not os.path.isfile(fsweep): _ = self._getSweeps(field, silent=silent)
        sweep_files = np.loadtxt(fsweep, unpack=True, usecols=[0], dtype='S')
        if not silent: print("there are %i sweep files in the %s GAMA region" % (len(sweep_files), field))
        # read in GAMA objects
        gama = GAMA()
        gama_data = gama.Read(field, data_release=dr_gama, silent=silent)
        sweep_dict = {}
        gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict = {}, {}, {}, {}
        # loop through the files and only keep ones that spherematch with GAMA objects
        for i_f, f in enumerate(sweep_files):
            # read in sweep object; try the north directory first, then south
            for sweep_dir in [sweep_n_dir, sweep_s_dir]:
                fsweep = os.path.join(sweep_dir, f.decode('unicode_escape'))
                if os.path.isfile(fsweep): break
            sweep = fits.open(fsweep)[1].data
            if not silent: print('matching %s' % fsweep)
            # spherematch the sweep objects with GAMA objects
            # (match tolerance is 0.000277778 deg = 1 arcsec; spherematch
            # wants the larger catalog first, hence the swap below)
            if len(sweep['ra']) > len(gama_data['photo']['ra']):
                match = spherematch(sweep['ra'], sweep['dec'],
                        gama_data['photo']['ra'], gama_data['photo']['dec'], 0.000277778)
            else:
                match_inv = spherematch(gama_data['photo']['ra'], gama_data['photo']['dec'],
                        sweep['ra'], sweep['dec'], 0.000277778)
                match = [match_inv[1], match_inv[0], match_inv[2]]
            if not silent:
                print('%i matches from the %s sweep file' % (len(match[0]), f))
            # save sweep photometry to `sweep_dict`
            for key in sweep.names:
                if i_f == 0:
                    sweep_dict[key.lower()] = sweep[key][match[0]]
                else:
                    sweep_dict[key.lower()] = np.concatenate([sweep_dict[key.lower()], sweep[key][match[0]]])
            # save matching GAMA data ('photo', 'spec', and kcorrects)
            for gkey, gdict in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'],
                    [gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict]):
                for key in gama_data[gkey].keys():
                    if i_f == 0:
                        gdict[key] = gama_data[gkey][key][match[1]]
                    else:
                        gdict[key] = np.concatenate([gdict[key], gama_data[gkey][key][match[1]]])
            del sweep # free memory? (apparently not really)
        if not silent:
            print('========================')
            print('%i objects out of %i GAMA objects mached' % (len(sweep_dict['ra']), len(gama_data['photo']['dec'])) )
        # every GAMA-side dict must line up row-for-row with the sweep dict
        assert len(sweep_dict['ra']) == len(gama_photo_dict['ra'])
        assert len(sweep_dict['ra']) == len(gama_spec_dict['ra'])
        assert len(sweep_dict['ra']) == len(gama_kcorr0_dict['mass'])
        assert len(sweep_dict['ra']) == len(gama_kcorr1_dict['mass'])
        # writeout all the GAMA objects without sweep objects
        if not silent:
            nosweep = ~np.in1d(gama_data['photo']['objid'], gama_photo_dict['objid'])
            f_nosweep = ''.join([UT.dat_dir(),
                'GAMAdr', str(dr_gama), '.', field, '.LEGACYdr', str(dr_legacy), '.nosweep_match.fits'])
            print('========================')
            print('Writing out RA, Dec of %i GAMA objects without Legacy sweep objects to %s' %
                    (np.sum(nosweep), f_nosweep))
            tb = aTable([gama_data['photo']['ra'][nosweep], gama_data['photo']['dec'][nosweep]],
                    names=('ra', 'dec'))
            tb.meta['COMMENTS'] = 'RA, Dec of GAMA objects without matches in Legacy DR5 sweep'
            tb.write(f_nosweep, format='fits', overwrite=True)
            #np.savetxt(f_nosweep, np.array([gama_data['photo']['ra'], gama_data['photo']['dec']]).T, header='RA, Dec')
        # read apfluxes from tractor catalogs; fall back to the south
        # tractor tree when bricks are missing from the north tree
        try:
            apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
                    sweep_dict['objid'], tractor_dir=tractor_n_dir)
        except ValueError:
            apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
                    sweep_dict['objid'], tractor_dir=tractor_s_dir)
        assert apflux_dict['apflux_g'].shape[0] == len(sweep_dict['brickname'])
        # save data to hdf5 file
        if not silent: print('writing to %s' % self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy))
        f = h5py.File(self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy), 'w')
        grp_gp = f.create_group('gama-photo')
        grp_gs = f.create_group('gama-spec')
        grp_k0 = f.create_group('gama-kcorr-z0.0')
        grp_k1 = f.create_group('gama-kcorr-z0.1')
        grp_lp = f.create_group('legacy-photo')
        for key in sweep_dict.keys():
            self._h5py_create_dataset(grp_lp, key, sweep_dict[key])
        for key in apflux_dict.keys(): # additional apflux data.
            self._h5py_create_dataset(grp_lp, key, apflux_dict[key])
        for key in gama_photo_dict.keys():
            grp_gp.create_dataset(key, data=gama_photo_dict[key])
        for key in gama_spec_dict.keys():
            grp_gs.create_dataset(key, data=gama_spec_dict[key])
        for key in gama_kcorr0_dict.keys():
            grp_k0.create_dataset(key, data=gama_kcorr0_dict[key])
        for key in gama_kcorr1_dict.keys():
            grp_k1.create_dataset(key, data=gama_kcorr1_dict[key])
        f.close()
        return None

    def _getSweeps(self, field, silent=True):
        ''' Construct list of sweep files given GAMA object.
        '''
        # read in GAMA objects in field
        gama = GAMA()
        if field == 'all': raise ValueError("only select specific GAMA fields; not the entire data release")
        gama_data = gama.Read(field, silent=silent)
        # get brickmin and brickmax of sweep files
        # (sweep files tile the sky in 10 deg x 5 deg bricks)
        ra_mins = 10.*np.arange(gama_data['photo']['ra'].min() // 10., (gama_data['photo']['ra'].max() // 10.) + 1)
        ra_maxs = ra_mins + 10.
        dec_mins = 5.*np.arange(gama_data['photo']['dec'].min() // 5., (gama_data['photo']['dec'].max() // 5.) + 1)
        dec_maxs = dec_mins + 5.
        legacy_gama_sweep = []
        for i in range(len(ra_mins)):
            for j in range(len(dec_mins)):
                # 'm'/'p' encodes the sign of the declination in the name
                if dec_mins[j] < 0: pm_sign = 'm'
                else: pm_sign = 'p'
                brickmin = ''.join([str(int(ra_mins[i])).zfill(3), pm_sign,
                    str(int(np.abs(dec_mins[j]))).zfill(3)])
                if dec_maxs[j] < 0: pm_sign = 'm'
                else: pm_sign = 'p'
                brickmax = ''.join([str(int(ra_maxs[i])).zfill(3), pm_sign,
                    str(int(np.abs(dec_maxs[j]))).zfill(3)])
                f_sweep = ''.join(['sweep-', brickmin, '-', brickmax, '.fits'])
                legacy_gama_sweep.append(f_sweep)
                if not silent: print('... %s' % f_sweep)
        np.savetxt(''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat']),
                legacy_gama_sweep, fmt='%s')
        return ra_mins, dec_mins

    def _getTractorApflux(self, brickname, objids,
            tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/', silent=True):
        ''' The catalog is constructed from the sweep catalog and the
        GAMA DR3 photo+spec data. The sweep catalog does not include
        all the photometric data from the legacy survey. This methods
        appends 'apflux_g', 'apflux_r', 'apflux_z' and relevant columsn
        from the tractor files.
        This can (and probably should) be extended to other columns
        '''
        bricks_uniq = np.unique(brickname) # unique bricks
        # tractor files live under a subdirectory named after the first
        # three characters of the brick name
        AAAs = np.array([brick[:3] for brick in bricks_uniq])
        # apfluxes in 'g', 'r', and 'z' bands
        bands = ['g', 'r', 'z']
        apfluxes = np.zeros((3, len(brickname), 8))
        apflux_ivars = np.zeros((3, len(brickname), 8))
        apflux_resids = np.zeros((3, len(brickname), 8))
        n_brick = 0
        for ii, AAA, brick in zip(range(len(AAAs)), AAAs, bricks_uniq):
            name = ''.join([tractor_dir, AAA, '/tractor-', brick, '.fits'])
            if not silent: print('%i of %i unique bricks -- %s' % (ii, len(AAAs), brick))
            if not os.path.isfile(name): raise ValueError('%s tractor file not available' % name)
            f_tractor = fits.open(name)
            tractor = f_tractor[1].data
            inbrick = (brickname == brick)
            for i_k, key in enumerate(bands):
                apfluxes[i_k, inbrick, :] = tractor.field('apflux_'+key)[objids[inbrick]]
                apflux_ivars[i_k, inbrick, :] = tractor.field('apflux_ivar_'+key)[objids[inbrick]]
                apflux_resids[i_k, inbrick, :] = tractor.field('apflux_resid_'+key)[objids[inbrick]]
            n_brick += np.sum(inbrick)
        # every input row must have been covered by exactly one brick
        assert n_brick == len(brickname)
        # return dictionary with appropriate keys
        apflux_dict = {}
        for i_k, key in enumerate(bands):
            apflux_dict['apflux_'+key] = apfluxes[i_k,:,:]
            apflux_dict['apflux_ivar_'+key] = apflux_ivars[i_k,:,:]
            apflux_dict['apflux_resid_'+key] = apflux_resids[i_k,:,:]
        return apflux_dict
class Legacy(Catalog):
    ''' Class for working with Legacy Surveys sweep catalogs: collects the
    sweeps over a hardcoded ~1400 deg^2 test region and applies target
    selection cuts (spatial masking, star-galaxy separation, quality cuts).
    '''
    def _1400deg2_test(self, dr=8, rlimit=None):
        ''' Apply the selection cuts to the previously-collected sweeps in
        the 1400 deg^2 test region (160 < RA < 230, -2 < Dec < 18) and
        write the selected sample to file.

        parameters
        ----------
        dr : int
            legacy survey data release (default: 8)
        rlimit : float or None
            r-band magnitude limit used when the sweeps were collected;
            None reads/writes the un-limited files
        '''
        # prints the area of the test region for reference
        area = self._1400deg2_area()
        # read legacy sweeps data in 1400 deg^2 region
        if rlimit is None:
            _fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
        else:
            _fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
        fsweep = h5py.File(_fsweep, 'r')
        sweep = {}
        for k in fsweep.keys(): sweep[k] = fsweep[k][...]
        print('%i sweep objects' % len(sweep['flux_r']))
        # spatial masking
        _spatial_mask = self.spatial_mask(sweep['maskbits'], [sweep['nobs_g'], sweep['nobs_r'], sweep['nobs_z']])
        print('%i spatial mask' % np.sum(_spatial_mask))
        # star-galaxy separation
        _star_galaxy = self.star_galaxy(sweep['gaia_phot_g_mean_mag'], sweep['flux_r'])
        print('%i star-galaxy separation' % np.sum(_star_galaxy))
        # quality cut on extinction-corrected magnitudes
        gmag = self.flux_to_mag(sweep['flux_g']/sweep['mw_transmission_g'])
        rmag = self.flux_to_mag(sweep['flux_r']/sweep['mw_transmission_r'])
        zmag = self.flux_to_mag(sweep['flux_z']/sweep['mw_transmission_z'])
        _quality_cut = self.quality_cut(
                np.array([sweep['fracflux_g'], sweep['fracflux_r'], sweep['fracflux_z']]),
                np.array([sweep['fracmasked_g'], sweep['fracmasked_r'], sweep['fracmasked_z']]),
                np.array([sweep['fracin_g'], sweep['fracin_r'], sweep['fracin_z']]),
                gmag - rmag,
                rmag - zmag)
        print('%i quality cut' % np.sum(_quality_cut))
        sample_select = (_spatial_mask & _star_galaxy & _quality_cut)
        print('%i (spatial mask) & (star-galaxy sep.) & (quality cut)' % (np.sum(sample_select)))
        # fixed: previously the rlim filename was formatted even when
        # rlimit was None, which raised TypeError
        if rlimit is None:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.hdf5')
        else:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.rlim%.1f.hdf5' % rlimit)
        f = h5py.File(fout, 'w')
        for k in sweep.keys():
            self._h5py_create_dataset(f, k, sweep[k][sample_select])
        f.close()
        return None

    def _1400deg2_area(self):
        ''' area of 1400 deg^2 test region
        '''
        # hardcoded patch of sky
        ra_min, ra_max = 160., 230.
        dec_min, dec_max = -2., 18.
        # exact spherical-cap area of the RA/Dec rectangle, in deg^2
        area = (np.radians(ra_max) - np.radians(ra_min))*(np.sin(np.radians(dec_max)) - np.sin(np.radians(dec_min)))
        area *= (180/np.pi)**2
        print('%.f deg^2 test region' % area)
        return area

    def quality_cut(self, frac_flux, fracmasked, fracin, g_r, r_z):
        ''' apply baseline quality cut
        * frac_flux_[g,r,z]<5 Not overwhelmed by neighbouring source (any band)
        * fracmasked_[g,r,z]<0.4 Model not dominated by masked pixels in any band
        * fracin_[g,r,z]>0.3 Most of the model flux not outside the region of the data used to fit the model
        * -1< g-r < 4 Not an absolutely bizarre colour
        * -1< r-z < 4 Not an absolutely bizarre colour
        '''
        # each input is stacked (g, r, z) along axis 0
        assert frac_flux.shape[0] == 3
        assert fracmasked.shape[0] == 3
        assert fracin.shape[0] == 3
        # Not overwhelmed by neighbouring source (any band)
        _frac_flux = ((frac_flux[0] < 5.) & (frac_flux[1] < 5.) & (frac_flux[2] < 5.))
        # Model not dominated by masked pixels in any band
        _fracmasked = ((fracmasked[0] < 0.4) & (fracmasked[1] < 0.4) & (fracmasked[2] < 0.4))
        # Most of the model flux not outside the region of the data used to fit the model
        _fracin = ((fracin[0] > 0.3) & (fracin[1] > 0.3) & (fracin[2] > 0.3))
        # color cut
        _colorcut = ((g_r > -1.) & (g_r < 4.) & (r_z > -1.) & (r_z < 4.))
        cut = (_frac_flux & _fracmasked & _fracin & _colorcut)
        return cut

    def star_galaxy(self, gaia_G, r_flux):
        ''' star-galaxy separation using GAIA and tractor photometry
        (gaia G mag) - (raw r mag) > 0.6 or (gaia G mag) == 0
        '''
        G_rr = gaia_G - self.flux_to_mag(r_flux)
        isgalaxy = (G_rr > 0.6) | (gaia_G == 0)
        return isgalaxy

    def spatial_mask(self, maskbits, nobs):
        ''' spatial masking around
        * bright stars
        * medium bright stars
        * clusters
        * large galaxies
        '''
        nobs_g, nobs_r, nobs_z = nobs
        BS = (np.uint64(maskbits) & np.uint64(2**1))!=0 # bright stars
        MS = (np.uint64(maskbits) & np.uint64(2**11))!=0 # medium bright stars
        GC = (np.uint64(maskbits) & np.uint64(2**13))!=0 # clusters
        LG = (np.uint64(maskbits) & np.uint64(2**12))!=0 # large galaxies
        # allmask bits (5, 6, 7) and minimum one observation per band
        allmask = ((maskbits & 2**6) != 0) | ((maskbits & 2**5) != 0) | ((maskbits & 2**7) != 0)
        nobs = ((nobs_g < 1) | (nobs_r < 1) | (nobs_z < 1))
        mask = ~(BS | MS | GC | LG | allmask | nobs)
        return mask

    def _collect_1400deg2_test(self, dr=8, rlimit=None):
        ''' collect sweeps data within the same 1400 deg2 test region that Omar used for dr7
        and save to file.
        '''
        import glob
        if dr != 8: raise NotImplementedError
        if os.environ['NERSC_HOST'] != 'cori': raise ValueError('this script is meant to run on cori only')
        # hardcoded patch of sky
        ra_min, ra_max = 160., 230.
        dec_min, dec_max = -2., 18.
        dir_legacy = '/project/projectdirs/cosmo/data/legacysurvey/'
        dir_north = os.path.join(dir_legacy, 'dr8/north/sweep/8.0')
        dir_south = os.path.join(dir_legacy, 'dr8/south/sweep/8.0')
        fsweeps_N = glob.glob('%s/*.fits' % dir_north)
        print('%i North sweep files' % len(fsweeps_N))
        fsweeps_S = glob.glob('%s/*.fits' % dir_south)
        print('%i South sweep files' % len(fsweeps_S))
        fsweeps = sorted([os.path.join(dir_north, _fs) for _fs in fsweeps_N] + [os.path.join(dir_south, _fs) for _fs in fsweeps_S])
        sweeps = {}
        for _fsweep in fsweeps:
            # get sweep RA and Dec range
            sweep_ra_min, sweep_ra_max, sweep_dec_min, sweep_dec_max = self._parse_brickname(_fsweep)
            # skip sweeps with no overlap with the test region
            not_in_region = (
                    (sweep_ra_max < ra_min) |
                    (sweep_ra_min > ra_max) |
                    (sweep_dec_max < dec_min) |
                    (sweep_dec_min > dec_max)
                    )
            if not_in_region: continue
            # read sweep file
            sweep = fits.open(_fsweep)[1].data
            # area that's within the test region
            mask_region = (
                    (sweep['RA'] >= ra_min) &
                    (sweep['RA'] <= ra_max) &
                    (sweep['DEC'] >= dec_min) &
                    (sweep['DEC'] <= dec_max))
            if np.sum(mask_region) == 0: continue
            if rlimit is None:
                # fixed: np.ones takes a shape, not a data array
                rcut = np.ones(len(sweep['RA'])).astype(bool)
            else:
                rflux = sweep['FLUX_R'] / sweep['MW_TRANSMISSION_R']
                rcut = (rflux > 10**((22.5-rlimit)/2.5))
            print('%i obj in %s' % (np.sum(mask_region), os.path.basename(_fsweep)))
            if len(sweeps.keys()) == 0:
                for k in sweep.names:
                    sweeps[k] = sweep[k][mask_region & rcut]
            else:
                for k in sweep.names:
                    sweeps[k] = np.concatenate([sweeps[k], sweep[k][mask_region & rcut]], axis=0)
        if rlimit is None:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
        else:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
        f = h5py.File(fout, 'w')
        for k in sweeps.keys():
            self._h5py_create_dataset(f, k, sweeps[k])
        f.close()
        return None

    def _parse_brickname(self, brickname):
        ''' parse ra and dec range from brick name
        '''
        name = os.path.basename(brickname).replace('.fits', '') # get rid of directory and ext
        radec1 = name.split('-')[1]
        radec2 = name.split('-')[2]
        # 'p'/'m' separates RA from (signed) Dec in the brick name
        if 'p' in radec1: _c = 'p'
        elif 'm' in radec1: _c = 'm'
        ra_min = float(radec1.split(_c)[0])
        dec_min = float(radec1.split(_c)[1])
        if 'p' in radec2: _c = 'p'
        elif 'm' in radec2: _c = 'm'
        ra_max = float(radec2.split(_c)[0])
        dec_max = float(radec2.split(_c)[1])
        return ra_min, ra_max, dec_min, dec_max

    def _Tycho(self, ra_lim=None, dec_lim=None):
        ''' read in tycho2 catalog within RA and Dec range
        '''
        _tycho = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'tycho2.fits'))[1].data
        mask_region = np.ones(len(_tycho['RA'])).astype(bool)
        if ra_lim is not None:
            mask_region = mask_region & (_tycho['RA'] >= ra_lim[0]) & (_tycho['RA'] <= ra_lim[1])
        if dec_lim is not None:
            mask_region = mask_region & (_tycho['DEC'] >= dec_lim[0]) & (_tycho['DEC'] <= dec_lim[1])
        tycho = {}
        for k in _tycho.names:
            tycho[k] = _tycho[k][mask_region]
        return tycho

    def _LSLGA(self, ra_lim=None, dec_lim=None):
        ''' read in Legacy Survey Large Galaxy Atlas
        '''
        _lslga = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'LSLGA-v2.0.fits'))[1].data
        mask_region = np.ones(len(_lslga['RA'])).astype(bool)
        if ra_lim is not None:
            mask_region = mask_region & (_lslga['RA'] >= ra_lim[0]) & (_lslga['RA'] <= ra_lim[1])
        if dec_lim is not None:
            mask_region = mask_region & (_lslga['DEC'] >= dec_lim[0]) & (_lslga['DEC'] <= dec_lim[1])
        lslga = {}
        for k in _lslga.names:
            lslga[k] = _lslga[k][mask_region]
        return lslga
def _GamaLegacy_TractorAPFLUX():
    ''' Retroactively add apflux columns from the tractor catalogs
    to the GamaLegacy catalog constructed and saved to file. This is a
    hack.
    '''
    gleg = GamaLegacy()
    # open saved gama-legacy catalog for appending
    # NOTE(review): GamaLegacy._File requires a `field` argument in the
    # version of the class visible here -- confirm the intended call.
    f_gleg = h5py.File(gleg._File(), 'r+')
    # legacy photometry group
    grp_lp = f_gleg['legacy-photo']
    if 'apflux_g' in grp_lp.keys():
        # check that the columns don't already exist
        f_gleg.close()
        raise ValueError('apfluxes already in the catalog')
    # read apfluxes from tractor catalogs
    # fixed: the keyword is `tractor_dir` (was passed as `dir`, a TypeError);
    # dataset contents are read with `[...]` (h5py 3 removed `.value`)
    apflux_dict = gleg._getTractorApflux(grp_lp['brickname'][...], grp_lp['objid'][...],
            tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/')
    assert apflux_dict['apflux_g'].shape[0] == len(grp_lp['brickname'])
    # save fluxes to the dataset
    for key in apflux_dict.keys():
        grp_lp.create_dataset(key, data=apflux_dict[key])
    f_gleg.close()
    return None
| 45.601198 | 131 | 0.565118 | '''
submodules to handle Catalogs used in the project
'''
import os
import numpy as np
import h5py
from astropy.io import fits
from astropy.table import Table as aTable
from astropy.cosmology import FlatLambdaCDM
# -- local --
from . import util as UT
class Catalog(object):
    ''' Parent object for the catalog classes in this module; stores the
    loaded catalog and provides small helpers shared by the subclasses.
    '''
    def __init__(self):
        # populated by the subclasses' Read methods
        self.catalog = None

    def _h5py_create_dataset(self, grp, key, data):
        ''' Write `data` into h5py group `grp` under the lower-cased `key`,
        coercing string and boolean arrays (which the fits record arrays
        do not hand to h5py cleanly under python3) into h5py-friendly form.
        '''
        name = key.lower()
        is_string = isinstance(data, np.chararray) or isinstance(data[0], np.str_)
        if is_string:
            # variable-length string dtype so h5py accepts unicode arrays
            vlen_str = h5py.special_dtype(vlen=str)
            grp.create_dataset(name, data=np.array(data, dtype=vlen_str))
        elif isinstance(data[0], np.bool_):
            # rebuild as a plain bool array
            as_bool = np.zeros(len(data)).astype(bool)
            as_bool[data] = True
            grp.create_dataset(name, data=as_bool)
        else:
            grp.create_dataset(name, data=data)
        return None

    def flux_to_mag(self, flux):
        ''' Convert flux to magnitude with a 22.5 zeropoint. '''
        return 22.5 - 2.5*np.log10(flux)
class GAMA(Catalog):
''' class to build/read in photometric and spectroscopic overlap
of the GAMA DR2/DR3 data.
The GAMA DR2 data contains photometry and
spectroscopy from GAMA I, which covers three regions of 48 deg^2
area for a total of 144 deg^2.
The GAMA DR3 data contains photometry and spectroscopy from GAMA II,
which covers the 14x6.5 GAMA regions in NGP (G02 region is EXCLUDED).
'''
def __init__(self):
    # no state to initialize; all data access goes through Read()
    pass
def Read(self, field, data_release=3, silent=True):
    ''' Load the spherematched GAMA photometry + spectroscopy built by
    _Build (or _fieldSplit for single fields) as a nested dictionary.
    '''
    fname = self._File(field, data_release=data_release)
    if not os.path.isfile(fname):
        # hdf5 file has not been constructed yet; build it first
        if not silent: print('Building %s' % fname)
        if field == 'all':
            self._Build(data_release=data_release, silent=silent)
        else:
            self._fieldSplit(data_release=data_release, silent=silent)

    fh5 = h5py.File(fname, 'r')
    names = ['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1']
    groups = [fh5[n] for n in names]

    if not silent:
        for label, grp in zip(['photometry', 'spectroscopy', 'kcorrects'], groups):
            print('colums in GAMA %s' % label)
            print(sorted(grp.keys()))
            print('========================')
        print('%i objects' % len(groups[0]['ra'][...]))
        print('========================')

    # pull every column of every group into plain numpy arrays
    data = {}
    for name, grp in zip(names, groups):
        data[name] = {col: grp[col][...] for col in grp.keys()}
    return data
def _File(self, field, data_release=3):
    ''' hdf5 file name of spherematched photometric and spectroscopic
    data from GAMA DR3.

    parameters
    ----------
    field : str
        one of 'all', 'g09', 'g12', 'g15'
    data_release : int
        GAMA data release (default: 3)

    notes
    -----
    * v2 flag was added when photometry catalog was changed from InputCatA.fits
      to TilingCat.fits
    * an unrecognized `field` previously fell off the elif chain and
      silently returned None; it now raises ValueError at the call site.
    '''
    suffixes = {'all': '', 'g09': '.G09', 'g12': '.G12', 'g15': '.G15'}
    if field not in suffixes:
        raise ValueError("field must be one of %s" % str(sorted(suffixes.keys())))
    return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release),
        suffixes[field], '.v2.hdf5'])  # output file
def _Build(self, data_release=3, silent=True):
    ''' Read in the photometric data and the spectroscopic data,
    spherematch them and write the intersecting data to hdf5 file.

    Parameters
    ----------
    data_release : int
        GAMA data release to compile (2 or 3; default 3).
    silent : bool
        If False, print diagnostics while building.
    '''
    if data_release == 3:
        # this includes *three* of the four gama fields G02 field has its own data
        # read in photometry (GAMA`s tiling catalog; http://www.gama-survey.org/dr3/schema/table.php?id=3)
        gama_p = fits.open(UT.dat_dir()+'gama/dr3/TilingCat.fits')[1].data
        # read in emission line measurements (http://www.gama-survey.org/dr3/schema/table.php?id=40)
        gama_s = fits.open(UT.dat_dir()+'gama/dr3/GaussFitSimple.fits')[1].data
        # read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
        gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z00.fits')
        # read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
        gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z01.fits')
    elif data_release == 2: # Data Release 2 (what I had before)
        # read in photometry (GAMA`s master input catalogue; http://www.gama-survey.org/dr2/schema/table.php?id=156)
        gama_p = fits.open(UT.dat_dir()+'gama/InputCatA.fits')[1].data
        # read in spectroscopy (http://www.gama-survey.org/dr2/schema/table.php?id=197)
        gama_s = fits.open(UT.dat_dir()+'gama/SpecLines.fits')[1].data
        # read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
        gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z00.fits')
        # read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
        gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z01.fits')
    # NOTE(review): any other data_release value falls through with the
    # gama_* variables undefined and raises NameError below.
    if not silent:
        #print('colums in GAMA photometry')
        #print(sorted(gama_p.__dict__.keys()))
        print('%i GAMA photometry objects' % len(gama_p['ra']))
        print('========================')
        #print('colums in GAMA spectroscopy')
        #print(sorted(gama_s.__dict__.keys()))
        print('%i GAMA spectroscopy (emission line) objects' % len(gama_s['ra']))
        print('========================')
        #print('colums in GAMA k-correct')
        #print(sorted(gama_k0.__dict__.keys()))
        print('%i GAMA k-correct objects' % len(gama_k0['mass']))
        print('========================')
    # impose some common sense cuts to make sure there's SDSS photometry
    # these magnitudes are extinction corrected!
    has_sdss_photo = (
        (gama_p['u_model'] > -9999.) &
        (gama_p['g_model'] > -9999.) &
        (gama_p['r_model'] > -9999.) &
        (gama_p['i_model'] > -9999.) &
        (gama_p['z_model'] > -9999.))
    # impose science catalog cuts
    # sc >= 4: r < 19.8, GAMA II main survey
    # sc >= 5: r < 19.8 and satisfies r-band star-galaxy separation
    # sc = 6: r < 19.4 and satisfies r-band star-galaxy separation
    # r = r_petro
    sciencecut = (gama_p['survey_class'] > 3)
    # match cataid with spectroscopic data
    has_spec = np.in1d(gama_p['cataid'], gama_s['cataid'])
    # match cataid with k-correct data
    assert np.array_equal(gama_k0['cataid'], gama_k1['cataid'])
    has_kcorr = np.in1d(gama_p['cataid'], gama_k0['cataid'])
    # combined sample cut
    sample_cut = (has_spec & sciencecut & has_kcorr & has_sdss_photo)
    if not silent:
        print('of %i GAMA photometry objects' % len(gama_p['cataid']))
        print('========================')
        print('%i have SDSS photometry data' % np.sum(has_sdss_photo))
        print('========================')
        print('%i have spectroscopic data' % np.sum(has_spec))
        print('========================')
        print('%i have k-correct data' % np.sum(has_kcorr))
        print('========================')
        print('%i have all of the above' % np.sum(sample_cut))
        print('========================')
    # match up with spectroscopic data
    # (np.searchsorted assumes gama_s['cataid'] is sorted; the asserts
    # below verify the resulting row alignment)
    s_match = np.searchsorted(gama_s['cataid'], gama_p['cataid'][sample_cut])
    assert np.array_equal(gama_s['cataid'][s_match], gama_p['cataid'][sample_cut])
    # match up with k-correct data
    k_match = np.searchsorted(gama_k0['cataid'], gama_p['cataid'][sample_cut])
    assert np.array_equal(gama_k0['cataid'][k_match], gama_p['cataid'][sample_cut])
    # write everything into a hdf5 file
    f = h5py.File(self._File('all', data_release=data_release), 'w')
    # store photometry data in photometry group
    grp_p = f.create_group('photo')
    for key in gama_p.names:
        self._h5py_create_dataset(grp_p, key, gama_p[key][sample_cut])
    # store spectroscopic data in spectroscopic group
    grp_s = f.create_group('spec')
    for key in gama_s.names:
        self._h5py_create_dataset(grp_s, key, gama_s[key][s_match])
    # store kcorrect data in kcorrect groups
    grp_k0 = f.create_group('kcorr_z0.0')
    for key in gama_k0.names:
        self._h5py_create_dataset(grp_k0, key, gama_k0[key][k_match])
    grp_k1 = f.create_group('kcorr_z0.1')
    for key in gama_k1.names:
        self._h5py_create_dataset(grp_k1, key, gama_k1[key][k_match])
    f.close()
    return None
def _fieldSplit(self, data_release=3, silent=True):
    ''' Split the compiled GAMA photo+spectroscopic data into the
    separate GAMA regions (G09, G12, G15) and write one hdf5 file per
    region. The regions have different r-mag limits etc., so treating
    them separately is the most sensible approach.
    '''
    catalog = self.Read('all', data_release=data_release, silent=True)
    # (field name, RA lower bound, RA upper bound) for each GAMA region
    field_bounds = [
        ('g09', 129.0, 141.0),
        ('g12', 174.0, 186.0),
        ('g15', 211.5, 223.5)]
    ra = catalog['photo']['ra']
    for field, ra_lo, ra_hi in field_bounds:
        in_field = (ra >= ra_lo) & (ra <= ra_hi)
        if not silent: print('%i objects in %s field' % (np.sum(in_field), field.upper()))
        # write each field into hdf5 files
        with h5py.File(self._File(field, data_release=data_release), 'w') as fout:
            for group_name in catalog.keys():  # photo, spec, kcorr_z0.0, kcorr_z0.1
                group = fout.create_group(group_name)
                for column in catalog[group_name].keys():
                    group.create_dataset(column, data=catalog[group_name][column][in_field])
    return None
def _readKcorrect(self, fitsfile):
    '''Read a GAMA kcorrect FITS table.

    The GAMA kcorrect files raise ``VerifyError`` when read in the usual
    fashion, so the HDU list is verified (with fixes applied) first.
    '''
    hdulist = fits.open(fitsfile)
    hdulist.verify('fix')
    return hdulist[1].data
class GamaLegacy(Catalog):
''' Class that appends imaging data from the Legacy Survey DR7 to the objects
in the GAMA DR3 photo+spec data (the `GAMA` object above). The objects in the
final catalog have GAMA photometry, GAMA spectroscopy, and Legacy-survey
photometry.
'''
def AbsMag(self, data, kcorr=0.1, H0=70, Om0=0.3, galext=False):
    ''' Calculate absolute magnitudes in the SDSS u, g, r, i, z bands,
    k-corrected to z=`kcorr`, for the data dictionary returned by the
    `GamaLegacy.Read` method.

    Parameters
    ----------
    data : dict
        output of `GamaLegacy.Read`; must contain the 'gama-photo',
        'gama-spec', 'gama-kcorr-z0.0', and 'gama-kcorr-z0.1' groups.
    kcorr : float
        redshift of the k-correction; only 0.0 and 0.1 are supported.
    H0, Om0 : float
        Hubble constant and matter density of the flat LCDM cosmology
        used for the distance modulus.
    galext : bool
        If True, use the model magnitudes stored with the z=0.1
        k-correction table instead of the GAMA photometry table.

    Returns
    -------
    np.ndarray
        (5, N) array of absolute magnitudes in u, g, r, i, z.
    '''
    # check data's structure
    for k in ['gama-photo', 'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1']:
        if k not in data.keys():
            # fix: error message previously misspelled "approprite"
            raise ValueError('input data does not have the appropriate keys')
    # check kcorr
    if kcorr not in [0.0, 0.1]: raise ValueError('kcorr = 0.0, 0.1 only')
    bands_sdss = ['u', 'g', 'r', 'i', 'z']
    # apparent magnitude from GAMA photometry
    if not galext:
        mag_ugriz = np.array([data['gama-photo'][b+'_model'] for b in bands_sdss])
    else:
        mag_ugriz = np.array([data['gama-kcorr-z0.1'][b+'_model'] for b in bands_sdss])
    redshift = data['gama-spec']['z']  # redshift
    # distance modulus: DM = 5 log10(D_L / 10pc); D_L is in Mpc so
    # 1e5 * D_L[Mpc] = D_L / 10pc
    cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
    D_L = cosmo.luminosity_distance(redshift).value  # Mpc
    DM = 5. * np.log10(1e5 * D_L)
    # k-corrections (separate name so the `kcorr` argument is no longer
    # shadowed by the correction array)
    if kcorr == 0.0:
        k_corrections = np.array([data['gama-kcorr-z0.0']['kcorr_'+b] for b in bands_sdss])
    else:  # kcorr == 0.1
        k_corrections = np.array([data['gama-kcorr-z0.1']['kcorr_'+b] for b in bands_sdss])
    absmag_ugriz = mag_ugriz - DM - k_corrections
    return absmag_ugriz
def Read(self, field, dr_gama=3, dr_legacy=7, silent=True):
    ''' Read in objects from the Legacy survey (release `dr_legacy`,
    default DR7) that overlap with the GAMA photo+spectra objects in
    `field`. If the compiled hdf5 file does not exist yet, it is built
    first via `_Build` (which requires access to the sweep files).

    Returns a dictionary keyed by data group ('gama-photo', 'gama-spec',
    'gama-kcorr-z0.0', 'gama-kcorr-z0.1', 'legacy-photo'), each mapping
    column name -> numpy array. The result is also cached on
    `self.catalog` for use by `select`.
    '''
    fgleg = self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy)
    if not os.path.isfile(fgleg): # if file is not constructed
        if not silent: print('Building %s' % fgleg)
        self._Build(field, dr_gama=dr_gama, dr_legacy=dr_legacy, silent=silent)
    # read in data and compile onto a dictionary
    f = h5py.File(fgleg, 'r')
    grp_gp = f['gama-photo']
    grp_gs = f['gama-spec']
    grp_k0 = f['gama-kcorr-z0.0']
    grp_k1 = f['gama-kcorr-z0.1']
    grp_lp = f['legacy-photo']
    if not silent:
        print('colums in GAMA Photo Data:')
        print(sorted(grp_gp.keys()))
        print('colums in GAMA Spec Data:')
        print(sorted(grp_gs.keys()))
        print('colums in Legacy Data:')
        print(sorted(grp_lp.keys()))
        print('========================')
        print('%i objects' % len(grp_gp['ra'][...]))
    data = {}
    for dk, grp in zip(['gama-photo', 'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1', 'legacy-photo'],
            [grp_gp, grp_gs, grp_k0, grp_k1, grp_lp]):
        data[dk] = {}
        for key in grp.keys():
            data[dk][key] = grp[key][...]
    # cache the catalog for later use by `select`
    self.catalog = data.copy()
    return data
def select(self, index=None):
    ''' Select objects in the catalog by index.

    Parameters
    ----------
    index : list or np.ndarray, optional
        indices of the objects to keep. If None, all objects are
        returned.

    Returns
    -------
    dict
        dictionary with the same group/column structure as
        `self.catalog`, restricted to the selected objects.
    '''
    if not hasattr(self, 'catalog'):
        raise ValueError("no catalog loaded; call the Read method first")
    if index is None:
        # select everything. Previously `None` was passed straight
        # through as a numpy index, which *adds* an axis (shape (1, N))
        # instead of selecting all rows.
        index = slice(None)
    elif isinstance(index, list):
        index = np.array(index)
    elif not isinstance(index, np.ndarray):
        # fix: message previously read "a list of array"
        raise ValueError("index can only be a list or a numpy array")
    select_data = {}
    for grp in self.catalog.keys():
        select_data[grp] = {}
        for key in self.catalog[grp].keys():
            select_data[grp][key] = self.catalog[grp][key][index]
    return select_data
def write(self, catalog, fname):
    ''' Write a catalog dictionary (same nested group/column structure
    as `self.catalog`) out to an hdf5 file named `fname`.
    '''
    with h5py.File(fname, 'w') as fout:
        for group_name, columns in catalog.items():
            group = fout.create_group(group_name)
            for col_name, values in columns.items():
                group.create_dataset(col_name, data=values)
    return None
def _File(self, field, dr_gama=3, dr_legacy=7):
    '''Path of the compiled GAMA x Legacy hdf5 catalog for `field`.'''
    return '{}GAMAdr{}.{}.LEGACYdr{}.v2.hdf5'.format(UT.dat_dir(), dr_gama, field, dr_legacy)
def _Build(self, field, dr_gama=3, dr_legacy=7, silent=True):
    ''' Get Legacy Survey photometry for objects in the GAMA DR`dr_gama`
    photo+spec objects from the sweep files. This is meant to run on nersc
    but you can also manually download the sweep files and specify the dir
    where the sweep files are located in.
    '''
    from pydl.pydlutils.spheregroup import spherematch
    # NOTE(review): for dr_legacy == 5 no tractor_s_dir is defined, so the
    # except-branch at the bottom would raise NameError -- confirm intended.
    if dr_legacy == 5:
        sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
        sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
        tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/'
    elif dr_legacy == 7:
        sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
        sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
        tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
        tractor_s_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
    elif dr_legacy == 8:
        sweep_n_dir = \
            '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/sweep/8.0/'
        sweep_s_dir = \
            '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0/'
        tractor_n_dir = \
            '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/tractor/'
        # NOTE(review): dr7 path in the dr8 branch -- looks like a
        # copy-paste bug; confirm against the dr8 directory layout.
        tractor_s_dir = \
            '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/south/tractor/'
    # read in the names of the sweep files
    fsweep = ''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat'])
    if not os.path.isfile(fsweep): _ = self._getSweeps(field, silent=silent)
    sweep_files = np.loadtxt(fsweep, unpack=True, usecols=[0], dtype='S')
    if not silent: print("there are %i sweep files in the %s GAMA region" % (len(sweep_files), field))
    # read in GAMA objects
    gama = GAMA()
    gama_data = gama.Read(field, data_release=dr_gama, silent=silent)
    sweep_dict = {}
    gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict = {}, {}, {}, {}
    # loop through the files and only keep ones that spherematch with GAMA objects
    for i_f, f in enumerate(sweep_files):
        # read in sweep object (try north then south directory; if the
        # file exists in neither, fits.open below will fail on the last
        # candidate path)
        for sweep_dir in [sweep_n_dir, sweep_s_dir]:
            fsweep = os.path.join(sweep_dir, f.decode('unicode_escape'))
            if os.path.isfile(fsweep): break
        sweep = fits.open(fsweep)[1].data
        if not silent: print('matching %s' % fsweep)
        # spherematch the sweep objects with GAMA objects
        # (0.000277778 deg = 1 arcsec matching radius; the larger catalog
        # is always passed first, with indices swapped back afterwards)
        if len(sweep['ra']) > len(gama_data['photo']['ra']):
            match = spherematch(sweep['ra'], sweep['dec'],
                gama_data['photo']['ra'], gama_data['photo']['dec'], 0.000277778)
        else:
            match_inv = spherematch(gama_data['photo']['ra'], gama_data['photo']['dec'],
                sweep['ra'], sweep['dec'], 0.000277778)
            match = [match_inv[1], match_inv[0], match_inv[2]]
        if not silent:
            print('%i matches from the %s sweep file' % (len(match[0]), f))
        # save sweep photometry to `sweep_dict`
        for key in sweep.names:
            if i_f == 0:
                sweep_dict[key.lower()] = sweep[key][match[0]]
            else:
                sweep_dict[key.lower()] = np.concatenate([sweep_dict[key.lower()], sweep[key][match[0]]])
        # save matching GAMA data ('photo', 'spec', and kcorrects)
        for gkey, gdict in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'],
                [gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict]):
            for key in gama_data[gkey].keys():
                if i_f == 0:
                    gdict[key] = gama_data[gkey][key][match[1]]
                else:
                    gdict[key] = np.concatenate([gdict[key], gama_data[gkey][key][match[1]]])
        del sweep # free memory? (apparently not really)
    if not silent:
        print('========================')
        print('%i objects out of %i GAMA objects mached' % (len(sweep_dict['ra']), len(gama_data['photo']['dec'])) )
    # sanity check: all per-group dictionaries stayed row-aligned
    assert len(sweep_dict['ra']) == len(gama_photo_dict['ra'])
    assert len(sweep_dict['ra']) == len(gama_spec_dict['ra'])
    assert len(sweep_dict['ra']) == len(gama_kcorr0_dict['mass'])
    assert len(sweep_dict['ra']) == len(gama_kcorr1_dict['mass'])
    # writeout all the GAMA objects without sweep objects
    if not silent:
        nosweep = ~np.in1d(gama_data['photo']['objid'], gama_photo_dict['objid'])
        f_nosweep = ''.join([UT.dat_dir(),
            'GAMAdr', str(dr_gama), '.', field, '.LEGACYdr', str(dr_legacy), '.nosweep_match.fits'])
        print('========================')
        print('Writing out RA, Dec of %i GAMA objects without Legacy sweep objects to %s' %
            (np.sum(nosweep), f_nosweep))
        tb = aTable([gama_data['photo']['ra'][nosweep], gama_data['photo']['dec'][nosweep]],
            names=('ra', 'dec'))
        tb.meta['COMMENTS'] = 'RA, Dec of GAMA objects without matches in Legacy DR5 sweep'
        tb.write(f_nosweep, format='fits', overwrite=True)
        #np.savetxt(f_nosweep, np.array([gama_data['photo']['ra'], gama_data['photo']['dec']]).T, header='RA, Dec')
    # read apfluxes from tractor catalogs (try north first; a missing
    # tractor file raises ValueError, in which case the south dir is used)
    try:
        apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
            sweep_dict['objid'], tractor_dir=tractor_n_dir)
    except ValueError:
        apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
            sweep_dict['objid'], tractor_dir=tractor_s_dir)
    assert apflux_dict['apflux_g'].shape[0] == len(sweep_dict['brickname'])
    # save data to hdf5 file
    if not silent: print('writing to %s' % self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy))
    f = h5py.File(self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy), 'w')
    grp_gp = f.create_group('gama-photo')
    grp_gs = f.create_group('gama-spec')
    grp_k0 = f.create_group('gama-kcorr-z0.0')
    grp_k1 = f.create_group('gama-kcorr-z0.1')
    grp_lp = f.create_group('legacy-photo')
    for key in sweep_dict.keys():
        self._h5py_create_dataset(grp_lp, key, sweep_dict[key])
    for key in apflux_dict.keys(): # additional apflux data.
        self._h5py_create_dataset(grp_lp, key, apflux_dict[key])
    for key in gama_photo_dict.keys():
        grp_gp.create_dataset(key, data=gama_photo_dict[key])
    for key in gama_spec_dict.keys():
        grp_gs.create_dataset(key, data=gama_spec_dict[key])
    for key in gama_kcorr0_dict.keys():
        grp_k0.create_dataset(key, data=gama_kcorr0_dict[key])
    for key in gama_kcorr1_dict.keys():
        grp_k1.create_dataset(key, data=gama_kcorr1_dict[key])
    f.close()
    return None
def _getSweeps(self, field, silent=True):
    ''' Construct list of sweep files given GAMA object.

    Sweep files tile the sky in 10 deg (RA) x 5 deg (Dec) bricks named
    sweep-<ramin><p/m><|decmin|>-<ramax><p/m><|decmax|>.fits; this writes
    the list of brick file names covering the field's footprint to
    `UT.dat_dir()/legacy/<field>.sweep_list.dat`.
    '''
    # read in GAMA objects in field
    gama = GAMA()
    if field == 'all': raise ValueError("only select specific GAMA fields; not the entire data release")
    gama_data = gama.Read(field, silent=silent)
    # get brickmin and brickmax of sweep files
    # (RA edges snap to 10 deg, Dec edges to 5 deg)
    ra_mins = 10.*np.arange(gama_data['photo']['ra'].min() // 10., (gama_data['photo']['ra'].max() // 10.) + 1)
    ra_maxs = ra_mins + 10.
    dec_mins = 5.*np.arange(gama_data['photo']['dec'].min() // 5., (gama_data['photo']['dec'].max() // 5.) + 1)
    dec_maxs = dec_mins + 5.
    legacy_gama_sweep = []
    for i in range(len(ra_mins)):
        for j in range(len(dec_mins)):
            # 'm' encodes a negative declination, 'p' a positive one
            if dec_mins[j] < 0: pm_sign = 'm'
            else: pm_sign = 'p'
            brickmin = ''.join([str(int(ra_mins[i])).zfill(3), pm_sign,
                str(int(np.abs(dec_mins[j]))).zfill(3)])
            if dec_maxs[j] < 0: pm_sign = 'm'
            else: pm_sign = 'p'
            brickmax = ''.join([str(int(ra_maxs[i])).zfill(3), pm_sign,
                str(int(np.abs(dec_maxs[j]))).zfill(3)])
            f_sweep = ''.join(['sweep-', brickmin, '-', brickmax, '.fits'])
            legacy_gama_sweep.append(f_sweep)
            if not silent: print('... %s' % f_sweep)
    np.savetxt(''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat']),
        legacy_gama_sweep, fmt='%s')
    return ra_mins, dec_mins
def _getTractorApflux(self, brickname, objids,
    tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/', silent=True):
    ''' The catalog is constructed from the sweep catalog and the
    GAMA DR3 photo+spec data. The sweep catalog does not include
    all the photometric data from the legacy survey. This methods
    appends 'apflux_g', 'apflux_r', 'apflux_z' and relevant columsn
    from the tractor files.
    This can (and probably should) be extended to other columns

    Parameters
    ----------
    brickname : array-like
        per-object brick names (parallel to `objids`).
    objids : array-like
        per-object tractor ids.
        NOTE(review): objids are used directly as *row indices* into the
        tractor table -- this assumes objid == row position in each
        tractor file; confirm for the data release in use.
    tractor_dir : str
        directory containing the tractor catalogs.

    Returns
    -------
    dict
        'apflux_<b>', 'apflux_ivar_<b>', 'apflux_resid_<b>' for
        b in g, r, z; each of shape (N, 8) aperture fluxes.
    '''
    bricks_uniq = np.unique(brickname) # unique bricks
    # tractor files live under a subdirectory named by the first three
    # characters of the brick name
    AAAs = np.array([brick[:3] for brick in bricks_uniq])
    # apfluxes in 'g', 'r', and 'z' bands
    bands = ['g', 'r', 'z']
    apfluxes = np.zeros((3, len(brickname), 8))
    apflux_ivars = np.zeros((3, len(brickname), 8))
    apflux_resids = np.zeros((3, len(brickname), 8))
    n_brick = 0
    for ii, AAA, brick in zip(range(len(AAAs)), AAAs, bricks_uniq):
        name = ''.join([tractor_dir, AAA, '/tractor-', brick, '.fits'])
        if not silent: print('%i of %i unique bricks -- %s' % (ii, len(AAAs), brick))
        if not os.path.isfile(name): raise ValueError('%s tractor file not available' % name)
        f_tractor = fits.open(name)
        tractor = f_tractor[1].data
        inbrick = (brickname == brick)
        for i_k, key in enumerate(bands):
            apfluxes[i_k, inbrick, :] = tractor.field('apflux_'+key)[objids[inbrick]]
            apflux_ivars[i_k, inbrick, :] = tractor.field('apflux_ivar_'+key)[objids[inbrick]]
            apflux_resids[i_k, inbrick, :] = tractor.field('apflux_resid_'+key)[objids[inbrick]]
        n_brick += np.sum(inbrick)
    # every object must have been covered by exactly one brick
    assert n_brick == len(brickname)
    # return dictionary with appropriate keys
    apflux_dict = {}
    for i_k, key in enumerate(bands):
        apflux_dict['apflux_'+key] = apfluxes[i_k,:,:]
        apflux_dict['apflux_ivar_'+key] = apflux_ivars[i_k,:,:]
        apflux_dict['apflux_resid_'+key] = apflux_resids[i_k,:,:]
    return apflux_dict
class Legacy(Catalog):
''' Object class for building and selecting BGS-like galaxy samples from
Legacy Surveys imaging sweeps: spatial masking, star--galaxy separation,
and photometric quality cuts over a hard-coded 1400 deg^2 test region.
'''
def _1400deg2_test(self, dr=8, rlimit=None):
    ''' Construct the BGS target sample over the 1400 deg^2 test region
    by applying spatial masking, star-galaxy separation, and photometric
    quality cuts to the pre-compiled Legacy sweeps produced by
    `_collect_1400deg2_test`.

    Parameters
    ----------
    dr : int
        Legacy Surveys data release of the input sweeps (default 8).
    rlimit : float, optional
        r-band magnitude limit used when the sweeps were compiled; it
        selects which input file is read and names the output file.
    '''
    # hardcoded patch of sky (kept for reference; the bounds are already
    # baked into the compiled sweep file)
    ra_min, ra_max = 160., 230.
    dec_min, dec_max = -2., 18.
    area = self._1400deg2_area()
    # read legacy sweeps data in 1400 deg^2 region
    if rlimit is None:
        _fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
    else:
        _fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
    fsweep = h5py.File(_fsweep, 'r')
    sweep = {}
    for k in fsweep.keys(): sweep[k] = fsweep[k][...]
    print('%i sweep objects' % len(sweep['flux_r']))
    # spatial masking
    _spatial_mask = self.spatial_mask(sweep['maskbits'], [sweep['nobs_g'], sweep['nobs_r'], sweep['nobs_z']])
    print('%i spatial mask' % np.sum(_spatial_mask))
    # star-galaxy separation
    _star_galaxy = self.star_galaxy(sweep['gaia_phot_g_mean_mag'], sweep['flux_r'])
    print('%i star-galaxy separation' % np.sum(_star_galaxy))
    # quality cut on extinction-corrected magnitudes/colors
    gmag = self.flux_to_mag(sweep['flux_g']/sweep['mw_transmission_g'])
    rmag = self.flux_to_mag(sweep['flux_r']/sweep['mw_transmission_r'])
    zmag = self.flux_to_mag(sweep['flux_z']/sweep['mw_transmission_z'])
    _quality_cut = self.quality_cut(
        np.array([sweep['fracflux_g'], sweep['fracflux_r'], sweep['fracflux_z']]),
        np.array([sweep['fracmasked_g'], sweep['fracmasked_r'], sweep['fracmasked_z']]),
        np.array([sweep['fracin_g'], sweep['fracin_r'], sweep['fracin_z']]),
        gmag - rmag,
        rmag - zmag)
    print('%i quality cut' % np.sum(_quality_cut))
    sample_select = (_spatial_mask & _star_galaxy & _quality_cut)
    print('%i (spatial mask) & (star-galaxy sep.) & (quality cut)' % (np.sum(sample_select)))
    # bug fix: the output name previously always interpolated rlimit and
    # raised TypeError when rlimit was None
    if rlimit is None:
        fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.hdf5')
    else:
        fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.rlim%.1f.hdf5' % rlimit)
    f = h5py.File(fout, 'w')
    for k in sweep.keys():
        self._h5py_create_dataset(f, k, sweep[k][sample_select])
    f.close()
    # bug fix: a second, unreachable `return None` was removed
    return None
def _1400deg2_area(self):
''' area of 1400 deg^2 test region
'''
# hardcoded patch of sky
ra_min, ra_max = 160., 230.
dec_min, dec_max = -2., 18.
area = (np.radians(ra_max) - np.radians(ra_min))*(np.sin(np.radians(dec_max)) - np.sin(np.radians(dec_min)))
area *= (180/np.pi)**2
print('%.f deg^2 test region' % area)
return area
def quality_cut(self, frac_flux, fracmasked, fracin, g_r, r_z):
    ''' Baseline photometric quality cut.

    * frac_flux_[g,r,z]<5 Not overwhelmed by neighbouring source (any band)
    * fracmasked_[g,r,z]<0.4 Model not dominated by masked pixels in any band
    * fracin_[g,r,z]>0.3 Most of the model flux not outside the region of the data used to fit the model
    * -1< g-r < 4 Not an absolutely bizarre colour
    * -1< r-z < 4 Not an absolutely bizarre colour

    The first three inputs are (3, N) arrays ordered g, r, z.
    '''
    assert frac_flux.shape[0] == 3
    assert fracmasked.shape[0] == 3
    assert fracin.shape[0] == 3
    # not overwhelmed by a neighbouring source in any band
    good_fracflux = np.all(frac_flux < 5., axis=0)
    # model not dominated by masked pixels in any band
    good_fracmasked = np.all(fracmasked < 0.4, axis=0)
    # most of the model flux inside the region used for the fit
    good_fracin = np.all(fracin > 0.3, axis=0)
    # sane colours
    good_colour = (g_r > -1.) & (g_r < 4.) & (r_z > -1.) & (r_z < 4.)
    return good_fracflux & good_fracmasked & good_fracin & good_colour
def star_galaxy(self, gaia_G, r_flux):
    ''' Star--galaxy separation using GAIA and tractor photometry: keep
    objects with (gaia G mag) - (raw r mag) > 0.6, or with gaia G == 0.
    '''
    color_offset = gaia_G - self.flux_to_mag(r_flux)
    return (color_offset > 0.6) | (gaia_G == 0)
def spatial_mask(self, maskbits, nobs):
    ''' Spatial veto mask. True for objects away from
    * bright stars
    * medium bright stars
    * clusters
    * large galaxies
    with no ALLMASK bit set and at least one observation in each of
    g, r, z. `nobs` is a (nobs_g, nobs_r, nobs_z) sequence.
    '''
    nobs_g, nobs_r, nobs_z = nobs
    bits = np.uint64(maskbits)
    near_bright_star = (bits & np.uint64(2**1)) != 0    # bright stars
    near_medium_star = (bits & np.uint64(2**11)) != 0   # medium bright stars
    near_cluster = (bits & np.uint64(2**13)) != 0       # clusters
    near_large_gal = (bits & np.uint64(2**12)) != 0     # large galaxies
    # ALLMASK_G/R/Z (bits 5, 6, 7)
    allmask = ((maskbits & 2**6) != 0) | ((maskbits & 2**5) != 0) | ((maskbits & 2**7) != 0)
    no_obs = (nobs_g < 1) | (nobs_r < 1) | (nobs_z < 1)
    vetoed = (near_bright_star | near_medium_star | near_cluster |
              near_large_gal | allmask | no_obs)
    return ~vetoed
def _collect_1400deg2_test(self, dr=8, rlimit=None):
    ''' Collect sweep data within the same 1400 deg2 test region that
    Omar used for dr7 and save it to file. Meant to run on NERSC (cori)
    where the Legacy Surveys data directories are mounted.

    Parameters
    ----------
    dr : int
        Legacy Surveys data release; only 8 is implemented.
    rlimit : float, optional
        if given, keep only objects brighter than this extinction-
        corrected r-band magnitude.
    '''
    import glob
    if dr != 8: raise NotImplementedError
    if os.environ['NERSC_HOST'] != 'cori': raise ValueError('this script is meant to run on cori only')
    # hardcoded patch of sky
    ra_min, ra_max = 160., 230.
    dec_min, dec_max = -2., 18.
    dir_legacy = '/project/projectdirs/cosmo/data/legacysurvey/'
    dir_north = os.path.join(dir_legacy, 'dr8/north/sweep/8.0')
    dir_south = os.path.join(dir_legacy, 'dr8/south/sweep/8.0')
    fsweeps_N = glob.glob('%s/*.fits' % dir_north)
    print('%i North sweep files' % len(fsweeps_N))
    fsweeps_S = glob.glob('%s/*.fits' % dir_south)
    print('%i South sweep files' % len(fsweeps_S))
    # glob already returns full paths, so the joins below are no-ops
    # (os.path.join discards the first argument for absolute paths)
    fsweeps = sorted([os.path.join(dir_north, _fs) for _fs in fsweeps_N] +
                     [os.path.join(dir_south, _fs) for _fs in fsweeps_S])
    sweeps = {}
    for _fsweep in fsweeps:
        # get sweep RA and Dec range from the file name
        sweep_ra_min, sweep_ra_max, sweep_dec_min, sweep_dec_max = self._parse_brickname(_fsweep)
        # skip sweeps that cannot overlap the test region at all
        not_in_region = (
            (sweep_ra_max < ra_min) |
            (sweep_ra_min > ra_max) |
            (sweep_dec_max < dec_min) |
            (sweep_dec_min > dec_max)
        )
        if not_in_region: continue
        # read sweep file
        sweep = fits.open(_fsweep)[1].data
        # area that's within the test region
        mask_region = (
            (sweep['RA'] >= ra_min) &
            (sweep['RA'] <= ra_max) &
            (sweep['DEC'] >= dec_min) &
            (sweep['DEC'] <= dec_max))
        if np.sum(mask_region) == 0: continue
        if rlimit is None:
            # bug fix: np.ones(sweep['RA']) passed the RA *values* as the
            # array shape, which raises; we want one True per object.
            rcut = np.ones(len(sweep['RA']), dtype=bool)
        else:
            # extinction-corrected r-band flux limit
            rflux = sweep['FLUX_R'] / sweep['MW_TRANSMISSION_R']
            rcut = (rflux > 10**((22.5-rlimit)/2.5))
        print('%i obj in %s' % (np.sum(mask_region), os.path.basename(_fsweep)))
        if len(sweeps.keys()) == 0:
            for k in sweep.names:
                sweeps[k] = sweep[k][mask_region & rcut]
        else:
            for k in sweep.names:
                sweeps[k] = np.concatenate([sweeps[k], sweep[k][mask_region & rcut]], axis=0)
    if rlimit is None:
        fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
    else:
        fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
    f = h5py.File(fout, 'w')
    for k in sweeps.keys():
        self._h5py_create_dataset(f, k, sweeps[k])
    f.close()
    return None
def _parse_brickname(self, brickname):
''' parse ra and dec range from brick name
'''
name = os.path.basename(brickname).replace('.fits', '') # get rid of directory and ext
radec1 = name.split('-')[1]
radec2 = name.split('-')[2]
if 'p' in radec1: _c = 'p'
elif 'm' in radec1: _c = 'm'
ra_min = float(radec1.split(_c)[0])
dec_min = float(radec1.split(_c)[1])
if 'p' in radec2: _c = 'p'
elif 'm' in radec2: _c = 'm'
ra_max = float(radec2.split(_c)[0])
dec_max = float(radec2.split(_c)[1])
return ra_min, ra_max, dec_min, dec_max
def _Tycho(self, ra_lim=None, dec_lim=None):
    ''' Read the Tycho-2 star catalog, optionally restricted to an RA
    and/or Dec range ([min, max] in degrees).
    '''
    raw = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'tycho2.fits'))[1].data
    keep = np.ones(len(raw['RA'])).astype(bool)
    if ra_lim is not None:
        keep &= (raw['RA'] >= ra_lim[0]) & (raw['RA'] <= ra_lim[1])
    if dec_lim is not None:
        keep &= (raw['DEC'] >= dec_lim[0]) & (raw['DEC'] <= dec_lim[1])
    return {name: raw[name][keep] for name in raw.names}
def _LSLGA(self, ra_lim=None, dec_lim=None):
    ''' Read the Legacy Survey Large Galaxy Atlas (v2.0), optionally
    restricted to an RA and/or Dec range ([min, max] in degrees).
    '''
    raw = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'LSLGA-v2.0.fits'))[1].data
    keep = np.ones(len(raw['RA'])).astype(bool)
    if ra_lim is not None:
        keep &= (raw['RA'] >= ra_lim[0]) & (raw['RA'] <= ra_lim[1])
    if dec_lim is not None:
        keep &= (raw['DEC'] >= dec_lim[0]) & (raw['DEC'] <= dec_lim[1])
    return {name: raw[name][keep] for name in raw.names}
def _GamaLegacy_TractorAPFLUX():
    ''' Retroactively add apflux columns from the tractor catalogs
    to the GamaLegacy catalog constructed and saved to file. This is a
    hack.

    Fixes over the original:
    - `_getTractorApflux` takes the keyword `tractor_dir`, not `dir`
      (passing `dir=` raised TypeError).
    - h5py datasets are read with `[...]` instead of the deprecated
      (now removed) `.value` attribute, consistent with the rest of
      this file.
    '''
    gleg = GamaLegacy()
    # open saved gama-legacy catalog for appending
    # NOTE(review): GamaLegacy._File requires a `field` argument; calling
    # it with none raises TypeError -- confirm which field this hack was
    # meant to patch.
    f_gleg = h5py.File(gleg._File(), 'r+')
    # legacy photometry group
    grp_lp = f_gleg['legacy-photo']
    if 'apflux_g' in grp_lp.keys():
        # the columns already exist; do not append them twice
        f_gleg.close()
        raise ValueError('apfluxes already in the catalog')
    # read apfluxes from tractor catalogs
    apflux_dict = gleg._getTractorApflux(
        grp_lp['brickname'][...], grp_lp['objid'][...],
        tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/')
    assert apflux_dict['apflux_g'].shape[0] == len(grp_lp['brickname'][...])
    # save fluxes to the dataset
    for key in apflux_dict.keys():
        grp_lp.create_dataset(key, data=apflux_dict[key])
    f_gleg.close()
    return None
| 231 | 0 | 108 |
cefda1f1bb598fc270505fb669b6c7f27ea76a85 | 604 | py | Python | Chapter09/combining/switch_latest.py | PacktPublishing/Hands-On-Reactive-Programming-with-Python | d9da4f3f070695508bb36ef9d97f1212ecaf6fab | [
"MIT"
] | 56 | 2018-06-28T05:04:36.000Z | 2022-02-06T18:36:29.000Z | Chapter09/combining/switch_latest.py | azataiot/Hands-On-Reactive-Programming-with-Python | d9da4f3f070695508bb36ef9d97f1212ecaf6fab | [
"MIT"
] | 2 | 2019-08-19T03:51:49.000Z | 2019-09-25T09:00:57.000Z | Chapter09/combining/switch_latest.py | azataiot/Hands-On-Reactive-Programming-with-Python | d9da4f3f070695508bb36ef9d97f1212ecaf6fab | [
"MIT"
] | 18 | 2018-09-16T05:50:13.000Z | 2022-01-02T19:59:04.000Z | import rx
import rx.operators as ops
from rx.subject import Subject

# Demonstration of the switch_latest operator: `higher_order` emits
# observables, and switch_latest forwards items only from the most
# recently emitted inner observable, dropping items from earlier ones.
obs1 = Subject()
obs2 = Subject()
obs3 = Subject()
higher_order = Subject()
higher_order.pipe(ops.switch_latest()).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed")
)
higher_order.on_next(obs1)
obs1.on_next("1: 1")
obs1.on_next("1: 2")
higher_order.on_next(obs2)
obs1.on_next("1: 3")  # dropped: obs1 is no longer the latest inner observable
obs2.on_next("2: 1")
obs2.on_next("2: 2")
higher_order.on_next(obs3)
obs2.on_next("2: 3")  # dropped: obs2 was switched out by obs3
obs3.on_next("3: 1")
obs3.on_next("3: 2")
| 22.37037 | 55 | 0.703642 | import rx
import rx.operators as ops
from rx.subject import Subject
obs1 = Subject()
obs2 = Subject()
obs3 = Subject()
higher_order = Subject()
higher_order.pipe(ops.switch_latest()).subscribe(
on_next=lambda i: print("on_next {}".format(i)),
on_error=lambda e: print("on_error: {}".format(e)),
on_completed=lambda: print("on_completed")
)
higher_order.on_next(obs1)
obs1.on_next("1: 1")
obs1.on_next("1: 2")
higher_order.on_next(obs2)
obs1.on_next("1: 3")
obs2.on_next("2: 1")
obs2.on_next("2: 2")
higher_order.on_next(obs3)
obs2.on_next("2: 3")
obs3.on_next("3: 1")
obs3.on_next("3: 2")
| 0 | 0 | 0 |
4fdc4c51c9361762ea204928f6dd37955fd5ddb7 | 730 | py | Python | relocatable.py | xyzz/rop-rpc | 8772172cc1d796233e8f069f65749519f8f07813 | [
"MIT"
] | 88 | 2017-12-31T16:19:38.000Z | 2020-05-30T03:54:48.000Z | relocatable.py | xyzz/rop-rpc | 8772172cc1d796233e8f069f65749519f8f07813 | [
"MIT"
] | 1 | 2018-01-02T17:51:17.000Z | 2018-01-02T17:51:17.000Z | relocatable.py | xyzz/rop-rpc | 8772172cc1d796233e8f069f65749519f8f07813 | [
"MIT"
] | 9 | 2018-01-01T04:29:12.000Z | 2021-10-30T12:19:41.000Z | from util import isint
data_base = Relocatable(Relocatable.data, 0)
main_base = Relocatable(Relocatable.main, 0)
wk_base = Relocatable(Relocatable.wk, 0)
| 24.333333 | 73 | 0.608219 | from util import isint
class Relocatable():
    '''An address expressed as an offset relative to one of three
    relocation bases: `data`, `main`, or `wk` (presumably the WebKit
    module -- confirm against the consumer of these tags). Adding or
    subtracting a plain integer adjusts the offset while keeping the
    base tag.
    '''
    # relocation base tags
    data = 1
    main = 2
    wk = 3

    def __init__(self, tag, imm):
        self.tag = tag  # which base this value is relative to
        self.imm = imm  # offset from that base

    def __add__(self, x):
        if not isint(x):
            raise RuntimeError("cannot __add__ a {}".format(x))
        return Relocatable(self.tag, self.imm + x)

    def __sub__(self, x):
        if not isint(x):
            raise RuntimeError("cannot __sub__ a {}".format(x))
        return Relocatable(self.tag, self.imm - x)

    def __repr__(self):
        # bug fix: the class name was misspelled 'Relocable' in the repr
        return "Relocatable<tag={}, imm=0x{:x}>".format(self.tag, self.imm)
data_base = Relocatable(Relocatable.data, 0)
main_base = Relocatable(Relocatable.main, 0)
wk_base = Relocatable(Relocatable.wk, 0)
| 406 | 145 | 23 |
f5d1ede32658260aefabba71fbd975e01fbb4f23 | 1,674 | py | Python | server/api/resources/user.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 5 | 2019-02-27T03:06:02.000Z | 2021-11-15T20:12:50.000Z | server/api/resources/user.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 48 | 2019-02-14T21:15:18.000Z | 2021-10-02T01:18:49.000Z | server/api/resources/user.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 1 | 2021-03-22T23:47:54.000Z | 2021-03-22T23:47:54.000Z | """
Module defining a Falcon resource to provide login session info
Copyright (C) 2016 ERT Inc.
"""
import falcon
import api.json as json
from api.auth import auth
route = "user"
class User():
"""
Falcon resource object providing API login session info
"""
def on_get(self, request, resp):
"""
return JSON object, representing the current session's user info
"""
user_id = auth.get_user_id(request)
# return JSON user representation
user = get_user(user_id)
json_user = json.dumps(user)
resp.body = json_user
def get_user(user_id=None):
"""
Return object representing the logged in user
Keyword Parameters:
user_id -- String, identifier representing the logged in user
(Default: None, representing an public/anonymous user session)
>>> # Check public/Anonymous user
>>> from pprint import pprint
>>> anonymous_user = get_user()
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> anonymous_user = get_user(None) #public/Anonymous user
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> # Check logged in user
>>> user = get_user('uid=bob.newhart,ou=People,o=bobnewhart.com')
>>> pprint(user)
{'user': {'description': 'Authenticated user.',
'id': 'uid=bob.newhart,ou=People,o=bobnewhart.com'}}
"""
description = "Authenticated user."
if user_id is None:
description = "Anonymous user."
attributes = {'id': user_id, 'description': description}
user_object = {'user': attributes}
return user_object
| 29.892857 | 72 | 0.643967 | """
Module defining a Falcon resource to provide login session info
Copyright (C) 2016 ERT Inc.
"""
import falcon
import api.json as json
from api.auth import auth
route = "user"
class User():
"""
Falcon resource object providing API login session info
"""
def on_get(self, request, resp):
"""
return JSON object, representing the current session's user info
"""
user_id = auth.get_user_id(request)
# return JSON user representation
user = get_user(user_id)
json_user = json.dumps(user)
resp.body = json_user
def get_user(user_id=None):
"""
Return object representing the logged in user
Keyword Parameters:
user_id -- String, identifier representing the logged in user
(Default: None, representing an public/anonymous user session)
>>> # Check public/Anonymous user
>>> from pprint import pprint
>>> anonymous_user = get_user()
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> anonymous_user = get_user(None) #public/Anonymous user
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> # Check logged in user
>>> user = get_user('uid=bob.newhart,ou=People,o=bobnewhart.com')
>>> pprint(user)
{'user': {'description': 'Authenticated user.',
'id': 'uid=bob.newhart,ou=People,o=bobnewhart.com'}}
"""
description = "Authenticated user."
if user_id is None:
description = "Anonymous user."
attributes = {'id': user_id, 'description': description}
user_object = {'user': attributes}
return user_object
| 0 | 0 | 0 |
e732e1d19ad6c4523e954b94bb4b809dbdca8ac0 | 840 | py | Python | capp/data/models/talk.py | Thornycrackers-Forks/capp | b2493676cb4645a82955f56faa990776d73d00a2 | [
"MIT"
] | 1 | 2019-10-08T00:49:18.000Z | 2019-10-08T00:49:18.000Z | capp/data/models/talk.py | Thornycrackers-Forks/capp | b2493676cb4645a82955f56faa990776d73d00a2 | [
"MIT"
] | 14 | 2019-10-08T02:24:07.000Z | 2022-02-26T18:40:20.000Z | capp/data/models/talk.py | Thornycrackers-Forks/capp | b2493676cb4645a82955f56faa990776d73d00a2 | [
"MIT"
] | 1 | 2019-11-01T00:38:39.000Z | 2019-11-01T00:38:39.000Z | """Talk model."""
from typing import Any
from django.db import models
from django.utils.translation import gettext_lazy as _
from domain import utils
class Talk(models.Model):
    """Talk submission."""
    # Display name of the talk; also the source text for the generated slug.
    name = models.CharField(_("name"), max_length=255, unique=True)
    # URL-safe identifier; auto-filled on first save when left blank.
    slug = models.SlugField(unique=True, blank=True)
    abstract = models.TextField(blank=True)
    description = models.TextField(blank=True)
    notes = models.TextField(blank=True)
    def __str__(self) -> str:
        """Return name as a string."""
        return self.name
    def save(self, *args: Any, **kwargs: Any) -> None:
        """Save a slug on save."""
        # Generate only once (when blank) so existing URLs stay stable.
        if not self.slug:
            self.slug = utils.generate_unique_slug(Talk, self.name)
        super().save(*args, **kwargs)
| 26.25 | 67 | 0.644048 | """Talk model."""
from typing import Any
from django.db import models
from django.utils.translation import gettext_lazy as _
from domain import utils
class Talk(models.Model):
    """Talk submission."""
    # Display name of the talk; also the source text for the generated slug.
    name = models.CharField(_("name"), max_length=255, unique=True)
    # URL-safe identifier; auto-filled on first save when left blank.
    slug = models.SlugField(unique=True, blank=True)
    abstract = models.TextField(blank=True)
    description = models.TextField(blank=True)
    notes = models.TextField(blank=True)
    def __str__(self) -> str:
        """Return name as a string."""
        return self.name
    def save(self, *args: Any, **kwargs: Any) -> None:
        """Save a slug on save."""
        # Generate only once (when blank) so existing URLs stay stable.
        if not self.slug:
            self.slug = utils.generate_unique_slug(Talk, self.name)
        super().save(*args, **kwargs)
    class Meta:  # noqa D201
        verbose_name = "talk"
| 0 | 33 | 27 |
483309e08191fc64cd88b33fb408c72552b1938c | 2,319 | py | Python | code/NonLinearRegression/HowNNWorks.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | 3 | 2021-05-25T10:18:23.000Z | 2022-02-09T08:55:14.000Z | code/NonLinearRegression/HowNNWorks.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | null | null | null | code/NonLinearRegression/HowNNWorks.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-#
'''
# Name: HowNNWorks
# Description:
# Author: super
# Date: 2020/5/24
'''
import numpy as np
import matplotlib.pyplot as plt
from HelperClass2.NeuralNet_2_0 import *
train_data_name = "../data/ch08.train.npz"
test_data_name = "../data/ch08.test.npz"
if __name__ == '__main__':
dataReader = DataReader_2_0(train_data_name, test_data_name)
dataReader.ReadData()
dataReader.GenerateValidationSet()
n_input, n_hidden, n_output = 1, 2, 1
eta, batch_size, max_epoch = 0.05, 10, 5000
eps = 0.001
hp = HyperParameters_2_0(n_input, n_hidden, n_output, eta, max_epoch, batch_size, eps, NetType.Fitting,
InitialMethod.Xavier)
net = NeuralNet_2_0(hp, "sin_121")
net.LoadResult()
print(net.wb1.W)
print(net.wb1.B)
print(net.wb2.W)
print(net.wb2.B)
# net.train(dataReader, 50, True)
# net.ShowTrainingHistory_2_0()
# ShowResult(net, dataReader, hp.toString())
ShowResult2D(net, hp.toString()) | 28.280488 | 107 | 0.547219 | # -*- coding: utf-8 -*-#
'''
# Name: HowNNWorks
# Description:
# Author: super
# Date: 2020/5/24
'''
import numpy as np
import matplotlib.pyplot as plt
from HelperClass2.NeuralNet_2_0 import *
train_data_name = "../data/ch08.train.npz"
test_data_name = "../data/ch08.test.npz"
def ShowResult2D(net, title):
    """Visualize the internals of the trained 1-2-1 network on [0, 1].

    Runs inference on 21 evenly spaced points, prints the intermediate
    tensors (Z1, A1, Z2), then shows four figures comparing input,
    pre-activations, activations, and the final output.
    NOTE(review): *title* is accepted but not used — presumably intended
    for the figures; confirm before removing.
    """
    count = 21
    # Column vector of evenly spaced inputs in [0, 1].
    TX = np.linspace(0, 1, count).reshape(count, 1)
    # Inference populates net.Z1/A1/Z2 as side effects (read below).
    TY = net.inference(TX)
    print("TX=", TX)
    print("Z1=", net.Z1)
    print("A1=", net.A1)
    print("Z=", net.Z2)
    # Figure 1: both hidden-layer pre-activations (z1, z2) vs. the input.
    fig = plt.figure(figsize=(6, 6))
    p1, = plt.plot(TX, np.zeros((count, 1)), '.', c='black')
    p2, = plt.plot(TX, net.Z1[:, 0], '.', c='r')
    p3, = plt.plot(TX, net.Z1[:, 1], '.', c='g')
    plt.legend([p1, p2, p3], ["x", "z1", "z2"])
    plt.grid()
    plt.show()
    # Figure 2: first hidden unit before (z1) and after (a1) activation.
    fig = plt.figure(figsize=(6, 6))
    p1, = plt.plot(TX, np.zeros((count, 1)), '.', c='black')
    p2, = plt.plot(TX, net.Z1[:, 0], '.', c='r')
    p3, = plt.plot(TX, net.A1[:, 0], 'x', c='r')
    plt.legend([p1, p2, p3], ["x", "z1", "a1"])
    plt.grid()
    plt.show()
    # Figure 3: second hidden unit before (z2) and after (a2) activation.
    fig = plt.figure(figsize=(6, 6))
    p1, = plt.plot(TX, np.zeros((count, 1)), '.', c='black')
    p2, = plt.plot(TX, net.Z1[:, 1], '.', c='g')
    p3, = plt.plot(TX, net.A1[:, 1], 'x', c='g')
    plt.legend([p1, p2, p3], ["x", "z2", "a2"])
    plt.show()
    # Figure 4: the two activations and the network output z they combine into.
    fig = plt.figure(figsize=(6, 6))
    p1, = plt.plot(TX, net.A1[:, 0], '.', c='r')
    p2, = plt.plot(TX, net.A1[:, 1], '.', c='g')
    p3, = plt.plot(TX, net.Z2[:, 0], 'x', c='blue')
    plt.legend([p1, p2, p3], ["a1", "a2", "z"])
    plt.show()
if __name__ == '__main__':
    # Load train/test data and carve out a validation split.
    dataReader = DataReader_2_0(train_data_name, test_data_name)
    dataReader.ReadData()
    dataReader.GenerateValidationSet()
    # 1-2-1 network: one input, two hidden units, one output (fitting task).
    n_input, n_hidden, n_output = 1, 2, 1
    eta, batch_size, max_epoch = 0.05, 10, 5000
    eps = 0.001
    hp = HyperParameters_2_0(n_input, n_hidden, n_output, eta, max_epoch, batch_size, eps, NetType.Fitting,
                             InitialMethod.Xavier)
    net = NeuralNet_2_0(hp, "sin_121")
    # Load previously saved results instead of retraining (train call is
    # commented out below); then dump the learned weights and biases.
    net.LoadResult()
    print(net.wb1.W)
    print(net.wb1.B)
    print(net.wb2.W)
    print(net.wb2.B)
    # net.train(dataReader, 50, True)
    # net.ShowTrainingHistory_2_0()
    # ShowResult(net, dataReader, hp.toString())
ShowResult2D(net, hp.toString()) | 1,260 | 0 | 23 |
c69de8e9df90e58e11b6179aa79de2e44942e573 | 2,709 | py | Python | FCN/utils.py | MattToul/CycleGAN | 55b03de11c7c89ee55d1d8943721ea65991b9b05 | [
"BSD-3-Clause"
] | 11 | 2019-02-28T21:43:13.000Z | 2021-04-28T09:08:18.000Z | FCN/utils.py | superkoma/SNCycleGAN | 129c65ae14a9a5a497eb2b387ee75431181c48c2 | [
"BSD-3-Clause"
] | null | null | null | FCN/utils.py | superkoma/SNCycleGAN | 129c65ae14a9a5a497eb2b387ee75431181c48c2 | [
"BSD-3-Clause"
] | 2 | 2019-02-15T07:11:33.000Z | 2019-09-11T06:02:57.000Z | import os
import numpy as np
import torch
import pickle
| 33.444444 | 128 | 0.607235 | import os
import numpy as np
import torch
import pickle
def train_epoch(model, dataloader, epoch, optimizer, loss, use_cuda=True):
    """Run one training epoch and return the mean per-batch loss.

    Parameters:
    model -- torch.nn.Module mapping batch['image'] to predictions
    dataloader -- iterable of dicts with 'image' and 'target' tensors
    epoch -- int, current epoch index (used only for progress logging)
    optimizer -- torch optimizer stepping the model's parameters
    loss -- callable(predictions, targets) returning a scalar loss tensor
    use_cuda -- move each batch to the GPU when True (default True)
    """
    applyCuda = lambda x: x.cuda() if use_cuda else x
    losses = []
    model.train()
    for idx, batch in enumerate(dataloader):
        images = applyCuda(batch['image'])
        targets = applyCuda(batch['target'])
        predictions = model(images)
        try:
            Loss = loss(predictions, targets)
        except Exception:
            # Dump shapes to help diagnose a size mismatch, then re-raise the
            # original error. (The old `raise ()` was itself a TypeError and
            # masked the real exception; bare `raise` preserves it.)
            print(idx)
            print(images.size())
            print(targets.size())
            print(predictions.size())
            raise
        model.zero_grad()
        optimizer.zero_grad()
        Loss.backward()
        optimizer.step()
        # detach() instead of .data: same value, without aliasing autograd state.
        losses.append(Loss.detach().cpu().numpy())
        if idx % 80 == 0:
            print("Epoch {} - batch {} - Loss {}".format(epoch, idx, Loss.detach().cpu().numpy()))
    return np.mean(losses)
def validate(model, dataloader, loss, use_cuda=True):
    """Evaluate the model on *dataloader* and return the mean batch loss.

    Puts the model in eval mode and disables autograd: the original version
    built gradient graphs for every validation batch, wasting memory.
    """
    applyCuda = lambda x: x.cuda() if use_cuda else x
    losses = []
    model.eval()
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for batch in dataloader:
            images = applyCuda(batch['image'])
            targets = applyCuda(batch['target'])
            predictions = model(images)
            losses.append(loss(predictions, targets).cpu().numpy())
    return np.mean(losses)
def Train(
        model,
        optimizer,
        scheduler,
        loss,
        train_loader,
        val_loader,
        epochs,
        save_folder,
        use_cuda):
    """Full training loop: per-epoch train/validate, checkpoint the best model.

    Saves the state dict with the lowest validation loss to
    ``save_folder/checkpoint.pth.tar`` and pickles the per-epoch loss
    histories to ``train_losses.pickle`` / ``val_losses.pickle``.
    """
    save_checkpoint = os.path.join(save_folder,'checkpoint.pth.tar')
    train_losses = []
    val_losses = []
    print("STARTING TRAINING")
    for epoch in range(epochs):
        # NOTE(review): scheduler.step() runs *before* the epoch's optimizer
        # steps (pre-PyTorch-1.1 ordering) — confirm this is intentional.
        scheduler.step()
        train_loss = train_epoch(model, dataloader=train_loader, epoch=epoch, optimizer=optimizer, loss=loss, use_cuda=use_cuda)
        val_loss = validate(model, dataloader=val_loader, loss=loss, use_cuda=use_cuda)
        print("EPOCH " + str(epoch) + "/"+str(epochs))
        print("TRAINING LOSS "+ str(train_loss))
        print("VALIDATION LOSS "+str(val_loss))
        print("--------------------------------------------------")
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        # Overwrite the checkpoint whenever this epoch ties the best val loss.
        if val_loss == np.min(val_losses):
            torch.save(model.state_dict(), save_checkpoint)
    print("BEST MODEL AT EPOCH : " + str(np.argmin(val_losses)))
    with open(os.path.join(save_folder, "train_losses.pickle"), 'wb') as handle:
        pickle.dump(train_losses, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(save_folder, "val_losses.pickle"), 'wb') as handle:
        pickle.dump(val_losses, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 2,584 | 0 | 69 |
e2ac9df57320b119185b922215385361177ccb79 | 23 | py | Python | __init__.py | coxg/PyDataTable | 1d6db80308abd157c4575d55265b49627de5411d | [
"BSD-2-Clause"
] | null | null | null | __init__.py | coxg/PyDataTable | 1d6db80308abd157c4575d55265b49627de5411d | [
"BSD-2-Clause"
] | null | null | null | __init__.py | coxg/PyDataTable | 1d6db80308abd157c4575d55265b49627de5411d | [
"BSD-2-Clause"
] | null | null | null | from dataTable import * | 23 | 23 | 0.826087 | from dataTable import * | 0 | 0 | 0 |
b45346844e46c21d15018781842514318bb9828d | 9,912 | py | Python | python/sparkdl/transformers/named_image.py | happyapple668/spark-deep-learning | aeff9c9e2c24ed09c093414a7b51ec9bd42f1e86 | [
"Apache-2.0"
] | 1 | 2019-02-25T15:02:19.000Z | 2019-02-25T15:02:19.000Z | python/sparkdl/transformers/named_image.py | happyapple668/spark-deep-learning | aeff9c9e2c24ed09c093414a7b51ec9bd42f1e86 | [
"Apache-2.0"
] | null | null | null | python/sparkdl/transformers/named_image.py | happyapple668/spark-deep-learning | aeff9c9e2c24ed09c093414a7b51ec9bd42f1e86 | [
"Apache-2.0"
] | 2 | 2020-02-08T06:51:36.000Z | 2020-06-18T05:38:09.000Z | # Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.applications.imagenet_utils import decode_predictions
import numpy as np
from pyspark.ml import Transformer
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.sql.functions import udf
from pyspark.sql.types import (ArrayType, FloatType, StringType, StructField, StructType)
import sparkdl.graph.utils as tfx
from sparkdl.image.imageIO import createResizeImageUDF
import sparkdl.transformers.keras_applications as keras_apps
from sparkdl.param import (
keyword_only, HasInputCol, HasOutputCol, SparkDLTypeConverters)
from sparkdl.transformers.tf_image import TFImageTransformer
SUPPORTED_MODELS = ["InceptionV3", "Xception", "ResNet50", "VGG16", "VGG19"]
class DeepImagePredictor(Transformer, HasInputCol, HasOutputCol):
    """
    Applies the model specified by its popular name to the image column in DataFrame.
    The input image column should be 3-channel SpImage.
    The output is a MLlib Vector.
    """
    # Name of the pre-trained network; validated against SUPPORTED_MODELS.
    modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
                      typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
    # When true, raw output is decoded to (class, description, probability) rows.
    decodePredictions = Param(Params._dummy(), "decodePredictions",
                              "If true, output predictions in the (class, description, probability) format",
                              typeConverter=TypeConverters.toBoolean)
    topK = Param(Params._dummy(), "topK", "How many classes to return if decodePredictions is True",
                 typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                 topK=5):
        """
        __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                 topK=5)
        """
        super(DeepImagePredictor, self).__init__()
        self._setDefault(decodePredictions=False)
        self._setDefault(topK=5)
        # @keyword_only captures the constructor kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                  topK=5):
        """
        setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                  topK=5)
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
# TODO: give an option to take off multiple layers so it can be used in tuning
# (could be the name of the layer or int for how many to take off).
class DeepImageFeaturizer(Transformer, HasInputCol, HasOutputCol):
    """
    Applies the model specified by its popular name, with its prediction layer(s) chopped off,
    to the image column in DataFrame. The output is a MLlib Vector so that DeepImageFeaturizer
    can be used in a MLlib Pipeline.
    The input image column should be 3-channel SpImage.
    """
    # Name of the pre-trained network; validated against SUPPORTED_MODELS.
    modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
                      typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, modelName=None):
        """
        __init__(self, inputCol=None, outputCol=None, modelName=None)
        """
        super(DeepImageFeaturizer, self).__init__()
        # @keyword_only captures the constructor kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, modelName=None):
        """
        setParams(self, inputCol=None, outputCol=None, modelName=None)
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
class _NamedImageTransformer(Transformer, HasInputCol, HasOutputCol):
    """
    For internal use only. NamedImagePredictor and NamedImageFeaturizer are the recommended classes
    to use.
    Applies the model specified by its popular name to the image column in DataFrame. There are
    two output modes: predictions or the featurization from the model. In either case the output
    is a MLlib Vector.
    """
    # Name of the pre-trained network; validated against SUPPORTED_MODELS.
    modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
                      typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
    featurize = Param(Params._dummy(), "featurize",
                      "If true, output features. If false, output predictions. Either way the output is a vector.",
                      typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False):
        """
        __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False)
        """
        super(_NamedImageTransformer, self).__init__()
        # @keyword_only captures the constructor kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
        # Filled in lazily when the model graph is built; None until then.
        self._inputTensorName = None
        self._outputTensorName = None
        self._outputMode = None
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False):
        """
        setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False)
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
def _buildTFGraphForName(name, featurize):
    """
    Currently only supports pre-trained models from the Keras applications module.

    Returns the model-data dict from keras_apps, augmented with a frozen
    TensorFlow graph under the "graph" key.
    """
    modelData = keras_apps.getKerasApplicationModel(name).getModelData(featurize)
    sess = modelData["session"]
    outputTensorName = modelData["outputTensorName"]
    # Freeze variables to constants and prune nodes not needed for the output.
    graph = tfx.strip_and_freeze_until([outputTensorName], sess.graph, sess, return_graph=True)
    modelData["graph"] = graph
    return modelData
| 41.472803 | 115 | 0.675444 | # Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.applications.imagenet_utils import decode_predictions
import numpy as np
from pyspark.ml import Transformer
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.sql.functions import udf
from pyspark.sql.types import (ArrayType, FloatType, StringType, StructField, StructType)
import sparkdl.graph.utils as tfx
from sparkdl.image.imageIO import createResizeImageUDF
import sparkdl.transformers.keras_applications as keras_apps
from sparkdl.param import (
keyword_only, HasInputCol, HasOutputCol, SparkDLTypeConverters)
from sparkdl.transformers.tf_image import TFImageTransformer
SUPPORTED_MODELS = ["InceptionV3", "Xception", "ResNet50", "VGG16", "VGG19"]
class DeepImagePredictor(Transformer, HasInputCol, HasOutputCol):
    """
    Applies the model specified by its popular name to the image column in DataFrame.
    The input image column should be 3-channel SpImage.
    The output is a MLlib Vector.
    """
    # Name of the pre-trained network; validated against SUPPORTED_MODELS.
    modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
                      typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
    # When true, raw output is decoded to (class, description, probability) rows.
    decodePredictions = Param(Params._dummy(), "decodePredictions",
                              "If true, output predictions in the (class, description, probability) format",
                              typeConverter=TypeConverters.toBoolean)
    topK = Param(Params._dummy(), "topK", "How many classes to return if decodePredictions is True",
                 typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                 topK=5):
        """
        __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                 topK=5)
        """
        super(DeepImagePredictor, self).__init__()
        self._setDefault(decodePredictions=False)
        self._setDefault(topK=5)
        # @keyword_only captures the constructor kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                  topK=5):
        """
        setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,
                  topK=5)
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
    def setModelName(self, value):
        """Set the ``modelName`` param and return self."""
        return self._set(modelName=value)
    def getModelName(self):
        """Return the current value of the ``modelName`` param."""
        return self.getOrDefault(self.modelName)
    def _transform(self, dataset):
        # Run the named model via the internal transformer, writing raw output
        # into a temporary column so it can optionally be decoded below.
        transformer = _NamedImageTransformer(inputCol=self.getInputCol(),
                                             outputCol=self._getIntermediateOutputCol(),
                                             modelName=self.getModelName(), featurize=False)
        transformed = transformer.transform(dataset)
        if self.getOrDefault(self.decodePredictions):
            return self._decodeOutputAsPredictions(transformed)
        else:
            # No decoding requested: expose the raw output under the user's column.
            return transformed.withColumnRenamed(
                self._getIntermediateOutputCol(), self.getOutputCol())
    def _decodeOutputAsPredictions(self, df):
        # If we start having different weights than imagenet, we'll need to
        # move this logic to individual model building in NamedImageTransformer.
        # Also, we could put the computation directly in the main computation
        # graph or use a scala UDF for potentially better performance.
        topK = self.getOrDefault(self.topK)
        def decode(predictions):
            # Decode one row's probability vector into the topK ImageNet classes.
            pred_arr = np.expand_dims(np.array(predictions), axis=0)
            decoded = decode_predictions(pred_arr, top=topK)[0]
            # convert numpy dtypes to python native types
            return [(t[0], t[1], t[2].item()) for t in decoded]
        decodedSchema = ArrayType(
            StructType([StructField("class", StringType(), False),
                        StructField("description", StringType(), False),
                        StructField("probability", FloatType(), False)]))
        decodeUDF = udf(decode, decodedSchema)
        interim_output = self._getIntermediateOutputCol()
        # Replace the temporary raw-output column with the decoded predictions.
        return (
            df.withColumn(self.getOutputCol(), decodeUDF(df[interim_output]))
            .drop(interim_output)
        )
    def _getIntermediateOutputCol(self):
        # Temporary column name for raw model output before optional decoding.
        return "__tmp_" + self.getOutputCol()
# TODO: give an option to take off multiple layers so it can be used in tuning
# (could be the name of the layer or int for how many to take off).
class DeepImageFeaturizer(Transformer, HasInputCol, HasOutputCol):
    """
    Applies the model specified by its popular name, with its prediction layer(s) chopped off,
    to the image column in DataFrame. The output is a MLlib Vector so that DeepImageFeaturizer
    can be used in a MLlib Pipeline.
    The input image column should be 3-channel SpImage.
    """
    # Name of the pre-trained network; validated against SUPPORTED_MODELS.
    modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
                      typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, modelName=None):
        """
        __init__(self, inputCol=None, outputCol=None, modelName=None)
        """
        super(DeepImageFeaturizer, self).__init__()
        # @keyword_only captures the constructor kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, modelName=None):
        """
        setParams(self, inputCol=None, outputCol=None, modelName=None)
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
    def setModelName(self, value):
        """Set the ``modelName`` param and return self."""
        return self._set(modelName=value)
    def getModelName(self):
        """Return the current value of the ``modelName`` param."""
        return self.getOrDefault(self.modelName)
    def _transform(self, dataset):
        # featurize=True runs the model with its prediction layer(s) removed,
        # so the output column holds features rather than class scores.
        transformer = _NamedImageTransformer(inputCol=self.getInputCol(),
                                             outputCol=self.getOutputCol(),
                                             modelName=self.getModelName(), featurize=True)
        return transformer.transform(dataset)
class _NamedImageTransformer(Transformer, HasInputCol, HasOutputCol):
    """
    For internal use only. NamedImagePredictor and NamedImageFeaturizer are the recommended classes
    to use.
    Applies the model specified by its popular name to the image column in DataFrame. There are
    two output modes: predictions or the featurization from the model. In either case the output
    is a MLlib Vector.
    """
    # Name of the pre-trained network; validated against SUPPORTED_MODELS.
    modelName = Param(Params._dummy(), "modelName", "A deep learning model name",
                      typeConverter=SparkDLTypeConverters.buildSupportedItemConverter(SUPPORTED_MODELS))
    featurize = Param(Params._dummy(), "featurize",
                      "If true, output features. If false, output predictions. Either way the output is a vector.",
                      typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False):
        """
        __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False)
        """
        super(_NamedImageTransformer, self).__init__()
        # @keyword_only captures the constructor kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
        # Filled in when the model graph is built; None until then.
        self._inputTensorName = None
        self._outputTensorName = None
        self._outputMode = None
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False):
        """
        setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False)
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
    def setModelName(self, value):
        """Set the ``modelName`` param and return self."""
        return self._set(modelName=value)
    def getModelName(self):
        """Return the current value of the ``modelName`` param."""
        return self.getOrDefault(self.modelName)
    def setFeaturize(self, value):
        """Set the ``featurize`` param and return self."""
        return self._set(featurize=value)
    def getFeaturize(self):
        """Return the current value of the ``featurize`` param."""
        return self.getOrDefault(self.featurize)
    def _transform(self, dataset):
        # Build (or fetch) the frozen TF graph for the requested model/mode.
        modelGraphSpec = _buildTFGraphForName(self.getModelName(), self.getFeaturize())
        inputCol = self.getInputCol()
        resizedCol = "__sdl_imagesResized"
        tfTransformer = TFImageTransformer(
            channelOrder='BGR',
            inputCol=resizedCol,
            outputCol=self.getOutputCol(),
            graph=modelGraphSpec["graph"],
            inputTensor=modelGraphSpec["inputTensorName"],
            outputTensor=modelGraphSpec["outputTensorName"],
            outputMode=modelGraphSpec["outputMode"])
        # Resize images to the model's expected input size into a temporary
        # column, run the graph, then drop the temporary column.
        resizeUdf = createResizeImageUDF(modelGraphSpec["inputTensorSize"])
        result = tfTransformer.transform(dataset.withColumn(resizedCol, resizeUdf(inputCol)))
        return result.drop(resizedCol)
def _buildTFGraphForName(name, featurize):
    """
    Build the TF graph spec for a model identified by its popular name.

    Currently only supports pre-trained models from the Keras applications module.
    """
    model_data = keras_apps.getKerasApplicationModel(name).getModelData(featurize)
    session = model_data["session"]
    # Freeze variables to constants and prune nodes not needed for the output.
    model_data["graph"] = tfx.strip_and_freeze_until(
        [model_data["outputTensorName"]], session.graph, session, return_graph=True)
    return model_data
| 3,246 | 0 | 351 |
5d9fade7dd98fcb1e00b8c04b11501292daaf28d | 557 | py | Python | xcauth.py | gibberfishinc/xmpp-cloud-auth | 9e5856f49ba1fb98fd0d5ac34d75b5ca8f9a5e2a | [
"MIT"
] | null | null | null | xcauth.py | gibberfishinc/xmpp-cloud-auth | 9e5856f49ba1fb98fd0d5ac34d75b5ca8f9a5e2a | [
"MIT"
] | null | null | null | xcauth.py | gibberfishinc/xmpp-cloud-auth | 9e5856f49ba1fb98fd0d5ac34d75b5ca8f9a5e2a | [
"MIT"
] | null | null | null | #!/usr/bin/python3 -tt
from xclib.configuration import get_args
from xclib.authops import perform
DEFAULT_LOG_DIR = '/var/log/xcauth'
DESC = '''XMPP server authentication against JSXC>=3.2.0 on Nextcloud.
See https://jsxc.org or https://github.com/jsxc/xmpp-cloud-auth.'''
EPILOG = '''-I, -R, and -A take precedence over -t. One of them is required.
-I, -R, and -A imply -i and -d.'''
if __name__ == '__main__':
    # Parse command line/config options, then run the authentication handler.
    args = get_args(DEFAULT_LOG_DIR, DESC, EPILOG, 'xcauth')
    perform(args)
# vim: tabstop=8 softtabstop=0 expandtab shiftwidth=4
| 32.764706 | 76 | 0.698384 | #!/usr/bin/python3 -tt
from xclib.configuration import get_args
from xclib.authops import perform
DEFAULT_LOG_DIR = '/var/log/xcauth'
DESC = '''XMPP server authentication against JSXC>=3.2.0 on Nextcloud.
See https://jsxc.org or https://github.com/jsxc/xmpp-cloud-auth.'''
EPILOG = '''-I, -R, and -A take precedence over -t. One of them is required.
-I, -R, and -A imply -i and -d.'''
if __name__ == '__main__':
    # Parse command line/config options, then run the authentication handler.
    args = get_args(DEFAULT_LOG_DIR, DESC, EPILOG, 'xcauth')
    perform(args)
# vim: tabstop=8 softtabstop=0 expandtab shiftwidth=4
| 0 | 0 | 0 |
3a7b330cc1ccf3eade6fc19ff7af43126efa5055 | 326 | py | Python | app/__init__.py | stanfordmlgroup/blm | 38c53f10078a1851c91db8f286d5b7c9cdcaa95c | [
"MIT"
] | 40 | 2020-07-23T22:32:04.000Z | 2022-02-07T23:51:10.000Z | app/__init__.py | stanfordmlgroup/blm | 38c53f10078a1851c91db8f286d5b7c9cdcaa95c | [
"MIT"
] | 2 | 2020-07-24T12:35:36.000Z | 2021-01-24T19:00:36.000Z | app/__init__.py | stanfordmlgroup/blm | 38c53f10078a1851c91db8f286d5b7c9cdcaa95c | [
"MIT"
] | 6 | 2020-07-25T05:40:06.000Z | 2021-07-31T03:39:03.000Z | from flask import Flask
import os
#from model import LSCCNN
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config.from_object(__name__)
# Load model
#checkpoint_path = path_to_gcp_bucket
#model = LSCCNN(checkpoint_path=checkpoint_path)
#model.eval()
#model.cuda() ??
from app import views
| 18.111111 | 52 | 0.776074 | from flask import Flask
import os
#from model import LSCCNN
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config.from_object(__name__)
# Load model
#checkpoint_path = path_to_gcp_bucket
#model = LSCCNN(checkpoint_path=checkpoint_path)
#model.eval()
#model.cuda() ??
from app import views
| 0 | 0 | 0 |
3e8f52f918e1bed7695935c473bdf3b3406b9430 | 801 | py | Python | Teacher/DigitalHistory/Assignments_To_Be_Graded/123456789/Week_Two/Now_Try_This/4.py | ShayanRiyaz/BitUGrading | ecc069de292d371cf6805df70be94408d5c7894b | [
"MIT"
] | null | null | null | Teacher/DigitalHistory/Assignments_To_Be_Graded/123456789/Week_Two/Now_Try_This/4.py | ShayanRiyaz/BitUGrading | ecc069de292d371cf6805df70be94408d5c7894b | [
"MIT"
] | null | null | null | Teacher/DigitalHistory/Assignments_To_Be_Graded/123456789/Week_Two/Now_Try_This/4.py | ShayanRiyaz/BitUGrading | ecc069de292d371cf6805df70be94408d5c7894b | [
"MIT"
] | null | null | null | # Please note that if you uncomment and press multiple times, the program will keep appending to the file.
d = {'simple_key':'hello'}
answer1 = d['simple_key']
print(answer1)# Please note that if you uncomment and press multiple times, the program will keep appending to the file.
d = {'k1':{'k2':'hello'}}
answer2 = d['k1']['k2']
print(answer2)# Please note that if you uncomment and press multiple times, the program will keep appending to the file.
d = {'k1':[{'nest_key':['this is deep',['hello']]}]}
answer3 = d['k1'][0]['nest_key'][1][0]
print(answer3)# Please note that if you uncomment and press multiple times, the program will keep appending to the file.
d = {'k1':[1,2,{'k2':['this is tricky',{'tough':[1,2,['hello']]}]}]}
answer4 = d['k1'][2]['k2'][1]['tough'][2][0]
print(answer4) | 38.142857 | 120 | 0.677903 | # Please note that if you uncomment and press multiple times, the program will keep appending to the file.
# Exercise: pull the string 'hello' out of increasingly nested structures.
d = {'simple_key': 'hello'}
answer1 = d['simple_key']  # plain key lookup
print(answer1)
d = {'k1': {'k2': 'hello'}}
answer2 = d['k1']['k2']  # dict inside a dict
print(answer2)
d = {'k1': [{'nest_key': ['this is deep', ['hello']]}]}
nest = d['k1'][0]['nest_key']   # ['this is deep', ['hello']]
answer3 = nest[1][0]            # list inside the list
print(answer3)
d = {'k1': [1, 2, {'k2': ['this is tricky', {'tough': [1, 2, ['hello']]}]}]}
tough = d['k1'][2]['k2'][1]['tough']  # [1, 2, ['hello']]
answer4 = tough[2][0]
print(answer4)
ca3a008a2eb5a2eae5ca80ec9266427c854f6046 | 621 | py | Python | impute/tests/test_fpc.py | nimily/low-rank-impute | 0b2fb5c911a4b7505e61f9a7e412bcb11bf3e89a | [
"MIT"
] | null | null | null | impute/tests/test_fpc.py | nimily/low-rank-impute | 0b2fb5c911a4b7505e61f9a7e412bcb11bf3e89a | [
"MIT"
] | null | null | null | impute/tests/test_fpc.py | nimily/low-rank-impute | 0b2fb5c911a4b7505e61f9a7e412bcb11bf3e89a | [
"MIT"
] | null | null | null | import numpy as np
import numpy.random as npr
import numpy.testing as npt
import pytest
from impute import FpcImpute
@pytest.mark.usefixtures('rae_case')
| 18.264706 | 53 | 0.603865 | import numpy as np
import numpy.random as npr
import numpy.testing as npt
import pytest
from impute import FpcImpute
@pytest.mark.usefixtures('rae_case')
class TestFpc:
    """Tests for :class:`FpcImpute`."""
    @staticmethod
    def test_debias(rae_case):
        """Debiased solution should zero the gradient along the given directions.

        After ``FpcImpute.debias`` fits scales for random direction pairs
        (u, v), the diagonal of ``u.T @ rss_grad(z) @ v`` must vanish.
        """
        b, ds = rae_case
        n_row, n_col = b.shape
        rank = 10
        # Fixed seed so the random directions (and hence the test) are deterministic.
        npr.seed(10)
        u = npr.randn(n_row, rank)
        v = npr.randn(n_col, rank)
        svd = FpcImpute.debias(ds, u, v.T)
        z = svd.to_matrix()
        # Project the RSS gradient at z onto each (u_i, v_i) direction pair.
        g = u.T @ ds.rss_grad(z) @ v
        actual = np.diagonal(g)
        expect = np.zeros(rank)
        npt.assert_array_almost_equal(actual, expect)
| 403 | 38 | 22 |
27392dd2348e088769e3781375c34d4651b959c6 | 428 | py | Python | test/tests.py | JReese1212/SoftwareEngineering | 2fba90e4851911f7113578ddb8a158c1f260b353 | [
"MIT"
] | 5 | 2021-02-04T00:04:25.000Z | 2022-03-07T04:43:59.000Z | test/tests.py | JReese1212/SoftwareEngineering | 2fba90e4851911f7113578ddb8a158c1f260b353 | [
"MIT"
] | 119 | 2021-02-11T01:28:18.000Z | 2021-05-03T23:14:39.000Z | test/tests.py | JReese1212/SoftwareEngineering | 2fba90e4851911f7113578ddb8a158c1f260b353 | [
"MIT"
] | null | null | null | import sys
sys.path.append('./test/tests')
from driver import driver, Keys
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
#import test_example
#import test_printtaskbook
#import test_class
#import test_duedates
import test_deletion
driver.close()
| 22.526316 | 52 | 0.787383 | import sys
sys.path.append('./test/tests')
from driver import driver, Keys
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
#import test_example
#import test_printtaskbook
#import test_class
#import test_duedates
import test_deletion
driver.close()
| 0 | 0 | 0 |
93e8a0133186a9ed504c690d0c5523bb68f053ed | 534 | py | Python | test/test_add_contact.py | agnieszkazm/python_for_tests | 13514181f40e0f948030f0c1189bf7337b479015 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | agnieszkazm/python_for_tests | 13514181f40e0f948030f0c1189bf7337b479015 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | agnieszkazm/python_for_tests | 13514181f40e0f948030f0c1189bf7337b479015 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact | 48.545455 | 125 | 0.71161 | # -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
    """Creating a contact grows the list by one and preserves its contents."""
    contacts_before = app.contact.get_contacts_list()
    new_contact = Contact("Testname", "Testname2", "Testlastname", "test", "555", "666", "777", "888", "test", "www.example.com")
    app.contact.create(new_contact)
    contacts_after = app.contact.get_contacts_list()
    assert len(contacts_before) + 1 == len(contacts_after)
    # The new list must equal the old list plus the created contact,
    # modulo ordering (compare sorted by id).
    expected = contacts_before + [new_contact]
    assert sorted(expected, key=Contact.id_or_max) == sorted(contacts_after, key=Contact.id_or_max)
18817bea520ab9e0f9a7b98680e50e6f2e1ba6fb | 2,843 | py | Python | Currency-Converter/Curreny-Converter.py | maayon2521/Python-Projects | 31225d56b6a600c40be5c7e92c1792071959a32f | [
"MIT"
] | null | null | null | Currency-Converter/Curreny-Converter.py | maayon2521/Python-Projects | 31225d56b6a600c40be5c7e92c1792071959a32f | [
"MIT"
] | null | null | null | Currency-Converter/Curreny-Converter.py | maayon2521/Python-Projects | 31225d56b6a600c40be5c7e92c1792071959a32f | [
"MIT"
] | 1 | 2020-10-25T14:52:32.000Z | 2020-10-25T14:52:32.000Z | # This is an python GUI program which is used to conver the current vlue for different conutries.
# importing the modules
from tkinter import *
# creating a class called CurrencyConverter
# definfing a init function for the class TipCal
# function to convert values
# function to clear values
CurrencyConverter()
| 40.614286 | 133 | 0.633837 | # This is an python GUI program which is used to conver the current vlue for different conutries.
# importing the modules
from tkinter import *
# creating a class called CurrencyConverter
class CurrencyConverter():
    """Simple Tkinter GUI that converts a currency amount.

    The window offers three entry fields — the amount to convert, the
    conversion rate, and the computed converted amount — plus a Convert
    button and a Clear button.
    """

    def __init__(self):
        """Build the window and its widgets, then enter the Tk main loop.

        Note: Tk's mainloop() blocks, so construction does not return
        until the window is closed.
        """
        window = Tk()
        window.title("Currency Converter")
        window.configure(background="blue")
        window.geometry("450x320")
        # Keep the layout fixed; the grid placement below assumes this size.
        window.resizable(width=False, height=False)
        # Static labels for the three rows.
        Label(window, font="Helvetica 12 bold", bg="blue", text="Amount to Convert").grid(column=1, row=3, padx=25, pady=20)
        Label(window, font="Helvetica 12 bold", bg="blue", text="Conversion Rate").grid(column=1, row=5, padx=25, pady=20)
        Label(window, font="Helvetica 12 bold", bg="blue", text="Converted Amount").grid(column=1, row=7, padx=25, pady=20)
        # Entry variables: user-entered amount and rate, plus the result read-out.
        self.amount = StringVar()
        Entry(window, textvariable=self.amount, justify=RIGHT).grid(column=2, row=3, padx=50, pady=20)
        self.ConRate = StringVar()
        Entry(window, textvariable=self.ConRate, justify=RIGHT).grid(column=2, row=5, padx=50, pady=20)
        self.ConAmount = StringVar()
        Entry(window, textvariable=self.ConAmount, justify=RIGHT).grid(column=2, row=7, padx=50, pady=20)
        # Convert button: multiply amount by rate and display the result.
        convert_btn = Button(window, text="Convert", bg="purple", fg="white", width=15, command=self.convert, font="Helvetica 10 bold")
        convert_btn.grid(column=1, row=9, padx=15, pady=30)
        # Clear button: reset every field back to "0".
        clear_btn = Button(window, text="Clear", bg="black", fg="white", width=15, command=self.clear, font="Helvetica 10 bold")
        clear_btn.grid(column=2, row=9, padx=15, pady=30)
        window.mainloop()

    def convert(self):
        """Multiply the entered amount by the rate and display the result.

        Displays "Error" instead of raising when either field is empty or
        non-numeric — a bare float() here would crash the Tk button callback
        with an uncaught ValueError.
        """
        try:
            rate = float(self.ConRate.get())
            amount = float(self.amount.get())
        except ValueError:
            # Non-numeric or empty entry: show an error marker, don't crash.
            self.ConAmount.set("Error")
            return
        # Right-aligned, two decimal places, padded to width 10 (as before).
        self.ConAmount.set(format(amount * rate, '10.2f'))

    def clear(self):
        """Reset all three entry fields to the string "0"."""
        self.amount.set("0")
        self.ConRate.set("0")
        self.ConAmount.set("0")
# Instantiate the converter; this builds the window and blocks in mainloop()
# until the user closes it. Runs on import as well as when executed directly.
CurrencyConverter()
| 2,370 | 5 | 103 |