hexsha (string, length 40) | size (int64, 4-1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 4-209) | max_stars_repo_name (string, length 5-121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 4-209) | max_issues_repo_name (string, length 5-121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 4-209) | max_forks_repo_name (string, length 5-121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e21389138faeef26f2c2ee2be44922a1fad42cf9
| 6,501
|
py
|
Python
|
oldScripts/xtraScripts/tracker_cm.py
|
crackmech/flyclimb
|
551621d1d2747d22b407a6b640d7ccaf680b53e5
|
[
"MIT"
] | null | null | null |
oldScripts/xtraScripts/tracker_cm.py
|
crackmech/flyclimb
|
551621d1d2747d22b407a6b640d7ccaf680b53e5
|
[
"MIT"
] | null | null | null |
oldScripts/xtraScripts/tracker_cm.py
|
crackmech/flyclimb
|
551621d1d2747d22b407a6b640d7ccaf680b53e5
|
[
"MIT"
] | null | null | null |
'''
File name : tracker.py
File Description : Tracker Using Kalman Filter & Hungarian Algorithm
Author : Srini Ananthakrishnan
Date created : 07/14/2017
Date last modified: 07/16/2017
Python Version : 2.7
'''
# Import python libraries
import numpy as np
from kalman_filter import KalmanFilter
from common import dprint
from scipy.optimize import linear_sum_assignment
class Track(object):
"""Track class for every object to be tracked
Attributes:
None
"""
def __init__(self, prediction, trackIdCount):
"""Initialize variables used by Track class
Args:
prediction: predicted centroids of object to be tracked
trackIdCount: identification of each track object
Return:
None
"""
self.track_id = trackIdCount # identification of each track object
self.KF = KalmanFilter() # KF instance to track this object
self.prediction = np.asarray(prediction) # predicted centroids (x,y)
self.skipped_frames = 0 # number of frames skipped undetected
self.trace = [] # trace path
class Tracker(object):
"""Tracker class that updates track vectors of object tracked
Attributes:
None
"""
def __init__(self, dist_thresh, max_frames_to_skip, max_trace_length,
trackIdCount):
"""Initialize variable used by Tracker class
Args:
            dist_thresh: distance threshold. When the assignment cost
                         exceeds this threshold, the track is un-assigned
                         and a new track may be created
            max_frames_to_skip: maximum number of consecutive frames a
                                track may go undetected before removal
            max_trace_length: trace path history length
trackIdCount: identification of each track object
Return:
None
"""
self.dist_thresh = dist_thresh
self.max_frames_to_skip = max_frames_to_skip
self.max_trace_length = max_trace_length
self.tracks = []
self.trackIdCount = trackIdCount
def Update(self, detections):
"""Update tracks vector using following steps:
- Create tracks if no tracks vector found
- Calculate cost using sum of square distance
between predicted vs detected centroids
- Using Hungarian Algorithm assign the correct
detected measurements to predicted tracks
https://en.wikipedia.org/wiki/Hungarian_algorithm
- Identify tracks with no assignment, if any
- If tracks are not detected for long time, remove them
- Now look for un_assigned detects
- Start new tracks
- Update KalmanFilter state, lastResults and tracks trace
Args:
detections: detected centroids of object to be tracked
Return:
None
"""
# Create tracks if no tracks vector found
if (len(self.tracks) == 0):
for i in range(len(detections)):
track = Track(detections[i], self.trackIdCount)
self.trackIdCount += 1
self.tracks.append(track)
# Calculate cost using sum of square distance between
# predicted vs detected centroids
N = len(self.tracks)
M = len(detections)
cost = np.zeros(shape=(N, M)) # Cost matrix
for i in range(len(self.tracks)):
for j in range(len(detections)):
try:
diff = self.tracks[i].prediction - detections[j]
distance = np.sqrt(diff[0][0]*diff[0][0] +
diff[1][0]*diff[1][0])
cost[i][j] = distance
                except Exception:
                    # leave the cost at 0 if prediction/detection shapes mismatch
                    pass
        # Scale the cost down; note these are Euclidean distances, not squared errors
        cost = (0.5) * cost
# Using Hungarian Algorithm assign the correct detected measurements
# to predicted tracks
assignment = []
for _ in range(N):
assignment.append(-1)
row_ind, col_ind = linear_sum_assignment(cost)
for i in range(len(row_ind)):
assignment[row_ind[i]] = col_ind[i]
# Identify tracks with no assignment, if any
un_assigned_tracks = []
for i in range(len(assignment)):
if (assignment[i] != -1):
# check for cost distance threshold.
# If cost is very high then un_assign (delete) the track
if (cost[i][assignment[i]] > self.dist_thresh):
assignment[i] = -1
un_assigned_tracks.append(i)
pass
else:
self.tracks[i].skipped_frames += 1
# If tracks are not detected for long time, remove them
del_tracks = []
for i in range(len(self.tracks)):
if (self.tracks[i].skipped_frames > self.max_frames_to_skip):
del_tracks.append(i)
        if len(del_tracks) > 0:  # only when skipped frames exceed the max
            # delete the highest indices first so earlier indices stay valid
            for id in sorted(del_tracks, reverse=True):
                if id < len(self.tracks):
                    del self.tracks[id]
                    del assignment[id]
                else:
                    dprint("ERROR: id is greater than length of tracks")
# Now look for un_assigned detects
un_assigned_detects = []
for i in range(len(detections)):
if i not in assignment:
un_assigned_detects.append(i)
# Start new tracks
if(len(un_assigned_detects) != 0):
for i in range(len(un_assigned_detects)):
track = Track(detections[un_assigned_detects[i]],
self.trackIdCount)
self.trackIdCount += 1
self.tracks.append(track)
# Update KalmanFilter state, lastResults and tracks trace
for i in range(len(assignment)):
if(assignment[i] != -1):
self.tracks[i].skipped_frames = 0
self.tracks[i].prediction = detections[assignment[i]]
else:
self.tracks[i].prediction = np.array([[0], [0]])
if(len(self.tracks[i].trace) > self.max_trace_length):
for j in range(len(self.tracks[i].trace) -
self.max_trace_length):
del self.tracks[i].trace[j]
self.tracks[i].trace.append(self.tracks[i].prediction)
| 38.241176
| 77
| 0.57022
|
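# A minimal, self-contained sketch (not part of the tracker file above) of the
# cost-matrix + Hungarian-assignment step described in the Tracker.Update
# docstring. The centroid values are made up for illustration; only numpy and
# scipy are assumed.
import numpy as np
from scipy.optimize import linear_sum_assignment

predictions = np.array([[10.0, 12.0], [40.0, 41.0]])                # predicted track centroids
detections = np.array([[11.0, 13.0], [80.0, 79.0], [39.0, 40.0]])   # detected centroids

# Pairwise Euclidean distances form the cost matrix (tracks x detections)
cost = np.linalg.norm(predictions[:, None, :] - detections[None, :, :], axis=2)

# The Hungarian algorithm returns a minimum-cost one-to-one assignment
row_ind, col_ind = linear_sum_assignment(cost)
for t, d in zip(row_ind, col_ind):
    print(f"track {t} -> detection {d} (cost {cost[t, d]:.2f})")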
ea9841da21e8b47ec62894276ccac4299b18217e
| 3,921
|
py
|
Python
|
xarray/backends/pynio_.py
|
apkrelling/xarray
|
abcae54664539e50a34d4b713faadf108cf6d22e
|
[
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
xarray/backends/pynio_.py
|
apkrelling/xarray
|
abcae54664539e50a34d4b713faadf108cf6d22e
|
[
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
xarray/backends/pynio_.py
|
apkrelling/xarray
|
abcae54664539e50a34d4b713faadf108cf6d22e
|
[
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from ..core import indexing
from ..core.utils import Frozen, FrozenDict, close_on_error
from ..core.variable import Variable
from .common import (
BACKEND_ENTRYPOINTS,
AbstractDataStore,
BackendArray,
BackendEntrypoint,
)
from .file_manager import CachingFileManager
from .locks import HDF5_LOCK, NETCDFC_LOCK, SerializableLock, combine_locks, ensure_lock
from .store import StoreBackendEntrypoint
try:
import Nio
has_pynio = True
except ModuleNotFoundError:
has_pynio = False
# PyNIO can invoke netCDF libraries internally.
# Add a dedicated lock in case NCL itself is not thread-safe either.
NCL_LOCK = SerializableLock()
PYNIO_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK, NCL_LOCK])
class NioArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
self.dtype = np.dtype(array.typecode())
def get_array(self, needs_lock=True):
ds = self.datastore._manager.acquire(needs_lock)
return ds.variables[self.variable_name]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.BASIC, self._getitem
)
def _getitem(self, key):
with self.datastore.lock:
array = self.get_array(needs_lock=False)
if key == () and self.ndim == 0:
return array.get_value()
return array[key]
class NioDataStore(AbstractDataStore):
"""Store for accessing datasets via PyNIO"""
def __init__(self, filename, mode="r", lock=None, **kwargs):
if lock is None:
lock = PYNIO_LOCK
self.lock = ensure_lock(lock)
self._manager = CachingFileManager(
Nio.open_file, filename, lock=lock, mode=mode, kwargs=kwargs
)
# xarray provides its own support for FillValue,
# so turn off PyNIO's support for the same.
self.ds.set_option("MaskedArrayMode", "MaskedNever")
@property
def ds(self):
return self._manager.acquire()
def open_store_variable(self, name, var):
data = indexing.LazilyIndexedArray(NioArrayWrapper(name, self))
return Variable(var.dimensions, data, var.attributes)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
return {
"unlimited_dims": {k for k in self.ds.dimensions if self.ds.unlimited(k)}
}
def close(self):
self._manager.close()
class PynioBackendEntrypoint(BackendEntrypoint):
def open_dataset(
self,
filename_or_obj,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
mode="r",
lock=None,
):
store = NioDataStore(
filename_or_obj,
mode=mode,
lock=lock,
)
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(store):
ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
if has_pynio:
BACKEND_ENTRYPOINTS["pynio"] = PynioBackendEntrypoint
| 28.620438
| 88
| 0.646009
|
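# A brief, hedged usage sketch for the PyNIO backend defined above: once the
# "pynio" entrypoint is registered (and the optional Nio package is installed),
# xarray can open files through it. The file path below is hypothetical.
import xarray as xr

ds = xr.open_dataset("example.grb", engine="pynio")  # requires PyNIO to be installed
print(ds)
ds.close()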
1928ba7d70f868a32501d4af9dcd3725eee43519
| 1,392
|
py
|
Python
|
beakerx_tabledisplay/beakerx_tabledisplay/__init__.py
|
fcollonval/beakerx_tabledisplay
|
0c05d69b5d1431953b372621dd1478661a77a586
|
[
"Apache-2.0"
] | 6
|
2020-05-07T22:25:44.000Z
|
2021-01-15T21:53:16.000Z
|
beakerx_tabledisplay/beakerx_tabledisplay/__init__.py
|
fcollonval/beakerx_tabledisplay
|
0c05d69b5d1431953b372621dd1478661a77a586
|
[
"Apache-2.0"
] | 48
|
2020-05-20T09:55:37.000Z
|
2022-03-26T15:07:35.000Z
|
beakerx_tabledisplay/beakerx_tabledisplay/__init__.py
|
fcollonval/beakerx_tabledisplay
|
0c05d69b5d1431953b372621dd1478661a77a586
|
[
"Apache-2.0"
] | 5
|
2020-07-14T03:39:12.000Z
|
2022-02-23T08:18:13.000Z
|
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._version import version_info, __version__
from .commands import beakerx_parse
from .handlers import load_jupyter_server_extension
from .tabledisplay import *
from .tableitems import *
from .object import beakerx_tabledisplay
from beakerx_base import *
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': 'static',
'dest': 'beakerx_tabledisplay',
'require': 'beakerx_tabledisplay/extension'
}]
def _jupyter_labextension_paths():
return [{
'src': 'labextension',
'dest': '@beakerx/beakerx-tabledisplay',
}]
def _jupyter_server_extension_paths():
return [dict(module="beakerx_tabledisplay")]
def run():
try:
beakerx_parse()
except KeyboardInterrupt:
return 130
return 0
| 28.408163
| 74
| 0.718391
|
97c68d2483789610ca03ede57f5af6c81a4f8302
| 421
|
py
|
Python
|
projfd/appfd/models/activation.py
|
FirstDraftGIS/firstdraft
|
2f1f2124c9c75b1b1d380a9b8b16e2dfb99db873
|
[
"Apache-2.0"
] | 10
|
2016-04-23T19:40:28.000Z
|
2021-09-27T19:06:45.000Z
|
projfd/appfd/models/activation.py
|
FirstDraftGIS/firstdraft
|
2f1f2124c9c75b1b1d380a9b8b16e2dfb99db873
|
[
"Apache-2.0"
] | 19
|
2016-06-22T03:22:45.000Z
|
2018-02-09T04:55:34.000Z
|
projfd/appfd/models/activation.py
|
FirstDraftGIS/firstdraft
|
2f1f2124c9c75b1b1d380a9b8b16e2dfb99db873
|
[
"Apache-2.0"
] | 1
|
2016-04-23T19:40:38.000Z
|
2016-04-23T19:40:38.000Z
|
#-*- coding: utf-8 -*-
from .base import Base
from django.contrib.gis.db.models import BooleanField, CASCADE, CharField, OneToOneField
from django.contrib.auth.models import User
class Activation(Base):
expired = BooleanField(default=False)
key = CharField(max_length=200)
notified_success = BooleanField(default=False)
used = BooleanField(default=False)
user = OneToOneField(User, on_delete=CASCADE)
| 38.272727
| 88
| 0.760095
|
c973cfa4bafcb20c7a5b5b4bd31a093991c368fb
| 408
|
py
|
Python
|
data/scripts/templates/object/ship/shared_xwing_tier1.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/ship/shared_xwing_tier1.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/ship/shared_xwing_tier1.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_xwing_tier1.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 24
| 55
| 0.703431
|
182869af39d8470c966ae1f394a3a8635b793864
| 19,287
|
py
|
Python
|
chat/ui_client.py
|
Adhesh148/BaBbLe
|
e86cd19a25759728dd731201b62de239efa2fc3f
|
[
"MIT"
] | 8
|
2020-09-23T10:30:46.000Z
|
2022-03-07T09:31:13.000Z
|
chat/ui_client.py
|
Ashwin-op/BaBbLe
|
e86cd19a25759728dd731201b62de239efa2fc3f
|
[
"MIT"
] | null | null | null |
chat/ui_client.py
|
Ashwin-op/BaBbLe
|
e86cd19a25759728dd731201b62de239efa2fc3f
|
[
"MIT"
] | 2
|
2020-11-30T04:25:15.000Z
|
2021-09-28T04:41:15.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'chat.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import sys
import os
import time
import socket
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSignal, QThread
import threading
HEADER = 64
# PORT = 18521
# SERVER = "2.tcp.ngrok.io"
# ADDR = (SERVER,PORT)
FORMAT = 'utf-8'
DISCONNECT_MSG = "!END"
test_client = socket.socket()
class Ui_MainWindow(object):
global_client = socket.socket()
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(492, 846)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(21, 18, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 35, 49))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(21, 18, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 35, 49))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 35, 49))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 35, 49))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
MainWindow.setPalette(palette)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(400, 760, 71, 41))
font = QtGui.QFont()
font.setFamily("UnYetgul")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("color:#004cf5;\n"
"background-color:#1f1e2b;\n"
"border-radius: 15px;")
self.pushButton.setObjectName("pushButton")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 20, 141, 41))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("UnYetgul")
font.setPointSize(28)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(20, 760, 371, 41))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.HighlightedText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.HighlightedText, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(195, 193, 197))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 69, 91))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.HighlightedText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.lineEdit.setPalette(palette)
self.lineEdit.setStyleSheet("border-radius: 10px;\n"
"background-color: #43455b;\n"
"padding: 10px;\n"
"color: #c3c1c5;\n"
"font-size: 13px;")
self.lineEdit.setObjectName("lineEdit")
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralwidget)
self.plainTextEdit.setGeometry(QtCore.QRect(20, 270, 451, 471))
self.plainTextEdit.setStyleSheet("background-color: #43455b;\n"
"color: white;\n"
"padding: 10px;\n"
"font-size: 13px;")
self.plainTextEdit.setReadOnly(True)
self.plainTextEdit.setObjectName("plainTextEdit")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(10, 240, 471, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(30, 160, 61, 16))
font = QtGui.QFont()
font.setPointSize(11)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color:white;")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(50, 210, 31, 16))
font = QtGui.QFont()
font.setPointSize(11)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color:white;")
self.label_3.setObjectName("label_3")
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setGeometry(QtCore.QRect(10, 70, 471, 20))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(100, 150, 181, 31))
self.lineEdit_2.setStyleSheet("border-radius: 10px;\n"
"background-color: #43455b;\n"
"padding: 5px;\n"
"color: #c3c1c5;\n"
"font-size: 13px;")
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(100, 200, 181, 31))
self.lineEdit_3.setStyleSheet("border-radius: 10px;\n"
"background-color: #43455b;\n"
"padding: 5px;\n"
"color: #c3c1c5;\n"
"font-size: 13px;")
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(10, 110, 91, 16))
font = QtGui.QFont()
font.setPointSize(11)
self.label_4.setFont(font)
self.label_4.setStyleSheet("color:white;")
self.label_4.setObjectName("label_4")
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setGeometry(QtCore.QRect(100, 100, 181, 31))
self.lineEdit_4.setStyleSheet("border-radius: 10px;\n"
"background-color: #43455b;\n"
"padding: 5px;\n"
"color: #c3c1c5;\n"
"font-size: 13px;")
self.lineEdit_4.setObjectName("lineEdit_4")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(340, 120, 91, 41))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.pushButton_2.setFont(font)
self.pushButton_2.setStyleSheet("color:#004cf5;\n"
"background-color:#1f1e2b;\n"
"border-radius: 15px;")
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(340, 170, 91, 41))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.pushButton_3.setFont(font)
self.pushButton_3.setStyleSheet("color:#004cf5;\n"
"background-color:#1f1e2b;\n"
"border-radius: 15px;")
self.pushButton_3.setObjectName("pushButton_3")
self.lineEdit.raise_()
self.pushButton.raise_()
self.label.raise_()
self.plainTextEdit.raise_()
self.line.raise_()
self.label_2.raise_()
self.label_3.raise_()
self.line_2.raise_()
self.lineEdit_2.raise_()
self.lineEdit_3.raise_()
self.label_4.raise_()
self.lineEdit_4.raise_()
self.pushButton_2.raise_()
self.pushButton_3.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
# on startup
self.lineEdit.setEnabled(False)
self.pushButton.setEnabled(False)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# add a listener to "Join" button
self.pushButton_2.clicked.connect(lambda: self.connectToServer())
# add a listener to "Send" button
self.pushButton.clicked.connect(lambda: self.sendMessage())
# add a listener to "Leave" button
self.pushButton_3.clicked.connect(lambda: self.closeConn())
def connectToServer(self):
# Get connection details
username = self.lineEdit_4.text()
server = self.lineEdit_2.text()
        port = int(self.lineEdit_3.text())
# Establish Connection
addr = (server,port)
client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connect(addr)
self.global_client = client
# test the global client
global test_client
test_client = client
# On successful connection, Disable lineEdits
self.lineEdit_4.setEnabled(False)
self.lineEdit_2.setEnabled(False)
self.lineEdit_3.setEnabled(False)
self.pushButton_2.setEnabled(False)
self.lineEdit.setEnabled(True)
self.pushButton.setEnabled(True)
print(username,server,port)
# send the username to the server
client.send(username.encode(FORMAT))
        # start a thread to continuously listen to the server
self.thread = ExecuteThread()
self.thread.start()
self.thread.my_signal.connect(self.appendToChat)
def appendToChat(self,msg):
self.plainTextEdit.appendPlainText(msg)
def closeConn(self):
print("close")
test_client.send(DISCONNECT_MSG.encode(FORMAT))
self.lineEdit.setEnabled(False)
self.pushButton.setEnabled(False)
time.sleep(1)
sys.exit()
def sendMessage(self):
# get the message in the lineEdit
msg = self.lineEdit.text()
# Send the msg to the server to be broadcasted
self.global_client.send(msg.encode(FORMAT))
# Clear the lineEdit
self.lineEdit.setText("")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "Send"))
self.label.setText(_translate("MainWindow", "Chat"))
self.lineEdit.setPlaceholderText(_translate("MainWindow", "Type a message"))
self.label_2.setText(_translate("MainWindow", "Server"))
self.label_3.setText(_translate("MainWindow", "Port"))
self.label_4.setText(_translate("MainWindow", "Username"))
self.pushButton_2.setText(_translate("MainWindow", "Join"))
self.pushButton_3.setText(_translate("MainWindow", "Leave"))
class ExecuteThread(QThread):
my_signal = pyqtSignal(str)
def run(self):
while 1:
            # receive message
msg = test_client.recv(4096).decode(FORMAT)
            # If the received message is DISCONNECT_MSG then stop listening
            if(msg == DISCONNECT_MSG):
                test_client.close()
                break
            # emit signal
            self.my_signal.emit(msg)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 45.168618
| 88
| 0.679266
|
aa8e4092627663702e9df52551c5189436a97823
| 1,209
|
py
|
Python
|
main.py
|
amanda/randcamp
|
1e2a46c2bbae9456a595b796d7e5730661d24c91
|
[
"MIT"
] | null | null | null |
main.py
|
amanda/randcamp
|
1e2a46c2bbae9456a595b796d7e5730661d24c91
|
[
"MIT"
] | null | null | null |
main.py
|
amanda/randcamp
|
1e2a46c2bbae9456a595b796d7e5730661d24c91
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import json
def get_page_html(num):
url = f"https://bandcamp.com/artist_index?page={num}"
html = requests.get(url).text
return html
def get_artist_info(html):
soup = BeautifulSoup(html, 'html.parser')
names = soup.find_all(class_="item")
return [[n.get_text().strip(), n.find('a').get('href')] for n in names]
def get_artist_links(html):
soup = BeautifulSoup(html, 'html.parser')
names = soup.find_all(class_="item")
return [n.find('a').get('href') for n in names]
def get_bands_on_page(page_number):
return get_artist_links(get_page_html(page_number))
def write_file():
for i in range(1, 3449): # todo last page changes as more bands added
with open(f"results-{i}.txt", "w") as f:
bands = get_bands_on_page(i)
# print(bands)
f.write("\n".join(bands))
def write_json():
with open("results.txt") as fin:
dicty = {"bands": []}
for line in fin.readlines():
dicty["bands"].append(line.strip())
with open("results.json", "w") as fout:
json.dump(dicty, fout)
if __name__ == "__main__":
write_file()
# write_json()
| 25.1875
| 75
| 0.630273
|
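# A small offline illustration of what get_artist_info() extracts, using a
# hard-coded HTML snippet shaped like the assumed bandcamp artist_index markup
# (class="item" entries wrapping an <a> tag). No network access is needed.
from bs4 import BeautifulSoup

sample_html = """
<ul>
  <li class="item">Some Band<a href="https://someband.bandcamp.com"></a></li>
  <li class="item">Another Act<a href="https://anotheract.bandcamp.com"></a></li>
</ul>
"""
soup = BeautifulSoup(sample_html, "html.parser")
items = soup.find_all(class_="item")
print([[i.get_text().strip(), i.find("a").get("href")] for i in items])
# [['Some Band', 'https://someband.bandcamp.com'], ['Another Act', 'https://anotheract.bandcamp.com']]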
ec5a9c34b785575c1af4f6cd07336300b34a698c
| 2,730
|
py
|
Python
|
scheduler/setup.py
|
Kami/google-cloud-python
|
a14ffbaa50f7823c2792e91413a37cbc3ce687f5
|
[
"Apache-2.0"
] | 1
|
2019-06-14T10:11:59.000Z
|
2019-06-14T10:11:59.000Z
|
scheduler/setup.py
|
Kami/google-cloud-python
|
a14ffbaa50f7823c2792e91413a37cbc3ce687f5
|
[
"Apache-2.0"
] | null | null | null |
scheduler/setup.py
|
Kami/google-cloud-python
|
a14ffbaa50f7823c2792e91413a37cbc3ce687f5
|
[
"Apache-2.0"
] | 1
|
2020-04-14T10:47:41.000Z
|
2020-04-14T10:47:41.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-scheduler"
description = "Cloud Scheduler API API client library"
version = "1.1.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
"google-api-core[grpc] >= 1.6.0, < 2.0.0dev",
'enum34; python_version < "3.4"',
]
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package for package in setuptools.find_packages() if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="googleapis-packages@google.com",
license="Apache 2.0",
url="https://github.com/GoogleCloudPlatform/google-cloud-python",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
include_package_data=True,
zip_safe=False,
)
| 32.117647
| 85
| 0.689377
|
f809ebecd2f77cc3042cc11c130c8b897be36a6f
| 1,020
|
py
|
Python
|
V2RaycSpider1225/src/BusinessLogicLayer/plugins/accelerator/vulcan_ash.py
|
pgymail00/V2RayCloudSpider
|
d2222ed1374817f328dc378acb8dca21b06cc073
|
[
"MIT"
] | 1
|
2021-12-10T14:28:14.000Z
|
2021-12-10T14:28:14.000Z
|
V2RaycSpider1225/src/BusinessLogicLayer/plugins/accelerator/vulcan_ash.py
|
codemonkeyBeginner/V2RayCloudSpider
|
9cb8acc0bab3c81168256e9498f5a6a926396646
|
[
"MIT"
] | null | null | null |
V2RaycSpider1225/src/BusinessLogicLayer/plugins/accelerator/vulcan_ash.py
|
codemonkeyBeginner/V2RayCloudSpider
|
9cb8acc0bab3c81168256e9498f5a6a926396646
|
[
"MIT"
] | 1
|
2021-11-30T09:12:49.000Z
|
2021-11-30T09:12:49.000Z
|
"""
- 核心功能:
- “解压”与实例化采集器管理模块
- 加速器性能释放
"""
from BusinessCentralLayer.setting import logger, DEFAULT_POWER
from .core import CoroutineSpeedup
class ShuntRelease(CoroutineSpeedup):
"""accelerator性能释放关口"""
def __init__(
self, work_queue=None, task_docker: list = None, power: int = DEFAULT_POWER
):
super(ShuntRelease, self).__init__(
work_q=work_queue, task_docker=task_docker, power=power
)
def control_driver(self, task):
try:
task()
except Exception as e:
logger.exception(e)
class ForceRunRelease(CoroutineSpeedup):
"""collector管理器实例化关口"""
def __init__(self, task_docker: list = None, power: int = DEFAULT_POWER):
super(ForceRunRelease, self).__init__(task_docker=task_docker, power=power)
from src.BusinessLogicLayer.cluster.sailor import manage_task
self.core = manage_task
def control_driver(self, task):
self.core(class_=task, beat_sync=True, force_run=True)
| 26.153846
| 83
| 0.673529
|
ff81c9fbb03fb71c1e76f433d7db8c82cb55aac3
| 4,309
|
py
|
Python
|
function/python/brightics/function/statistics/cross_table.py
|
parkjh80/studio
|
6d8d8384272e5e1b2838b12e5557272a19408e89
|
[
"Apache-2.0"
] | 1
|
2020-02-08T10:56:29.000Z
|
2020-02-08T10:56:29.000Z
|
function/python/brightics/function/statistics/cross_table.py
|
data-weirdo/studio
|
48852c4f097f773ce3d408b59f79fda2e2d60470
|
[
"Apache-2.0"
] | null | null | null |
function/python/brightics/function/statistics/cross_table.py
|
data-weirdo/studio
|
48852c4f097f773ce3d408b59f79fda2e2d60470
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.validation import raise_runtime_error
import numpy as np
import pandas as pd
def cross_table(table, group_by=None, **params):
check_required_parameters(_cross_table, params, ['table'])
if group_by is not None:
return _function_by_group(_cross_table, table, group_by=group_by, **params)
else:
return _cross_table(table, **params)
def _cross_table(table, input_cols_1, input_cols_2, result='N', margins=False):
df1 = [table[col] for col in input_cols_1]
df2 = [table[col] for col in input_cols_2]
# cross table
if result == 'N':
result_table = pd.crosstab(df1, df2, margins=margins)
elif result == 'N / Row Total':
result_table = pd.crosstab(df1, df2, margins=margins, normalize='index')
elif result == 'N / Column Total':
result_table = pd.crosstab(df1, df2, margins=margins, normalize='columns')
elif result == 'N / Total':
result_table = pd.crosstab(df1, df2, margins=margins, normalize='all')
else:
raise_runtime_error("Please check 'result'.")
# each row and column name
row_names = list(result_table.index)[:]
if len(input_cols_1) == 1:
joined_row_name = [str(i) for i in row_names]
    else:
        if not margins:
            joined_row_name = ['_'.join(str(s) for s in row_names[i]) for i in range(len(row_names))]
        else:
            # with margins, the last entry is the 'All' total row
            joined_row_name = ['_'.join(str(s) for s in row_names[i]) for i in range(len(row_names) - 1)] + [row_names[-1][0]]
column_names = list(result_table.columns)[:]
if len(input_cols_2) == 1:
joined_column_name = [str(i) for i in column_names]
    else:
        if not margins:
            joined_column_name = ['_'.join(str(s) for s in column_names[i]) for i in range(len(column_names))]
        else:
            # with margins, the last entry is the 'All' total column
            joined_column_name = ['_'.join(str(s) for s in column_names[i]) for i in range(len(column_names) - 1)] + [column_names[-1][0]]
# cross table
if result == 'N':
result_table.insert(loc=0, column=' ', value=joined_row_name)
result_table.columns = np.append('N', joined_column_name)
# cross table normalize by row
elif result == 'N / Row Total':
result_table.insert(loc=0, column=' ', value=joined_row_name)
result_table.columns = np.append('N / Row Total', joined_column_name)
# cross table normalize by column
elif result == 'N / Column Total':
result_table.insert(loc=0, column=' ', value=joined_row_name)
result_table.columns = np.append('N / Column Total', joined_column_name)
# cross table normalize by all values
elif result == 'N / Total':
result_table.insert(loc=0, column=' ', value=joined_row_name)
result_table.columns = np.append('N / Total', joined_column_name)
else:
raise_runtime_error("Please check 'result'.")
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Cross Table Result
| ### Result Type : {result}
|
| #### Result Table
|
| {result_table}
|
""".format(result=result, result_table=pandasDF2MD(result_table, num_rows=len(result_table.index) + 1))))
model = _model_dict('cross_table')
model['result'] = result
model['result_table'] = result_table
model['_repr_brtc_'] = rb.get()
return {'model': model}
| 40.271028
| 138
| 0.656301
|
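# A self-contained pandas example of the pd.crosstab result types handled above
# ('N', 'N / Row Total', 'N / Column Total', 'N / Total'); the toy data is made
# up for illustration.
import pandas as pd

df = pd.DataFrame({
    "color": ["red", "red", "blue", "blue", "blue"],
    "shape": ["circle", "square", "circle", "circle", "square"],
})

counts = pd.crosstab(df["color"], df["shape"])                        # 'N'
row_pct = pd.crosstab(df["color"], df["shape"], normalize="index")    # 'N / Row Total'
col_pct = pd.crosstab(df["color"], df["shape"], normalize="columns")  # 'N / Column Total'
all_pct = pd.crosstab(df["color"], df["shape"], normalize="all")      # 'N / Total'
print(counts, row_pct, col_pct, all_pct, sep="\n\n")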
6d03d1a742ef0150dff47550dd4df73aa6816049
| 2,499
|
py
|
Python
|
app.py
|
Ethan1498/Custom-URL-Shortener
|
405a23db5b2cb6900ab92a1b03998529c4173b50
|
[
"MIT"
] | null | null | null |
app.py
|
Ethan1498/Custom-URL-Shortener
|
405a23db5b2cb6900ab92a1b03998529c4173b50
|
[
"MIT"
] | null | null | null |
app.py
|
Ethan1498/Custom-URL-Shortener
|
405a23db5b2cb6900ab92a1b03998529c4173b50
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Flask
from flask import request
from flask import render_template
from flask import redirect
import sqlite3 as sql
import requests
app = Flask(__name__)
@app.route('/',methods=['GET'])
def hello_world():
return render_template('index.html')
@app.route('/s',methods=['GET','POST'])
def short():
if request.method=='POST':
longurl = request.form['longurl']
custom = request.form['custom']
        if not longurl or not custom:
            return 'Error <script>alert("Invalid Credentials");</script>'
        if not longurl.startswith(("http://", "https://")):
            longurl = "http://" + longurl
try:
r = requests.get(longurl)
if r.status_code == 200:
pass
else:
return 'Invalid URL <script>alert("Invalid URL");</script>'
except:
return '''Invalid URL <script>alert("Invalid URL");
var meta = document.createElement('meta');
meta.httpEquiv = "REFRESH";
meta.content = "0;URL=/";
document.getElementsByTagName('head')[0].appendChild(meta);
</script>'''
print (longurl)
print (custom)
conn = sql.connect('urls.db')
cursor = conn.cursor()
#print cursor.execute("SELECT * FROM urls;")
try:
cursor.execute("INSERT INTO urls(longurl,custom) VALUES (?,?);", (str(longurl),str(custom)))
except:
return '''Invalid/Already existing custom url <script>alert("Invalid/Already existing custom url");
var meta = document.createElement('meta');
meta.httpEquiv = "REFRESH";
meta.content = "0;URL=/";
document.getElementsByTagName('head')[0].appendChild(meta);
</script>'''
conn.commit()
conn.close()
url = "https://shrink-link/s/"+custom
return 'Live at <a target="_blank" href="'+url+'">'+url+'</a>'
return ""
@app.route('/s/<custom>',methods=['GET','POST'])
def final(custom):
conn = sql.connect('urls.db')
cursor = conn.cursor()
cursor.execute('SELECT * FROM urls WHERE custom=?;', (str(custom),))
#return_this = cursor.fetchall()
#return_this = [[str(item) for item in results] for results in cursor.fetchall()]
    row = cursor.fetchone()
    if row is None:
        return 'No such short link'
    return_this = row[0]
    print (return_this)
    return redirect(return_this, code=302)
if __name__ == '__main__':
app.run(port=5000)
| 27.766667
| 111
| 0.590236
|
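# The Flask app above assumes a pre-existing SQLite database 'urls.db' with a
# table named 'urls'. Its schema is not shown in the file, so this is an
# inferred, hedged setup sketch; a UNIQUE constraint on 'custom' matches the
# duplicate-custom-url error handling in short().
import sqlite3

conn = sqlite3.connect("urls.db")
conn.execute("""
    CREATE TABLE IF NOT EXISTS urls (
        longurl TEXT NOT NULL,
        custom  TEXT NOT NULL UNIQUE
    );
""")
conn.commit()
conn.close()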
3b84b68332c5ab4e64a14548d8329ad4ddddbf72
| 3,432
|
py
|
Python
|
explorer/services/worker/manager.py
|
cryptassic/dex-explorer
|
1588011db1666b8f1ffb6499d909e4eff3f6b09b
|
[
"MIT"
] | null | null | null |
explorer/services/worker/manager.py
|
cryptassic/dex-explorer
|
1588011db1666b8f1ffb6499d909e4eff3f6b09b
|
[
"MIT"
] | null | null | null |
explorer/services/worker/manager.py
|
cryptassic/dex-explorer
|
1588011db1666b8f1ffb6499d909e4eff3f6b09b
|
[
"MIT"
] | null | null | null |
import time
import threading
from multiprocessing import Process, Queue
from explorer.utils.misc_helpers import get_sliced_range
from explorer.utils import CustomLogger
from explorer.models import WorkerTask
from explorer.services import Worker
class WorkerManager():
def __init__(self, callback_func, start_block: int, end_block: int,max_parallel:int=3):
self._logger = CustomLogger()
self._max_parallelism = max_parallel
self._callback_func = callback_func
self.workers = []
self.block_start = start_block
self.block_end = end_block
def __callback_service(self) -> None:
while True:
if not self.output_queue._closed:
if not self.output_queue.empty():
data_to_pass = self.output_queue.get()
self._callback_func(data_to_pass)
else:
time.sleep(1)
else:
break
def __start_callback_thread(self) -> None:
self._callback_thread = threading.Thread(target=self.__callback_service, args=())
self._callback_thread.start()
def __boot_workers(self) -> None:
for _ in range(self._max_parallelism):
worker = Worker(input_queue=self.input_queue, output_queue=self.output_queue)
w_process = Process(target=worker.start, args=())
self.workers.append(w_process)
[worker.start() for worker in self.workers]
def __start(self) -> None:
if hasattr(self,'input_queue'):
            if self.input_queue and not self.input_queue.empty():
                self._logger.warning("WorkerManager: Input Queue not empty! Keeping old queue")
else:
self.input_queue = Queue(
self._max_parallelism*2)
if hasattr(self,'output_queue'):
            if self.output_queue and not self.output_queue.empty():
                self._logger.warning("WorkerManager: Output Queue not empty! Keeping old queue")
else:
self.output_queue = Queue()
self.__start_callback_thread()
if len(self.workers):
self._logger.warning(f"WorkerManager: Found old workers. Overrding")
self.workers.clear()
self.__boot_workers()
if not len(self.workers):
raise Exception("Failed to launch workers")
def stop(self) -> None:
        # Put a poison pill in the queue to signal workers to stop
self.input_queue.put(None)
for worker in self.workers:
worker.join()
self._logger.info(f"Job Completed. Exiting...")
self.input_queue.close()
#This will close callback thread
self.output_queue.close()
def start(self) -> None:
self.__start()
starting_block = self.block_start
# Slicing big ranges to smaller chunks of self._max_parallelism size
for block_range_slice in get_sliced_range(start_block=starting_block, end_block=self.block_end, step=self._max_parallelism):
# Extend last slice to include last block
if block_range_slice == self.block_end:
block_range_slice += 1
for block_index in range(starting_block, block_range_slice):
task = WorkerTask(block_number=block_index)
self.input_queue.put(task)
starting_block = block_range_slice
| 33.320388
| 132
| 0.629662
|
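# A self-contained sketch of the producer/consumer pattern WorkerManager builds
# on: tasks flow through a multiprocessing Queue and a None "poison pill" tells
# the worker process to stop. The real Worker/WorkerTask classes are not shown
# here, so plain integers stand in for tasks.
from multiprocessing import Process, Queue

def worker(input_q: Queue, output_q: Queue) -> None:
    while True:
        task = input_q.get()
        if task is None:           # poison pill: shut down
            break
        output_q.put(task * 2)     # stand-in for real block processing

if __name__ == "__main__":
    input_q, output_q = Queue(), Queue()
    p = Process(target=worker, args=(input_q, output_q))
    p.start()
    for block in range(5):
        input_q.put(block)
    input_q.put(None)              # poison pill
    p.join()
    results = [output_q.get() for _ in range(5)]
    print(results)                 # [0, 2, 4, 6, 8]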
4ec023f00ce83ba799cd5221b2d97a4a868f7fc4
| 14,240
|
py
|
Python
|
src/lib/models/networks/msra_resnet_fpn.py
|
evitself/CenterNet
|
db3714397c776f3f84c6ab9b61a47160f78462f5
|
[
"MIT"
] | null | null | null |
src/lib/models/networks/msra_resnet_fpn.py
|
evitself/CenterNet
|
db3714397c776f3f84c6ab9b61a47160f78462f5
|
[
"MIT"
] | null | null | null |
src/lib/models/networks/msra_resnet_fpn.py
|
evitself/CenterNet
|
db3714397c776f3f84c6ab9b61a47160f78462f5
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from typing import List
BN_MOMENTUM = 0.1
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetFpn(nn.Module):
def __init__(self, block, layers, heads, head_conv, **kwargs):
self.inplanes = 64
self.deconv_with_bias = False
self.heads = heads
super(ResNetFpn, self).__init__()
# multi stem
self.k3_conv = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1,
bias=False)
self.k3_bn = nn.BatchNorm2d(32, momentum=BN_MOMENTUM)
self.k3_relu = nn.ReLU(inplace=True)
self.k3_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.k7_conv = nn.Conv2d(3, 24, kernel_size=7, stride=2, padding=3,
bias=False)
self.k7_bn = nn.BatchNorm2d(24, momentum=BN_MOMENTUM)
self.k7_relu = nn.ReLU(inplace=True)
self.k7_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.k11_conv = nn.Conv2d(3, 8, kernel_size=11, stride=2, padding=5,
bias=False)
self.k11_bn = nn.BatchNorm2d(8, momentum=BN_MOMENTUM)
self.k11_relu = nn.ReLU(inplace=True)
self.k11_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1, l1_in, l1_out = self._make_layer(block, 64, layers[0])
self.layer2, l2_in, l2_out = self._make_layer(block, 128, layers[1], stride=2)
self.layer3, l3_in, l3_out = self._make_layer(block, 256, layers[2], stride=2)
self.layer4, l4_in, l4_out = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
# self.deconv_layers = self._make_deconv_layer(
# 3,
# [256, 256, 256],
# [4, 4, 4],
# )
self.deconv_layer1 = self._make_deconv_layer_one(l4_out, 256, 4)
self.deconv_layer2 = self._make_deconv_layer_one(256, 256, 4)
self.deconv_layer3 = self._make_deconv_layer_one(256, 256, 4)
self.deconv_layers = [
self.deconv_layer1, self.deconv_layer2, self.deconv_layer3
]
# self.final_layer = []
self.layer3_projection = self._make_fpn_projection_layer(l3_out, 256)
self.layer2_projection = self._make_fpn_projection_layer(l2_out, 256)
self.layer1_projection = self._make_fpn_projection_layer(l1_out, 256)
self.projection_layers = [
self.layer3_projection, self.layer2_projection, self.layer1_projection
]
for head in sorted(self.heads):
num_output = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(256, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, num_output,
kernel_size=1, stride=1, padding=0))
else:
fc = nn.Conv2d(
in_channels=256,
out_channels=num_output,
kernel_size=1,
stride=1,
padding=0
)
self.__setattr__(head, fc)
# self.final_layer = nn.ModuleList(self.final_layer)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
layer_in_ch = self.inplanes
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
layer_out_ch = self.inplanes
return nn.Sequential(*layers), int(layer_in_ch), int(layer_out_ch)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels) -> List[torch.nn.Sequential]:
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
deconv_blocks = []
for i in range(num_layers):
layers = []
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
deconv_blocks.append(nn.Sequential(*layers))
return deconv_blocks
def _make_deconv_layer_one(self, in_channels, out_channels, kernel):
kernel, padding, output_padding = self._get_deconv_cfg(kernel, 0)
layers = []
layers.append(
nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
return nn.Sequential(*layers)
def _make_fpn_projection_layer(self, in_plains, out_plains):
layers = [
nn.Conv2d(
in_channels=in_plains,
out_channels=out_plains,
kernel_size=1,
stride=1,
padding=0,
bias=self.deconv_with_bias
)]
return nn.Sequential(*layers)
def forward(self, x):
x_k3 = self.k3_maxpool(self.k3_relu(self.k3_bn(self.k3_conv(x))))
x_k7 = self.k7_maxpool(self.k7_relu(self.k7_bn(self.k7_conv(x))))
        x_k11 = self.k11_maxpool(self.k11_relu(self.k11_bn(self.k11_conv(x))))
x_cat = torch.cat((x_k3, x_k7, x_k11), 1)
l1 = self.layer1(x_cat)
p1 = self.layer1_projection(l1)
l2 = self.layer2(l1)
p2 = self.layer2_projection(l2)
l3 = self.layer3(l2)
p3 = self.layer3_projection(l3)
l4 = self.layer4(l3)
d1 = self.deconv_layer1(l4)
d2 = self.deconv_layer2(d1 + p3)
d3 = self.deconv_layer3(d2 + p2)
feature = d3 + p1
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(feature)
return [ret]
def init_weights(self, num_layers, pretrained=True):
for backbone_layer in (self.k3_conv, self.k3_bn, self.k7_conv, self.k7_bn,
self.k11_conv, self.k11_bn,
self.layer1, self.layer2, self.layer3, self.layer4):
for _, m in backbone_layer.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# print('=> init resnet deconv weights from normal distribution')
for deconv_layer in self.deconv_layers:
for _, m in deconv_layer.named_modules():
if isinstance(m, nn.ConvTranspose2d):
# print('=> init {}.weight as normal(0, 0.001)'.format(name))
# print('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
# print('=> init {}.weight as 1'.format(name))
# print('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
for proj_layer in self.projection_layers:
for _, m in proj_layer.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
# print('=> init final conv weights from normal distribution')
for head in self.heads:
final_layer = self.__getattr__(head)
for i, m in enumerate(final_layer.modules()):
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# print('=> init {}.weight as normal(0, 0.001)'.format(name))
# print('=> init {}.bias as 0'.format(name))
if m.weight.shape[0] == self.heads[head]:
if 'hm' in head:
nn.init.constant_(m.bias, -2.19)
else:
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
if pretrained:
# pretrained_state_dict = torch.load(pretrained)
url = model_urls['resnet{}'.format(num_layers)]
pretrained_state_dict = model_zoo.load_url(url)
print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False)
# else:
# print('=> imagenet pretrained model dose not exist')
# print('=> please download it first')
# raise ValueError('imagenet pretrained model does not exist')
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_resnet_fpn(num_layers, heads, head_conv):
block_class, layers = resnet_spec[num_layers]
model = ResNetFpn(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers, pretrained=True)
return model
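# Minimal usage sketch (illustrative only -- the head names and channel counts below are
# assumptions, not taken from this file; constructing the model also downloads ImageNet
# ResNet weights because init_weights(..., pretrained=True) is called above):
#
#     heads = {'hm': 80, 'wh': 2, 'reg': 2}          # hypothetical CenterNet-style heads
#     model = get_resnet_fpn(18, heads, head_conv=64)
#     outputs = model(torch.randn(1, 3, 512, 512))   # list with one dict: head name -> tensor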
| 38.27957 | 100 | 0.56889 |
1069c773ae75dccc1862c813261c20a19dfca163 | 512 | py | Python
| src/hypermodern_python_tutorial/console.py | arwinlashawn/hypermodern-python-tutorial | 94e266aab271d0da64d222ba8821766eca34578e | ["MIT"] | null | null | null |
| src/hypermodern_python_tutorial/console.py | arwinlashawn/hypermodern-python-tutorial | 94e266aab271d0da64d222ba8821766eca34578e | ["MIT"] | null | null | null |
| src/hypermodern_python_tutorial/console.py | arwinlashawn/hypermodern-python-tutorial | 94e266aab271d0da64d222ba8821766eca34578e | ["MIT"] | null | null | null |
import textwrap
import click
import requests
from . import __version__
API_URL = "https://en.wikipedia.org/api/rest_v1/page/random/summary"
@click.command()
@click.version_option(version=__version__)
def main():
"""The hypermodern Python project."""
with requests.get(API_URL) as response:
response.raise_for_status()
data = response.json()
title = data["title"]
extract = data["extract"]
click.secho(title, fg="green")
click.echo(textwrap.fill(extract))
| 17.655172 | 68 | 0.683594 |
6849578bbb9ac0aaf07f530ccd81bf3d876a1050 | 6,940 | py | Python
| tests/components/mfi/test_sensor.py | Hypfer/home-assistant | 204ca3f3a6e24ef11ece2e2ee490a8d77553c147 | ["Apache-2.0"] | 1 | 2019-12-06T08:49:19.000Z | 2019-12-06T08:49:19.000Z |
| tests/components/mfi/test_sensor.py | FuqiangSong/home-assistant | d5419b77f9c245e5af006143eb55ae4dda3f174e | ["Apache-2.0"] | 2 | 2021-02-08T20:39:43.000Z | 2021-09-08T01:36:57.000Z |
| tests/components/mfi/test_sensor.py | FuqiangSong/home-assistant | d5419b77f9c245e5af006143eb55ae4dda3f174e | ["Apache-2.0"] | null | null | null |
"""The tests for the mFi sensor platform."""
import unittest
import unittest.mock as mock
import requests
from mficlient.client import FailedToLogin
from homeassistant.setup import setup_component
import homeassistant.components.sensor as sensor
import homeassistant.components.mfi.sensor as mfi
from homeassistant.const import TEMP_CELSIUS
from tests.common import get_test_home_assistant
class TestMfiSensorSetup(unittest.TestCase):
"""Test the mFi sensor platform."""
PLATFORM = mfi
COMPONENT = sensor
THING = "sensor"
GOOD_CONFIG = {
"sensor": {
"platform": "mfi",
"host": "foo",
"port": 6123,
"username": "user",
"password": "pass",
"ssl": True,
"verify_ssl": True,
}
}
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
def test_setup_missing_config(self, mock_client):
"""Test setup with missing configuration."""
config = {"sensor": {"platform": "mfi"}}
assert setup_component(self.hass, "sensor", config)
assert not mock_client.called
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
def test_setup_failed_login(self, mock_client):
"""Test setup with login failure."""
mock_client.side_effect = FailedToLogin
assert not self.PLATFORM.setup_platform(self.hass, dict(self.GOOD_CONFIG), None)
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
def test_setup_failed_connect(self, mock_client):
"""Test setup with connection failure."""
mock_client.side_effect = requests.exceptions.ConnectionError
assert not self.PLATFORM.setup_platform(self.hass, dict(self.GOOD_CONFIG), None)
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
def test_setup_minimum(self, mock_client):
"""Test setup with minimum configuration."""
config = dict(self.GOOD_CONFIG)
del config[self.THING]["port"]
assert setup_component(self.hass, self.COMPONENT.DOMAIN, config)
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6443, use_tls=True, verify=True
)
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
def test_setup_with_port(self, mock_client):
"""Test setup with port."""
config = dict(self.GOOD_CONFIG)
config[self.THING]["port"] = 6123
assert setup_component(self.hass, self.COMPONENT.DOMAIN, config)
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6123, use_tls=True, verify=True
)
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
def test_setup_with_tls_disabled(self, mock_client):
"""Test setup without TLS."""
config = dict(self.GOOD_CONFIG)
del config[self.THING]["port"]
config[self.THING]["ssl"] = False
config[self.THING]["verify_ssl"] = False
assert setup_component(self.hass, self.COMPONENT.DOMAIN, config)
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6080, use_tls=False, verify=False
)
@mock.patch("homeassistant.components.mfi.sensor.MFiClient")
@mock.patch("homeassistant.components.mfi.sensor.MfiSensor")
def test_setup_adds_proper_devices(self, mock_sensor, mock_client):
"""Test if setup adds devices."""
ports = {
i: mock.MagicMock(model=model) for i, model in enumerate(mfi.SENSOR_MODELS)
}
ports["bad"] = mock.MagicMock(model="notasensor")
mock_client.return_value.get_devices.return_value = [
mock.MagicMock(ports=ports)
]
assert setup_component(self.hass, sensor.DOMAIN, self.GOOD_CONFIG)
for ident, port in ports.items():
if ident != "bad":
mock_sensor.assert_any_call(port, self.hass)
assert mock.call(ports["bad"], self.hass) not in mock_sensor.mock_calls
class TestMfiSensor(unittest.TestCase):
"""Test for mFi sensor platform."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.port = mock.MagicMock()
self.sensor = mfi.MfiSensor(self.port, self.hass)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_name(self):
"""Test the name."""
assert self.port.label == self.sensor.name
def test_uom_temp(self):
"""Test the UOM temperature."""
self.port.tag = "temperature"
assert TEMP_CELSIUS == self.sensor.unit_of_measurement
def test_uom_power(self):
"""Test the UOEM power."""
self.port.tag = "active_pwr"
assert "Watts" == self.sensor.unit_of_measurement
def test_uom_digital(self):
"""Test the UOM digital input."""
self.port.model = "Input Digital"
assert "State" == self.sensor.unit_of_measurement
def test_uom_unknown(self):
"""Test the UOM."""
self.port.tag = "balloons"
assert "balloons" == self.sensor.unit_of_measurement
def test_uom_uninitialized(self):
"""Test that the UOM defaults if not initialized."""
type(self.port).tag = mock.PropertyMock(side_effect=ValueError)
assert "State" == self.sensor.unit_of_measurement
def test_state_digital(self):
"""Test the digital input."""
self.port.model = "Input Digital"
self.port.value = 0
assert mfi.STATE_OFF == self.sensor.state
self.port.value = 1
assert mfi.STATE_ON == self.sensor.state
self.port.value = 2
assert mfi.STATE_ON == self.sensor.state
def test_state_digits(self):
"""Test the state of digits."""
self.port.tag = "didyoucheckthedict?"
self.port.value = 1.25
with mock.patch.dict(mfi.DIGITS, {"didyoucheckthedict?": 1}):
assert 1.2 == self.sensor.state
with mock.patch.dict(mfi.DIGITS, {}):
assert 1.0 == self.sensor.state
def test_state_uninitialized(self):
"""Test the state of uninitialized sensors."""
type(self.port).tag = mock.PropertyMock(side_effect=ValueError)
assert mfi.STATE_OFF == self.sensor.state
def test_update(self):
"""Test the update."""
self.sensor.update()
assert self.port.refresh.call_count == 1
assert self.port.refresh.call_args == mock.call()
| 37.513514 | 88 | 0.646974 |
09758795cd0dbf301f57a6b93a4722caa3245e0c | 11,493 | py | Python
| 010train_model_10_preprocess.py | qiufengdiewu/LPInsider | 92fcc2ad9e05cb634c4e3f1accd1220b984a027d | ["Apache-2.0"] | null | null | null |
| 010train_model_10_preprocess.py | qiufengdiewu/LPInsider | 92fcc2ad9e05cb634c4e3f1accd1220b984a027d | ["Apache-2.0"] | null | null | null |
| 010train_model_10_preprocess.py | qiufengdiewu/LPInsider | 92fcc2ad9e05cb634c4e3f1accd1220b984a027d | ["Apache-2.0"] | null | null | null |
# coding:utf-8
import pandas as pd
import numpy as np
import gensim
from sklearn.svm import SVC
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import xgboost
import lightgbm
from sklearn.metrics.pairwise import pairwise_distances
import joblib
# Compute the sentence word-vector representation,
# including the corresponding position features.
def get_sent_vec(size, npLength, sent, model, model_train, lncRNA,protein,length_POS,sent_POS,POS_classified0,POS_matrix,length_classified):
vec = []
sent = str(sent).replace(',', ' ')
sent = sent.replace('(', ' ')
sent = sent.replace(')', ' ')
sent = sent.replace("'", ' ')
sent = sent.replace('.', ' ')
sent = sent.replace(':', ' ')
sent = sent.replace(']', ' ')
sent = sent.replace('[', ' ')
sent = sent.replace('/', ' ')
words = sent.split(" ")
for word in words:
try:
vec_word = model[word].reshape(1, size)
vec = np.append(vec, vec_word)
npLength -= 1
except:
try:
vec_word = model_train[word].reshape(1, size)
vec = np.append(vec, vec_word)
npLength -= 1
except:
continue
while npLength >= 0:
vec = np.append(vec, np.zeros(size).reshape(1, size))
npLength -= 1
    # Compute position features
matrix = np.zeros((1, 6))
lncRNA_matrix = matrix[0]
protein_matrix = matrix[0]
if lncRNA == "5'aHIF1alpha":
words[words.index('aHIF1alpha')] = "5'aHIF1alpha"
try:
lncRNA_location = words.index(lncRNA)
except:
lncRNA_location=-1
try:
protein_location = words.index(protein)
except:
protein_location=-1
try:
lncRNA_w2v = model_train[lncRNA]
protein_w2v = model_train[protein]
count = 0
        # Compute the distance matrix for the lncRNA
for i in range(lncRNA_location - 1, -1, -1):
try:
word_w2v = model_train[words[i]]
lncRNA_matrix[2 - count] = pairwise_distances([lncRNA_w2v, word_w2v])[0][1]
count += 1
if count >= 3:
break
except:
pass
count = 0
for i in range(lncRNA_location + 1, len(words)):
try:
word_w2v = model_train[words[i]]
lncRNA_matrix[3 + count] = pairwise_distances([lncRNA_w2v, word_w2v])[0][1]
count += 1
if count >= 3:
break
except:
pass
        # Compute the distance matrix for the protein
        # (this mirrors the lncRNA block above and could be factored into a helper function)
count = 0
for i in range(protein_location - 1, -1, -1):
try:
word_w2v = model_train[words[i]]
protein_matrix[2 - count] = pairwise_distances([protein_w2v, word_w2v])[0][1]
count += 1
if count >= 3:
break
except:
pass
count = 0
for i in range(protein_location + 1, len(words)):
try:
word_w2v = model_train[words[i]]
protein_matrix[3 + count] = pairwise_distances([protein_w2v, word_w2v])[0][1]
count += 1
if count >= 3:
break
except:
pass
except:
pass
    ###### Compute part-of-speech (POS) features
vec_POS=[]
words_POS=str(sent_POS).split(" ")
for word_POS in words_POS:
for i in range(length_classified):
if str(word_POS)==str(POS_classified0[i]):
vec_POS=np.append(vec_POS,POS_matrix[i])
length_POS-=1
break
while length_POS >= 0:
vec_POS = np.append(vec_POS,np.zeros(length_classified).reshape(1,length_classified))
length_POS -= 1
#####################
vec = nomalization(vec)
lncRNA_matrix = nomalization(lncRNA_matrix)
protein_matrix = nomalization(protein_matrix)
vec_POS = nomalization(vec_POS)
vec = np.concatenate((vec, lncRNA_matrix, protein_matrix, vec_POS), axis=0)
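    # Final feature vector layout (concatenated in the order built above):
    #   [padded word embeddings | 6-dim lncRNA distance features | 6-dim protein distance features | padded POS one-hots]
    # each block is standardized by nomalization() before concatenation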
return vec
# Train the models
X = pd.read_csv("./out/007X_with_entity_and_stanford_parser_preprocess.txt", sep='\t', header=None,
encoding='ISO-8859-1') #######
_025POS_transform_to_unite=pd.read_csv("./in/025POS_transform_to_unite_preprocess.txt",sep="\t",header=None,encoding="utf-8")
y = np.load('./out/007X_with_entity_and_stanford_parser_preprocess.npy') ####
f_svm = open("./out/results/010svm_10_preprocess.txt", 'a+') ###################################################
f_LogisticR = open("./out/results/010LogisticR_10_preprocess.txt", 'a+') ###################################################
f_RandomF = open('./out/results/010RandomF_10_preprocess.txt', 'a+') ###################################################
f_xgboost = open('./out/results/010xgboost_10_preprocess.txt', 'a+') ###################################################)
f_lightGBM = open('./out/results/010lightGBM_10_preprocess.txt', 'a+') ###################################################)
def train(X, y, count):
    # Load the pre-trained and the locally trained word2vec models
word2vec_path = "I:/Word2vecModel/wikipedia-pubmed-and-PMC-w2v.bin"
word2vec_path_train = "./out/03721Word2vec_word2vec_format_model"
model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
model_train = gensim.models.word2vec.Word2Vec.load(word2vec_path_train)
for c in range(10):###################
sentX = []
sentX_POS=[]
length = 0
length_POS=0
for i in range(0, len(X), 1):
sentX.append(X[2][i])
for sent in sentX:
sent = str(sent).replace(',', ' ')
sent = sent.replace('(', ' ')
sent = sent.replace(')', ' ')
sent = sent.replace("'", ' ')
sent = sent.replace('.', ' ')
sent = sent.replace(':', ' ')
sent = sent.replace(']', ' ')
sent = sent.replace('[', ' ')
sent = sent.replace('/', ' ')
words = sent.split(" ")
if len(words) > length:
                length = len(words)  ######### maximum sentence length in words; 97 for the small sample dataset
print("length"+str(length))
for i in range(len(_025POS_transform_to_unite)):
sentX_POS.append(_025POS_transform_to_unite[2][i])
for sent in sentX_POS:
words=str(sent).split(" ")
if len(words) > length_POS:
length_POS = len(words)
print("length_POS" + str(length_POS))
lncRNAs=[]
proteins=[]
for i in range(len(X)):
lncRNAs.append(X[0][i])
proteins.append(X[1][i])
XX = []
        ############ Build the POS one-hot matrix, e.g. NN -> [0,1,0,0,0,0,0,0,0,0,0]
POS_classified = pd.read_csv("./in/POS_classified.txt", sep='\t', header=None)
length_classified = len(POS_classified)
POS_classified0 = POS_classified[0]
POS_matrix=np.zeros((length_classified,length_classified))
for i in range(length_classified):
POS_matrix[i][i] = 1
###############
for i in range(len(sentX)):
sent=sentX[i]
sent_POS=sentX_POS[i]
XX.append([get_sent_vec(200, length, sent, model, model_train,X[0][i],X[1][i],length_POS,sent_POS,POS_classified0,POS_matrix,length_classified)])
#i += 1
XX = np.concatenate(XX)
####################################
        kfold = KFold(n_splits=10, random_state=5 * c, shuffle=True)
        for train_loc, test_loc in kfold.split(XX, y):
train_vec = XX[train_loc]
y_train = y[train_loc]
test_vec = XX[test_loc]
y_test = y[test_loc]
print("lightGBM")
# lightGBM############################################
LGBM = lightgbm.LGBMClassifier()
LGBM.fit(train_vec, y_train)
accuracy_LGBM = LGBM.score(test_vec, y_test)
predict_LGBM = LGBM.predict(test_vec)
precision_LGBM = metrics.precision_score(y_test, predict_LGBM)
recall_LGBM = metrics.recall_score(y_test, predict_LGBM)
f1_LGBM = metrics.f1_score(y_test, predict_LGBM)
###################
joblib.dump(LGBM, './out/010LGBM_model.pkl')
print("joblib.dump(LGBM, './out/010LGBM_model.pkl')")
f_lightGBM.write(str(accuracy_LGBM) + '\t')
f_lightGBM.write(str(precision_LGBM) + '\t' + str(recall_LGBM) + '\t' + str(f1_LGBM) + '\n')
# xgboost###############################################
reg = xgboost.XGBClassifier(silent=1)
reg.fit(train_vec, y_train)
accuracy_XGB = reg.score(test_vec, y_test)
predict_XGB = reg.predict(test_vec)
precision_XGB = metrics.precision_score(y_test, predict_XGB)
recall_XGB = metrics.recall_score(y_test, predict_XGB)
f1_XGB = metrics.f1_score(y_test, predict_XGB)
f_xgboost.write(str(accuracy_XGB) + '\t')
f_xgboost.write(str(precision_XGB) + '\t' + str(recall_XGB) + '\t' + str(f1_XGB) + '\n')
# svm###################################################
clf_svm = SVC(kernel='rbf', verbose=True, C=10)
clf_svm.fit(train_vec, y_train)
accuracy_SVM = clf_svm.score(test_vec, y_test)
predict = clf_svm.predict(test_vec)
precision_SVM = metrics.precision_score(y_test, predict)
recall_SVM = metrics.recall_score(y_test, predict)
f1_SVM = metrics.f1_score(y_test, predict)
f_svm.write(str(accuracy_SVM) + '\t')
f_svm.write(str(precision_SVM) + '\t' + str(recall_SVM) + '\t' + str(f1_SVM) + '\n')
            # Logistic regression ##################################################
clf_LogR = LogisticRegression(C=100, max_iter=200)
clf_LogR.fit(train_vec, y_train)
accuracy_LogR = clf_LogR.score(test_vec, y_test)
predict_logR = clf_LogR.predict(test_vec)
precision_logR = metrics.precision_score(y_test, predict_logR)
recall_logR = metrics.recall_score(y_test, predict_logR)
f1_logR = metrics.f1_score(y_test, predict_logR)
f_LogisticR.write(str(accuracy_LogR) + '\t')
f_LogisticR.write(str(precision_logR) + '\t' + str(recall_logR) + '\t' + str(f1_logR) + '\n')
# RandomForestClassifier ##################################################
forest = RandomForestClassifier(criterion='entropy', n_estimators=1000)
forest.fit(train_vec, y_train)
acc_RF = forest.score(test_vec, y_test)
predict_RF = forest.predict(test_vec)
precision_RF = metrics.precision_score(y_test, predict_RF)
recall_RF = metrics.recall_score(y_test, predict_RF)
f1_RF = metrics.f1_score(y_test, predict_RF)
f_RandomF.write(str(acc_RF) + '\t')
f_RandomF.write(str(precision_RF) + '\t' + str(recall_RF) + '\t' + str(f1_RF) + '\n')
count += 1
print("#################success:" + str(int(c) + 1) + ' ' + str(count))
def nomalization(X):
return preprocessing.scale(X, axis=0)
count = 0
train(X, y, count)
f_svm.close()
f_LogisticR.close()
f_RandomF.close()
f_xgboost.close()
f_lightGBM.close()
| 38.567114 | 157 | 0.541982 |
ee81a941b2355a6c775d591c3a7c0df2d0a4cac8 | 5,442 | py | Python
| xeno/model.py | sourcery-ai-bot/xeno | df7e9448b681024fae7a899bb2060b9bcda84ecf | ["MIT"] | null | null | null |
| xeno/model.py | sourcery-ai-bot/xeno | df7e9448b681024fae7a899bb2060b9bcda84ecf | ["MIT"] | null | null | null |
| xeno/model.py | sourcery-ai-bot/xeno | df7e9448b681024fae7a899bb2060b9bcda84ecf | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Linear stack of layers.
"""
import sys
import numpy as np
from xeno.utils.random import get_rng
from .layers import Layer
from .optimizers import SGD
from . import optimizers
from xeno.utils.random import get_dtype
from .objectives import SoftmaxCategoricalCrossEntropy
from . import objectives
class Model(object):
def __init__(self, layers=None):
self.layers = [] if layers is None else layers
self.loss = None
self.optimizer = None
def add(self, layer):
assert isinstance(layer, Layer), "Must be 'Layer' instance."
self.layers.append(layer)
def compile(self, loss=SoftmaxCategoricalCrossEntropy(), optimizer=SGD()):
# check
# assert isinstance(self.layers[0], InputLayer)
self.layers[0].first_layer = True
# connect to
next_layer = None
for layer in self.layers:
layer.connect_to(next_layer)
next_layer = layer
# for pre_layer, layer in zip(self.layers[:-1], self.layers[1:]):
# layer.connect_to(pre_layer)
# get loss class
self.loss = objectives.get(loss)
# get optimizer class
self.optimizer = optimizers.get(optimizer)
def fit(self, X, Y, max_iter=100, batch_size=64, shuffle=True,
validation_split=0., validation_data=None, file=sys.stdout):
# prepare data
train_X = X.astype(get_dtype()) if np.issubdtype(np.float64, X.dtype) else X
train_Y = Y.astype(get_dtype()) if np.issubdtype(np.float64, Y.dtype) else Y
if 1. > validation_split > 0.:
split = int(train_Y.shape[0] * validation_split)
valid_X, valid_Y = train_X[-split:], train_Y[-split:]
train_X, train_Y = train_X[:-split], train_Y[:-split]
elif validation_data is not None:
valid_X, valid_Y = validation_data
else:
valid_X, valid_Y = None, None
for iter_idx in range(1, max_iter + 1):
# shuffle
if shuffle:
seed = get_rng().randint(111, 1111111)
np.random.seed(seed)
np.random.shuffle(train_X)
np.random.seed(seed)
np.random.shuffle(train_Y)
# train
train_losses, train_predicts, train_targets = [], [], []
for b in range(train_Y.shape[0] // batch_size):
batch_begin = b * batch_size
batch_end = batch_begin + batch_size
x_batch = train_X[batch_begin:batch_end]
y_batch = train_Y[batch_begin:batch_end]
# forward propagation
y_pred = self.predict(x_batch)
# backward propagation
next_grad = self.loss.backward(y_pred, y_batch)
for layer in self.layers[::-1]:
next_grad = layer.backward(next_grad)
# get parameter and gradients
params = []
grads = []
for layer in self.layers:
params += layer.params
grads += layer.grads
# update parameters
self.optimizer.update(params, grads)
# got loss and predict
train_losses.append(self.loss.forward(y_pred, y_batch))
train_predicts.extend(y_pred)
train_targets.extend(y_batch)
# output train status
runout = "iter %d, train-[loss %.4f, acc %.4f]; " % (
iter_idx, float(np.mean(train_losses)), float(self.accuracy(train_predicts, train_targets)))
# runout = "iter %d, train-[loss %.4f, ]; " % (
# iter_idx, float(np.mean(train_losses)))
if valid_X is not None and valid_Y is not None:
# valid
valid_losses, valid_predicts, valid_targets = [], [], []
for b in range(valid_X.shape[0] // batch_size):
batch_begin = b * batch_size
batch_end = batch_begin + batch_size
x_batch = valid_X[batch_begin:batch_end]
y_batch = valid_Y[batch_begin:batch_end]
# forward propagation
y_pred = self.predict(x_batch)
# got loss and predict
valid_losses.append(self.loss.forward(y_pred, y_batch))
valid_predicts.extend(y_pred)
valid_targets.extend(y_batch)
# output valid status
runout += "valid-[loss %.4f, acc %.4f]; " % (
float(np.mean(valid_losses)), float(self.accuracy(valid_predicts, valid_targets)))
print(runout, file=file)
def predict(self, X):
""" Calculate an output Y for the given input X. """
x_next = X
for layer in self.layers[:]:
x_next = layer.forward(x_next)
return x_next
def accuracy(self, outputs, targets):
y_predicts = np.argmax(outputs, axis=1)
y_targets = np.argmax(targets, axis=1)
acc = y_predicts == y_targets
return np.mean(acc)
# acc = 0
# for i in range(y_targets.shape[0]):
# if y_targets[i] == y_predicts[i]:
# acc += 1
# return acc / y_targets.shape[0]
def evaluate(self, X, Y):
raise NotImplementedError()
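# Typical usage (illustrative sketch; the concrete layer classes live in xeno.layers and are
# not shown in this file, so the layer names below are placeholders rather than the real API):
#
#     model = Model()
#     model.add(SomeLayer(...))        # any xeno Layer subclass
#     model.compile(loss=SoftmaxCategoricalCrossEntropy(), optimizer=SGD())
#     model.fit(train_X, train_Y, max_iter=50, batch_size=64, validation_split=0.1)
#     predictions = model.predict(test_X)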
| 34.443038 | 108 | 0.557883 |
0be4a25a2f23dd134f5ee848dd00ece8e15d4599 | 9,949 | py | Python
| tests/test_fellowships.py | oleksost/continuum | 682d66540bfbfa171ac73281ed2989f9338e88bf | ["MIT"] | null | null | null |
| tests/test_fellowships.py | oleksost/continuum | 682d66540bfbfa171ac73281ed2989f9338e88bf | ["MIT"] | null | null | null |
| tests/test_fellowships.py | oleksost/continuum | 682d66540bfbfa171ac73281ed2989f9338e88bf | ["MIT"] | null | null | null |
import os
import copy
import pytest
import numpy as np
from torch.utils.data import DataLoader
import torchvision.transforms as trsf
from continuum.tasks import TaskType
from continuum.scenarios import ClassIncremental, InstanceIncremental, OnlineFellowship
from continuum.datasets import (
CIFAR10, CIFAR100, KMNIST, MNIST, CIFARFellowship, FashionMNIST, Fellowship, MNISTFellowship,
InMemoryDataset, Fellowship, Core50
)
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
@pytest.fixture
def dataset7c():
return InMemoryDataset(*gen_dataset(7, 0))
@pytest.fixture
def dataset10c():
return InMemoryDataset(*gen_dataset(10, 1))
@pytest.fixture
def dataset20c():
return InMemoryDataset(*gen_dataset(20, 2))
@pytest.fixture
def dataset20c_3channels():
return InMemoryDataset(*gen_dataset_3channels(20, 2))
def gen_dataset(nb_classes, pixel_value):
nb_items_per_class = 5
x_train = np.ones((nb_items_per_class * nb_classes, 32, 32, 3)) * pixel_value
y_train = []
for i in range(nb_classes):
y_train.append(np.ones(nb_items_per_class, dtype=np.int64) * i)
y_train = np.concatenate(y_train)
return (x_train, y_train)
def gen_dataset_3channels(nb_classes, pixel_value):
nb_items_per_class = 5
x_train = np.ones((nb_items_per_class * nb_classes, 32, 32, 3)) * pixel_value
y_train = []
for i in range(nb_classes):
y_train.append(np.ones(nb_items_per_class, dtype=np.int64) * i)
y_train = np.concatenate(y_train)
return (x_train, y_train)
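# NOTE: gen_dataset and gen_dataset_3channels are currently identical; both build
# (32, 32, 3) samples filled with a constant pixel value per dataset.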
@pytest.mark.parametrize("increment", [1, [7, 10, 20]])
def test_inMemory_updateLabels_Fellowship(increment, dataset7c, dataset10c, dataset20c):
fellow = Fellowship([dataset7c, dataset10c, dataset20c], update_labels=True)
x, y, t = fellow.get_data()
assert len(np.unique(t)) == 3
assert len(np.unique(y)) == 37
if isinstance(increment, list):
continuum = ClassIncremental(fellow, increment=increment)
assert continuum.nb_classes == 37
assert continuum.nb_tasks == len(increment)
else:
continuum = ClassIncremental(fellow, increment=increment)
assert continuum.nb_tasks == 37
assert continuum.nb_classes == 37
def test_Online_Fellowship(dataset7c, dataset10c, dataset20c):
scenario = OnlineFellowship([dataset7c, dataset10c, dataset20c])
for i, task_set in enumerate(scenario):
if i == 0:
assert task_set.nb_classes == 7
if i == 1:
assert task_set.nb_classes == 10
if i == 2:
assert task_set.nb_classes == 20
assert scenario[0].nb_classes == 7
assert scenario[1].nb_classes == 10
assert scenario[2].nb_classes == 20
@pytest.mark.parametrize("types,error", (
[[TaskType.IMAGE_PATH], False],
[[TaskType.H5, TaskType.IMAGE_PATH, TaskType.IMAGE_ARRAY, TaskType.TENSOR], False],
[[TaskType.H5, TaskType.IMAGE_PATH, TaskType.IMAGE_ARRAY, TaskType.TENSOR, TaskType.SEGMENTATION], True],
[[TaskType.H5, TaskType.IMAGE_PATH, TaskType.IMAGE_ARRAY, TaskType.TENSOR, TaskType.TEXT], True],
[[TaskType.H5, TaskType.IMAGE_PATH, TaskType.IMAGE_ARRAY, TaskType.TENSOR, TaskType.OBJ_DETECTION], True],
[[TaskType.SEGMENTATION, TaskType.OBJ_DETECTION], True],
[[TaskType.SEGMENTATION], False],
))
def test_online_Fellowship_mixeddatatype(dataset10c, types, error):
datasets = []
for typ in types:
d = copy.deepcopy(dataset10c)
d._data_type = typ
d._nb_classes = 10
datasets.append(d)
if error:
with pytest.raises(ValueError):
scenario = OnlineFellowship(datasets)
else:
scenario = OnlineFellowship(datasets)
@pytest.mark.slow
@pytest.mark.parametrize(
"list_datasets", [
([MNIST, FashionMNIST]),
([KMNIST, MNIST, FashionMNIST]),
([CIFAR10, CIFAR100, KMNIST, MNIST, FashionMNIST]),
]
)
def test_online_Fellowship_inMemory(list_datasets):
list_dict_args = {"data_path": DATA_PATH, "train": True, "download": False}
list_instanciate_datasets = []
for dataset in list_datasets:
list_instanciate_datasets.append(dataset(**list_dict_args))
scenario = OnlineFellowship(list_instanciate_datasets, update_labels=True)
assert len(scenario) == len(list_datasets)
tot_nb_classes = 0
for task_id, taskset in enumerate(scenario):
tot_nb_classes += taskset.nb_classes
loader = DataLoader(taskset)
_, _, _ = next(iter(loader))
assert tot_nb_classes == scenario.nb_classes
@pytest.mark.slow
@pytest.mark.parametrize(
"list_datasets", [
([Core50, CIFAR10])
]
)
def test_online_Fellowship_mix_path_array(list_datasets):
list_dict_args = [{"data_path": DATA_PATH, "train": True, "download": False}] * len(list_datasets)
list_instanciate_datasets = []
for i, dataset in enumerate(list_datasets):
list_instanciate_datasets.append(dataset(**list_dict_args[i]))
scenario = OnlineFellowship(list_instanciate_datasets, update_labels=True)
assert len(scenario) == len(list_datasets)
tot_nb_classes = 0
for task_id, taskset in enumerate(scenario):
tot_nb_classes += taskset.nb_classes
loader = DataLoader(taskset)
_, _, _ = next(iter(loader))
assert tot_nb_classes == scenario.nb_classes
@pytest.mark.parametrize(
"transformations", [
([trsf.Resize(size=(16, 16)), trsf.ToTensor()]), #single for all
([[trsf.ToTensor()], [trsf.ToTensor()], [trsf.ToTensor()]]) # one each
]
)
def test_online_Fellowship_transformation(dataset7c, dataset10c, dataset20c, transformations):
scenario = OnlineFellowship([dataset7c, dataset10c, dataset20c], transformations=transformations)
assert len(scenario) == 3
tot_nb_classes = 0
for task_id, taskset in enumerate(scenario):
tot_nb_classes += taskset.nb_classes
loader = DataLoader(taskset)
_, _, _ = next(iter(loader))
assert tot_nb_classes == scenario.nb_classes
def test_online_Fellowship_transformation2(dataset7c, dataset10c, dataset20c):
sizes = [16, 24, 40]
transformations = [[trsf.Resize(size=(sizes[0], sizes[0])), trsf.ToTensor()],
[trsf.Resize(size=(sizes[1], sizes[1])), trsf.ToTensor()],
[trsf.Resize(size=(sizes[2], sizes[2])), trsf.ToTensor()]]
scenario = OnlineFellowship([dataset7c, dataset10c, dataset20c], transformations=transformations)
for task_id, taskset in enumerate(scenario):
loader = DataLoader(taskset)
x, _, _ = next(iter(loader))
assert x.shape[-1] == sizes[task_id]
@pytest.mark.parametrize("increment", [1, [7, 10, 20]])
def test_inMemory_keepLabels_Fellowship(increment, dataset7c, dataset10c, dataset20c):
fellow = Fellowship([dataset7c, dataset10c, dataset20c], update_labels=False)
x, y, t = fellow.get_data()
assert len(np.unique(t)) == 3
assert len(np.unique(y)) == 20
if isinstance(increment, list):
with pytest.raises(Exception):
scenario = ClassIncremental(fellow, increment=increment)
else:
scenario = ClassIncremental(fellow, increment=increment)
assert scenario.nb_classes == 20
assert scenario.nb_tasks == 20
@pytest.mark.parametrize("update_labels,nb_tasks", [
(True, 0),
(True, 3),
(False, 0),
(False, 3),
])
def test_inMemory_Fellowship(update_labels, nb_tasks, dataset7c, dataset10c, dataset20c):
fellow = Fellowship([dataset7c, dataset10c, dataset20c], update_labels=update_labels)
continuum = InstanceIncremental(fellow, nb_tasks=nb_tasks)
assert continuum.nb_tasks == 3
@pytest.mark.slow
@pytest.mark.parametrize("nb_tasks", [0, 3])
def test_MNIST_Fellowship_Instance_Incremental(nb_tasks, tmpdir):
dataset = MNISTFellowship(data_path=tmpdir, train=True, download=True)
dataset.get_data()
continuum = InstanceIncremental(dataset, nb_tasks=nb_tasks)
assert len(continuum) == 3
@pytest.mark.slow
def test_MNIST_Fellowship_nb_classes(tmpdir):
dataset = MNISTFellowship(data_path=tmpdir, train=True, download=True)
x, y, t = dataset.get_data()
assert len(np.unique(y)) == 30
dataset = MNISTFellowship(data_path=tmpdir, train=True, download=True, update_labels=False)
x, y, t = dataset.get_data()
assert len(np.unique(y)) == 10
@pytest.mark.slow
def test_MNIST_Fellowship(tmpdir):
dataset = MNISTFellowship(data_path=tmpdir, train=True, download=True)
dataset.get_data()
continuum = ClassIncremental(dataset, increment=10)
assert len(continuum) == 3
@pytest.mark.slow
def test_CIFAR_Fellowship(tmpdir):
cl_dataset = CIFARFellowship(data_path=tmpdir, train=True, download=True)
scenario = ClassIncremental(cl_dataset, increment=10)
assert len(scenario) == 11
@pytest.mark.slow
@pytest.mark.parametrize(
"list_datasets,nb_tasks", [
([MNIST, FashionMNIST], 2),
([KMNIST, MNIST, FashionMNIST], 3),
([CIFAR10, CIFAR100], 11),
]
)
def test_Fellowship_classes(tmpdir, list_datasets, nb_tasks):
cl_dataset = Fellowship(
datasets=[d(data_path=tmpdir, download=True, train=True) for d in list_datasets]
)
scenario = ClassIncremental(cl_dataset, increment=10)
assert len(scenario) == nb_tasks
for task_id, taskset in enumerate(scenario):
classes = taskset.get_classes()
# we check if all classes are here
assert len(classes) == (classes.max() - classes.min() + 1)
@pytest.mark.slow
@pytest.mark.parametrize("list_datasets", [[MNIST, CIFAR10]])
def test_Fellowship_Dimension_Fail(tmpdir, list_datasets):
cl_dataset = Fellowship(
datasets=[d(data_path=tmpdir, download=True, train=True) for d in list_datasets]
)
# This does not work since CIFAR10 and MNIST data are not same shape
with pytest.raises(ValueError):
continuum = ClassIncremental(cl_dataset, increment=10)
| 32.619672 | 110 | 0.699266 |
566c645e15c57d77c5cad22956ee9d60863d8e37 | 16,509 | py | Python
| neutron/plugins/vmware/nsxlib/switch.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | ["Apache-2.0"] | 10 | 2015-09-22T10:22:53.000Z | 2016-02-25T06:12:05.000Z |
| neutron/plugins/vmware/nsxlib/switch.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | ["Apache-2.0"] | 12 | 2015-01-08T18:30:45.000Z | 2015-03-13T21:04:15.000Z |
| neutron/plugins/vmware/nsxlib/switch.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | ["Apache-2.0"] | 7 | 2015-02-05T10:23:52.000Z | 2019-05-18T17:11:19.000Z |
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.config import cfg
from oslo.serialization import jsonutils
from neutron.common import constants
from neutron.common import exceptions as exception
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
LSWITCH_RESOURCE = "lswitch"
LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE
LOG = log.getLogger(__name__)
def _configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs):
lport_obj['allowed_address_pairs'] = []
if port_security_enabled:
for fixed_ip in fixed_ips:
ip_address = fixed_ip.get('ip_address')
if ip_address:
lport_obj['allowed_address_pairs'].append(
{'mac_address': mac_address, 'ip_address': ip_address})
# add address pair allowing src_ip 0.0.0.0 to leave
# this is required for outgoing dhcp request
lport_obj["allowed_address_pairs"].append(
{"mac_address": mac_address,
"ip_address": "0.0.0.0"})
lport_obj['security_profiles'] = list(security_profiles or [])
lport_obj['queue_uuid'] = queue_id
if mac_learning_enabled is not None:
lport_obj["mac_learning"] = mac_learning_enabled
lport_obj["type"] = "LogicalSwitchPortConfig"
for address_pair in list(allowed_address_pairs or []):
lport_obj['allowed_address_pairs'].append(
{'mac_address': address_pair['mac_address'],
'ip_address': address_pair['ip_address']})
def get_lswitch_by_id(cluster, lswitch_id):
try:
lswitch_uri_path = nsxlib._build_uri_path(
LSWITCH_RESOURCE, lswitch_id,
relations="LogicalSwitchStatus")
return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando): this should not raise a neutron exception
raise exception.NetworkNotFound(net_id=lswitch_id)
def get_lswitches(cluster, neutron_net_id):
def lookup_switches_by_tag():
# Fetch extra logical switches
lswitch_query_path = nsxlib._build_uri_path(
LSWITCH_RESOURCE,
fields="uuid,display_name,tags,lport_count",
relations="LogicalSwitchStatus",
filters={'tag': neutron_net_id,
'tag_scope': 'quantum_net_id'})
return nsxlib.get_all_query_pages(lswitch_query_path, cluster)
lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
relations="LogicalSwitchStatus")
results = []
try:
ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
results.append(ls)
for tag in ls['tags']:
if (tag['scope'] == "multi_lswitch" and
tag['tag'] == "True"):
results.extend(lookup_switches_by_tag())
except exception.NotFound:
# This is legit if the neutron network was created using
# a post-Havana version of the plugin
results.extend(lookup_switches_by_tag())
if results:
return results
else:
raise exception.NetworkNotFound(net_id=neutron_net_id)
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
transport_zones_config,
shared=None,
**kwargs):
# The tag scope adopts a slightly different naming convention for
# historical reasons
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"transport_zones": transport_zones_config,
"replication_mode": cfg.CONF.NSX.replication_mode,
"tags": utils.get_tags(os_tid=tenant_id,
quantum_net_id=neutron_net_id)}
# TODO(salv-orlando): Now that we have async status synchronization
# this tag is perhaps not needed anymore
if shared:
lswitch_obj["tags"].append({"tag": "true",
"scope": "shared"})
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
uri = nsxlib._build_uri_path(LSWITCH_RESOURCE)
lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster)
LOG.debug("Created logical switch: %s", lswitch['uuid'])
return lswitch
def update_lswitch(cluster, lswitch_id, display_name,
tenant_id=None, **kwargs):
uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
lswitch_obj = {"display_name": utils.check_and_truncate(display_name)}
# NOTE: tag update will not 'merge' existing tags with new ones.
tags = []
if tenant_id:
tags = utils.get_tags(os_tid=tenant_id)
    # The 'tags' kwarg might exist and be None
tags.extend(kwargs.get('tags') or [])
if tags:
lswitch_obj['tags'] = tags
try:
return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster)
except exception.NotFound:
LOG.exception(_LE("Network not found."))
raise exception.NetworkNotFound(net_id=lswitch_id)
def delete_network(cluster, net_id, lswitch_id):
delete_networks(cluster, net_id, [lswitch_id])
#TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
for ls_id in lswitch_ids:
path = "/ws.v1/lswitch/%s" % ls_id
try:
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound:
LOG.exception(_LE("Network not found."))
raise exception.NetworkNotFound(net_id=ls_id)
def query_lswitch_lports(cluster, ls_uuid, fields="*",
filters=None, relations=None):
# Fix filter for attachments
if filters and "attachment" in filters:
filters['attachment_vif_uuid'] = filters["attachment"]
del filters['attachment']
uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=ls_uuid,
fields=fields,
filters=filters,
relations=relations)
return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
def delete_port(cluster, switch, port):
uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
try:
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
except exception.NotFound:
LOG.exception(_LE("Port or Network not found"))
raise exception.PortNotFoundOnNetwork(
net_id=switch, port_id=port)
except api_exc.NsxApiException:
raise exception.NeutronException()
def get_ports(cluster, networks=None, devices=None, tenants=None):
vm_filter_obsolete = ""
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Neutron checks to see if
# the network has any ports.
if networks:
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = networks[0]
else:
lswitch = "*"
if devices:
for device_id in devices:
vm_filter_obsolete = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id,
obfuscate=True),
vm_filter_obsolete])
vm_filter = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id),
vm_filter])
if tenants:
for tenant in tenants:
tenant_filter = '&'.join(
["tag_scope=os_tid",
"tag=%s" % tenant,
tenant_filter])
nsx_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
lport_query_path_obsolete = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
# NOTE(armando-migliaccio): by querying with obsolete tag first
# current deployments won't take the performance hit of a double
# call. In release L-** or M-**, we might want to swap the calls
# as it's likely that ports with the new tag would outnumber the
# ones with the old tag
ports = nsxlib.get_all_query_pages(lport_query_path_obsolete,
cluster)
if not ports:
ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nsx_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
return nsx_lports
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
"""Get port by neutron tag.
Returns the NSX UUID of the logical port with tag q_port_id equal to
neutron_port_id or None if the port is not Found.
"""
uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid,
fields='uuid',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
"on: '%(lswitch_uuid)s'",
{'neutron_port_id': neutron_port_id,
'lswitch_uuid': lswitch_uuid})
res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
num_results = len(res["results"])
if num_results >= 1:
if num_results > 1:
LOG.warn(_LW("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
return res["results"][0]
def get_port(cluster, network, port, relations=None):
LOG.info(_LI("get_port() %(network)s %(port)s"),
{'network': network, 'port': port})
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
if relations:
uri += "relations=%s" % relations
try:
return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
except exception.NotFound:
LOG.exception(_LE("Port or Network not found."))
raise exception.PortNotFoundOnNetwork(
port_id=port, net_id=network)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=utils.check_and_truncate(display_name),
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id)))
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
cluster=cluster)
LOG.debug("Updated logical port %(result)s "
"on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
except exception.NotFound:
LOG.exception(_LE("Port or Network not found."))
raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid)
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
"""Creates a logical port on the assigned logical switch."""
display_name = utils.check_and_truncate(display_name)
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id))
)
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid)
result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
cluster=cluster)
LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port."""
try:
r = nsxlib.do_request(HTTP_GET,
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
except exception.NotFound:
LOG.exception(_LE("Port not found."))
raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN
def plug_interface(cluster, lswitch_id, lport_id, att_obj):
return nsxlib.do_request(HTTP_PUT,
nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
lport_id, lswitch_id,
is_attachment=True),
jsonutils.dumps(att_obj),
cluster=cluster)
def plug_vif_interface(
cluster, lswitch_id, port_id, port_type, attachment=None):
"""Plug a VIF Attachment object in a logical port."""
lport_obj = {}
if attachment:
lport_obj["vif_uuid"] = attachment
lport_obj["type"] = port_type
return plug_interface(cluster, lswitch_id, port_id, lport_obj)
| 41.37594 | 79 | 0.617057 |
0a4820c69ba70ad4aed96303c064eca8f15d9111 | 482 | py | Python
| testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | ["MIT"] | 9,225 | 2015-06-15T21:56:14.000Z | 2022-03-31T20:47:38.000Z |
| testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | ["MIT"] | 7,794 | 2015-06-15T21:06:34.000Z | 2022-03-31T10:56:54.000Z |
| testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | ["MIT"] | 2,598 | 2015-06-15T21:42:39.000Z | 2022-03-29T13:48:22.000Z |
from dataclasses import dataclass
from dataclasses import field
def test_comparing_two_different_data_classes() -> None:
@dataclass
class SimpleDataObjectOne:
field_a: int = field()
field_b: str = field()
@dataclass
class SimpleDataObjectTwo:
field_a: int = field()
field_b: str = field()
left = SimpleDataObjectOne(1, "b")
right = SimpleDataObjectTwo(1, "c")
assert left != right # type: ignore[comparison-overlap]
| 24.1 | 60 | 0.670124 |
7237fc5f21c6128c50c86b45f420896704a67e19 | 365 | py | Python
| bot_client/utils/constants.py | yankai14/AntiFish | 2218d1403a1f9d82a64fd6c0d336a78d12a272bd | ["MIT"] | 1 | 2022-02-27T12:34:36.000Z | 2022-02-27T12:34:36.000Z |
| bot_client/utils/constants.py | yankai14/AntiFish | 2218d1403a1f9d82a64fd6c0d336a78d12a272bd | ["MIT"] | null | null | null |
| bot_client/utils/constants.py | yankai14/AntiFish | 2218d1403a1f9d82a64fd6c0d336a78d12a272bd | ["MIT"] | null | null | null |
from enum import Enum
class STATE(Enum):
# Conversation states
FEATURE_SELECTION = 1
PHISHING_CHECK = 10
PHISING_GET_LINK = 11
REPORT = 20
REPORT_GET_LINK = 21
ABOUT = 30
# Meta states
SHOWING = 1000
BACK = 1001
STOPPING = 1002
START_OVER = 1004
    END = -1
class CONSTANTS:
START_OVER = "start_over"
| 13.035714 | 29 | 0.621918 |
6deff95f8012cf460c6b296f126e2b7ea3f9b092 | 2,307 | py | Python
| res_viz.py | Fork-for-Modify/CSENDistance | 6f6d1b87ea776389d543c7873422e44b35a3f0af | ["MIT"] | null | null | null |
| res_viz.py | Fork-for-Modify/CSENDistance | 6f6d1b87ea776389d543c7873422e44b35a3f0af | ["MIT"] | null | null | null |
| res_viz.py | Fork-for-Modify/CSENDistance | 6f6d1b87ea776389d543c7873422e44b35a3f0af | ["MIT"] | null | null | null |
# visualization of the distance estimation results
import os
from os.path import dirname as opd
import pandas as pd
from numpy.random import randint as np_randint
import cv2
import scipy.io as sio
import matplotlib.pyplot as plt
# %% params
data_dir = './data/orig-data/test-data/parking-redgray/'
csv_path = data_dir + 'annotations.csv'
img_dir = data_dir+'image/'
result_dir = './results/test-parking-redgray/CL-CSEN/'
result_name = 'VGG19_mr_0.5_predictions'
save_dir = result_dir + result_name + '/'
save_flag = True
img_suf = '.png'
show_delay = 1000
#%% load data
df = pd.read_csv(csv_path)
resmat = sio.loadmat(result_dir+result_name+'.mat')
y_preds = resmat['y_preds'][0]
y_trues = resmat['y_trues'][0]
#%% visualization
# create dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# show pred distance and true distance
# print('y_preds: ', y_preds, '\n', 'y_trues', y_trues)
# plt.figure()
plt.title('Pred. v.s. True distance')
plt.scatter(y_trues, y_preds, marker = 'o', s=40)
plt.xlabel("actual distance",fontsize=13)
plt.ylabel("predicted distance",fontsize=13)
if save_flag:
plt.savefig(save_dir+'predVStrue.png')
plt.show()
# exit()
# visualze result estimation
last_img_name = ''
idx = -1
for _, row in df.iterrows():
img_name = row['filename'].replace('.txt', img_suf)
# skip out of range data
if row['zloc']>60 or row['zloc']<1:
        print('warning: out of range data skipped!')
continue
else:
idx = idx+1
if last_img_name==img_name:
im = cv2.imread(save_dir + img_name)
else:
im = cv2.imread(img_dir + img_name) # Load the image.
# Object Location.
x1 = int(row['xmin'])
y1 = int(row['ymin'])
x2 = int(row['xmax'])
y2 = int(row['ymax'])
cv2.rectangle(im, (x1, y1), (x2, y2), (0, 255, 0), 2)
string = "(pred {:.2f}, true {:2f})".format(y_preds[idx], y_trues[idx])
# text_color = np_randint(256,size=(1,3)).tolist()[0]
text_color = [50,0,255]
cv2.putText(im, string, (int((x1+x2)/2), int((y1+y2)/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.4, text_color, 1, cv2.LINE_AA)
cv2.imshow("detections", im)
if cv2.waitKey(show_delay) & 0xFF == ord('q'):
break
if save_flag:
cv2.imwrite(save_dir+img_name, im)
last_img_name = img_name
| 28.134146 | 120 | 0.658431 |
7c54a827cf98f5f3f2974230eef0cd3cad380912 | 658 | py | Python
| contracts/tests/test_push.py | drLis/AnchorFintechLedger | e3f6e55a79c75f2385dc2a7cf753e01514464616 | ["MIT"] | null | null | null |
| contracts/tests/test_push.py | drLis/AnchorFintechLedger | e3f6e55a79c75f2385dc2a7cf753e01514464616 | ["MIT"] | null | null | null |
| contracts/tests/test_push.py | drLis/AnchorFintechLedger | e3f6e55a79c75f2385dc2a7cf753e01514464616 | ["MIT"] | null | null | null |
from brownie import *
import pytest
def test_push(accounts, test):
    def transfer(sender, receiver, id):
        sender_hash = test.computeSenderHash(sender, id)
        receiver_hash = test.computeReceiverHash(id, receiver)
        test.push(sender_hash, receiver_hash)
    def check_transfer(sender, receiver, id):
        sender_hash = test.computeSenderHash(sender, id)
        receiver_hash = test.computeReceiverHash(id, receiver)
        return test.anchors(sender_hash) == receiver_hash
transfer(accounts[0], accounts[1], 1)
assert check_transfer(accounts[0], accounts[1], 1)
transfer(accounts[1], accounts[2], 1)
assert check_transfer(accounts[1], accounts[2], 1)
| 34.631579 | 59 | 0.756839 |
96040105712945c9c70cc483ea0cbff3e28de508 | 3,108 | py | Python
| pandas/tests/arrays/interval/test_interval.py | YuechengWu/pandas | 7f753892eb6b29aaa62176cb9f00ad84c092c09a | ["BSD-3-Clause"] | 1 | 2018-12-19T09:09:37.000Z | 2018-12-19T09:09:37.000Z |
| pandas/tests/arrays/interval/test_interval.py | YuechengWu/pandas | 7f753892eb6b29aaa62176cb9f00ad84c092c09a | ["BSD-3-Clause"] | null | null | null |
| pandas/tests/arrays/interval/test_interval.py | YuechengWu/pandas | 7f753892eb6b29aaa62176cb9f00ad84c092c09a | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Interval, IntervalIndex, date_range, timedelta_range
from pandas.core.arrays import IntervalArray
import pandas.util.testing as tm
@pytest.fixture(params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0., 1., 2.]), Index([1., 2., 3.])),
(timedelta_range('0 days', periods=3),
timedelta_range('1 day', periods=3)),
(date_range('20170101', periods=3), date_range('20170102', periods=3)),
(date_range('20170101', periods=3, tz='US/Eastern'),
date_range('20170102', periods=3, tz='US/Eastern'))],
ids=lambda x: str(x[0].dtype))
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
class TestMethods(object):
@pytest.mark.parametrize('repeats', [0, 1, 5])
def test_repeat(self, left_right_dtypes, repeats):
left, right = left_right_dtypes
result = IntervalArray.from_arrays(left, right).repeat(repeats)
expected = IntervalArray.from_arrays(
left.repeat(repeats), right.repeat(repeats))
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize('bad_repeats, msg', [
(-1, 'negative dimensions are not allowed'),
('foo', r'invalid literal for (int|long)\(\) with base 10')])
def test_repeat_errors(self, bad_repeats, msg):
array = IntervalArray.from_breaks(range(4))
with pytest.raises(ValueError, match=msg):
array.repeat(bad_repeats)
@pytest.mark.parametrize('new_closed', [
'left', 'right', 'both', 'neither'])
def test_set_closed(self, closed, new_closed):
# GH 21670
array = IntervalArray.from_breaks(range(10), closed=closed)
result = array.set_closed(new_closed)
expected = IntervalArray.from_breaks(range(10), closed=new_closed)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize('other', [
Interval(0, 1, closed='right'),
IntervalArray.from_breaks([1, 2, 3, 4], closed='right'),
])
def test_where_raises(self, other):
ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4],
closed='left'))
match = "'value.closed' is 'right', expected 'left'."
with pytest.raises(ValueError, match=match):
ser.where([True, False, True], other=other)
class TestSetitem(object):
def test_set_na(self, left_right_dtypes):
left, right = left_right_dtypes
result = IntervalArray.from_arrays(left, right)
result[0] = np.nan
expected_left = Index([left._na_value] + list(left[1:]))
expected_right = Index([right._na_value] + list(right[1:]))
expected = IntervalArray.from_arrays(expected_left, expected_right)
tm.assert_extension_array_equal(result, expected)
def test_repr_matches():
idx = IntervalIndex.from_breaks([1, 2, 3])
a = repr(idx)
b = repr(idx.values)
assert a.replace("Index", "Array") == b
| 36.564706 | 78 | 0.646396 |
912d7400df20fac4aba2c9229a63bb6358e3a7b4 | 1,379 | py | Python
| scripts/utils_specs/download_spec_csv_from_gsheet.py | rathp/time_series_prediction | c776f988c633868c7106041ac91ab56ca9fd7968 | ["MIT"] | 1 | 2020-09-17T20:59:46.000Z | 2020-09-17T20:59:46.000Z |
| scripts/utils_specs/download_spec_csv_from_gsheet.py | rathp/time_series_prediction | c776f988c633868c7106041ac91ab56ca9fd7968 | ["MIT"] | null | null | null |
| scripts/utils_specs/download_spec_csv_from_gsheet.py | rathp/time_series_prediction | c776f988c633868c7106041ac91ab56ca9fd7968 | ["MIT"] | null | null | null |
import argparse
import requests
import json
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config_json_path", type=str)
parser.add_argument("--output_dir", type=str)
args = parser.parse_args()
with open(args.config_json_path, 'r') as f:
gsheet_info = json.load(f)
print("Downloading sheets from provided URL")
for gid, sheet_name in zip(gsheet_info['spec_gid_list'], gsheet_info['spec_sheet_name_list']):
sheet_url = gsheet_info['spec_gsheet_url_pattern']
for var, val in gsheet_info.items():
if sheet_url.count("{{%s}}" % var):
sheet_url = sheet_url.replace("{{%s}}" % var, val)
for var, val in [('gid', gid), ('sheet_name', sheet_name)]:
if sheet_url.count("{{%s}}" % var):
sheet_url = sheet_url.replace("{{%s}}" % var, val)
ans = requests.get(sheet_url)
ans.raise_for_status()
csv_str = ans.content.decode('utf-8')
out_csv_path = os.path.join(
args.output_dir,
gsheet_info['output_csv_path_pattern'].replace("{{sheet_name}}", sheet_name)
)
with open(out_csv_path, 'w') as f:
for line in csv_str.split('\n'):
f.write("%s\n" % line)
print("... wrote sheet %s to %s" % (sheet_name, out_csv_path))
| 36.289474
| 98
| 0.60116
|
a3320b20930cf2228206af860debb3962e5ad12f
| 630
|
py
|
Python
|
cloudmesh/foo/command/foo.py
|
cloudmesh/cloudmesh-bar
|
5ead95e8502e0ee761baa3ddce74680e919237ea
|
[
"Apache-2.0"
] | null | null | null |
cloudmesh/foo/command/foo.py
|
cloudmesh/cloudmesh-bar
|
5ead95e8502e0ee761baa3ddce74680e919237ea
|
[
"Apache-2.0"
] | null | null | null |
cloudmesh/foo/command/foo.py
|
cloudmesh/cloudmesh-bar
|
5ead95e8502e0ee761baa3ddce74680e919237ea
|
[
"Apache-2.0"
] | null | null | null |
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.common.debug import VERBOSE
from cloudmesh.shell.command import map_parameters
class FooCommand(PluginCommand):
# noinspection PyUnusedLocal
@command
def do_foo(self, args, arguments):
"""
::
Usage:
foo -f FILE
foo FILE
foo list
This command does some useful things.
Arguments:
FILE a file name
Options:
-f specify the file
"""
VERBOSE(arguments)
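        # Hedged illustration: for a hypothetical invocation such as
        # "cms foo -f data.txt", docopt parsing of the Usage block above should
        # yield roughly
        #     {'-f': True, 'FILE': 'data.txt', 'list': False}
        # which is the dictionary that VERBOSE(arguments) prints.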
| 21.724138
| 50
| 0.584127
|
6cabf0b0b9c676b7c8442047d674d783bd057768
| 551
|
py
|
Python
|
tests/test_value.py
|
virtuehive/traychain
|
8e3d869ec9354b7787452baf4fcb7d6f0a06d824
|
[
"Apache-2.0"
] | null | null | null |
tests/test_value.py
|
virtuehive/traychain
|
8e3d869ec9354b7787452baf4fcb7d6f0a06d824
|
[
"Apache-2.0"
] | null | null | null |
tests/test_value.py
|
virtuehive/traychain
|
8e3d869ec9354b7787452baf4fcb7d6f0a06d824
|
[
"Apache-2.0"
] | null | null | null |
import traychain as tc
import unittest
from testutils import start_host
ENDPOINT = "/transact/hypothetical"
class NumberTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.host = start_host("test_values")
def testDivideByZero(self):
cxt = tc.Context()
cxt.result = tc.F32(3.14) / tc.F32(0.)
self.assertRaises(tc.error.BadRequest, lambda: self.host.post(ENDPOINT, cxt))
@classmethod
def tearDownClass(cls):
cls.host.stop()
if __name__ == "__main__":
unittest.main()
| 19.678571
| 85
| 0.667877
|
4918eba2d1eea76a6a27c91d0ee87580710d172e
| 874
|
py
|
Python
|
Chapter10/programs/prog05.py
|
gits00/raspberry-pi-computer-vision-programming
|
dfd5588c5d3e410945f862427c0f987536b04d9f
|
[
"MIT"
] | 17
|
2020-08-08T20:47:29.000Z
|
2022-03-12T03:08:21.000Z
|
Chapter10/programs/prog05.py
|
gits00/raspberry-pi-computer-vision-programming
|
dfd5588c5d3e410945f862427c0f987536b04d9f
|
[
"MIT"
] | 1
|
2020-07-27T09:57:19.000Z
|
2020-08-18T10:57:31.000Z
|
Chapter10/programs/prog05.py
|
gits00/raspberry-pi-computer-vision-programming
|
dfd5588c5d3e410945f862427c0f987536b04d9f
|
[
"MIT"
] | 15
|
2020-06-30T01:52:06.000Z
|
2022-02-08T08:28:48.000Z
|
import cv2
import matplotlib.pyplot as plt
img = cv2.imread('/home/pi/book/dataset/4.2.03.tiff', 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
R, G, B = cv2.split(img)
output1_R = cv2.equalizeHist(R)
output1_G = cv2.equalizeHist(G)
output1_B = cv2.equalizeHist(B)
output1 = cv2.merge((output1_R,
output1_G,
output1_B))
clahe = cv2.createCLAHE(clipLimit=2.0,
tileGridSize=(8, 8))
output2_R = clahe.apply(R)
output2_G = clahe.apply(G)
output2_B = clahe.apply(B)
output2 = cv2.merge((output2_R,
output2_G,
output2_B))
output = [img, output1, output2]
titles = ['Original Image',
'Adjusted Histogram', 'CLAHE']
for i in range(3):
plt.subplot(1, 3, i+1)
plt.imshow(output[i])
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
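# Companion sketch (not in the original program): the same CLAHE object applied
# to a single-channel version of the image; cv2.createCLAHE expects 8-bit
# single-channel input for apply().
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
gray_clahe = clahe.apply(gray)
plt.figure()
plt.imshow(gray_clahe, cmap='gray')
plt.title('CLAHE on grayscale')
plt.xticks([])
plt.yticks([])
plt.show()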
| 28.193548
| 56
| 0.606407
|
e428e30c9f39fab1d2cdbe5d303f9da318f50e82
| 18,870
|
py
|
Python
|
seaborn/utils.py
|
romanwerpachowski/seaborn
|
6b7fa4270294e68d79f8ed561ce8eab2b6dbc9f5
|
[
"BSD-3-Clause"
] | 2
|
2020-07-24T04:45:51.000Z
|
2020-09-04T11:10:27.000Z
|
seaborn/utils.py
|
romanwerpachowski/seaborn
|
6b7fa4270294e68d79f8ed561ce8eab2b6dbc9f5
|
[
"BSD-3-Clause"
] | null | null | null |
seaborn/utils.py
|
romanwerpachowski/seaborn
|
6b7fa4270294e68d79f8ed561ce8eab2b6dbc9f5
|
[
"BSD-3-Clause"
] | 2
|
2020-11-02T18:25:54.000Z
|
2021-07-23T16:15:34.000Z
|
"""Utility functions, mostly for internal use."""
import os
import colorsys
import warnings
from urllib.request import urlopen, urlretrieve
from http.client import HTTPException
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.colors as mplcol
import matplotlib.pyplot as plt
__all__ = ["desaturate", "saturate", "set_hls_values",
"despine", "get_dataset_names", "get_data_home", "load_dataset"]
def sort_df(df, *args, **kwargs):
"""Wrapper to handle different pandas sorting API pre/post 0.17."""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
try:
return df.sort_values(*args, **kwargs)
except AttributeError:
return df.sort(*args, **kwargs)
def ci_to_errsize(cis, heights):
"""Convert intervals to error arguments relative to plot heights.
Parameters
----------
cis: 2 x n sequence
sequence of confidence interval limits
heights : n sequence
sequence of plot heights
Returns
-------
errsize : 2 x n array
sequence of error size relative to height values in correct
format as argument for plt.bar
"""
cis = np.atleast_2d(cis).reshape(2, -1)
heights = np.atleast_1d(heights)
errsize = []
for i, (low, high) in enumerate(np.transpose(cis)):
h = heights[i]
elow = h - low
ehigh = high - h
errsize.append([elow, ehigh])
errsize = np.asarray(errsize).T
return errsize
def pmf_hist(a, bins=10):
"""Return arguments to plt.bar for pmf-like histogram of an array.
DEPRECATED: will be removed in a future version.
Parameters
----------
a: array-like
array to make histogram of
bins: int
number of bins
Returns
-------
x: array
left x position of bars
h: array
height of bars
w: float
width of bars
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg, FutureWarning)
n, x = np.histogram(a, bins)
h = n / n.sum()
w = x[1] - x[0]
return x[:-1], h, w
def desaturate(color, prop):
"""Decrease the saturation channel of a color by some percent.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
prop : float
saturation channel of color will be multiplied by this value
Returns
-------
new_color : rgb tuple
desaturated color code in RGB tuple representation
"""
# Check inputs
if not 0 <= prop <= 1:
raise ValueError("prop must be between 0 and 1")
# Get rgb tuple rep
rgb = mplcol.colorConverter.to_rgb(color)
# Convert to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# Desaturate the saturation channel
s *= prop
# Convert back to rgb
new_color = colorsys.hls_to_rgb(h, l, s)
return new_color
def saturate(color):
"""Return a fully saturated color with the same hue.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
Returns
-------
new_color : rgb tuple
saturated color code in RGB tuple representation
"""
return set_hls_values(color, s=1)
def set_hls_values(color, h=None, l=None, s=None): # noqa
"""Independently manipulate the h, l, or s channels of a color.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
h, l, s : floats between 0 and 1, or None
new values for each channel in hls space
Returns
-------
new_color : rgb tuple
new color code in RGB tuple representation
"""
# Get an RGB tuple representation
rgb = mplcol.colorConverter.to_rgb(color)
vals = list(colorsys.rgb_to_hls(*rgb))
for i, val in enumerate([h, l, s]):
if val is not None:
vals[i] = val
rgb = colorsys.hls_to_rgb(*vals)
return rgb
def axlabel(xlabel, ylabel, **kwargs):
"""Grab current axis and label it.
DEPRECATED: will be removed in a future version.
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg, FutureWarning)
ax = plt.gca()
ax.set_xlabel(xlabel, **kwargs)
ax.set_ylabel(ylabel, **kwargs)
def remove_na(vector):
"""Helper method for removing null values from data vectors.
Parameters
----------
vector : vector object
Must implement boolean masking with [] subscript syntax.
Returns
-------
    clean : same type as ``vector``
Vector of data with null values removed. May be a copy or a view.
"""
return vector[pd.notnull(vector)]
def get_color_cycle():
"""Return the list of colors in the current matplotlib color cycle
Parameters
----------
None
Returns
-------
colors : list
List of matplotlib colors in the current cycle, or dark gray if
the current color cycle is empty.
"""
cycler = mpl.rcParams['axes.prop_cycle']
return cycler.by_key()['color'] if 'color' in cycler.keys else [".15"]
def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or dict, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward). A single value
applies to all spines; a dict can be used to set offset values per
side.
trim : bool, optional
If True, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
"""
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
# Toggle the spine objects
is_visible = not locals()[side]
ax_i.spines[side].set_visible(is_visible)
if offset is not None and is_visible:
try:
val = offset.get(side, 0)
except AttributeError:
val = offset
ax_i.spines[side].set_position(('outward', val))
# Potentially move the ticks
if left and not right:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.minorTicks
)
ax_i.yaxis.set_ticks_position("right")
for t in ax_i.yaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.yaxis.minorTicks:
t.tick2line.set_visible(min_on)
if bottom and not top:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.minorTicks
)
ax_i.xaxis.set_ticks_position("top")
for t in ax_i.xaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.xaxis.minorTicks:
t.tick2line.set_visible(min_on)
if trim:
# clip off the parts of the spines that extend past major ticks
xticks = np.asarray(ax_i.get_xticks())
if xticks.size:
firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
xticks)[0]
lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
xticks)[-1]
ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
ax_i.spines['top'].set_bounds(firsttick, lasttick)
newticks = xticks.compress(xticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_xticks(newticks)
yticks = np.asarray(ax_i.get_yticks())
if yticks.size:
firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
yticks)[-1]
ax_i.spines['left'].set_bounds(firsttick, lasttick)
ax_i.spines['right'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_yticks(newticks)
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
DEPRECATED: will be removed in a future version.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg, FutureWarning)
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores
def ci(a, which=95, axis=None):
"""Return a percentile range from an array of values."""
p = 50 - which / 2, 50 + which / 2
return np.percentile(a, p, axis)
def sig_stars(p):
"""Return a R-style significance string corresponding to p values.
DEPRECATED: will be removed in a future version.
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg, FutureWarning)
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers.
DEPRECATED: will be removed in a future version.
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg, FutureWarning)
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
def get_dataset_names():
"""Report available example datasets, useful for reporting issues."""
# delayed import to not demand bs4 unless this function is actually used
from bs4 import BeautifulSoup
http = urlopen('https://github.com/mwaskom/seaborn-data/')
gh_list = BeautifulSoup(http)
return [l.text.replace('.csv', '')
for l in gh_list.find_all("a", {"class": "js-navigation-open"})
if l.text.endswith('.csv')]
def get_data_home(data_home=None):
"""Return a path to the cache directory for example datasets.
This directory is then used by :func:`load_dataset`.
If the ``data_home`` argument is not specified, it tries to read from the
``SEABORN_DATA`` environment variable and defaults to ``~/seaborn-data``.
"""
if data_home is None:
data_home = os.environ.get('SEABORN_DATA',
os.path.join('~', 'seaborn-data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_dataset(name, cache=True, data_home=None, **kws):
"""Load an example dataset from the online repository (requires internet).
This function provides quick access to a small number of example datasets
that are useful for documenting seaborn or generating reproducible examples
for bug reports. It is not necessary for normal usage.
Note that some of the datasets have a small amount of preprocessing applied
to define a proper ordering for categorical variables.
Use :func:`get_dataset_names` to see a list of available datasets.
Parameters
----------
name : str
Name of the dataset (``{name}.csv`` on
https://github.com/mwaskom/seaborn-data).
cache : boolean, optional
If True, try to load from the local cache first, and save to the cache
if a download is required.
data_home : string, optional
The directory in which to cache data; see :func:`get_data_home`.
kws : keys and values, optional
        Additional keyword arguments are passed through to
:func:`pandas.read_csv`.
Returns
-------
df : :class:`pandas.DataFrame`
Tabular data, possibly with some preprocessing applied.
"""
path = ("https://raw.githubusercontent.com/"
"mwaskom/seaborn-data/master/{}.csv")
full_path = path.format(name)
if cache:
cache_path = os.path.join(get_data_home(data_home),
os.path.basename(full_path))
if not os.path.exists(cache_path):
urlretrieve(full_path, cache_path)
full_path = cache_path
df = pd.read_csv(full_path, **kws)
if df.iloc[-1].isnull().all():
df = df.iloc[:-1]
# Set some columns as a categorical type with ordered levels
if name == "tips":
df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
if name == "flights":
df["month"] = pd.Categorical(df["month"], df.month.unique())
if name == "exercise":
df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
if name == "titanic":
df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))
return df
def axis_ticklabels_overlap(labels):
"""Return a boolean for whether the list of ticklabels have overlaps.
Parameters
----------
labels : list of matplotlib ticklabels
Returns
-------
overlap : boolean
True if any of the labels overlap.
"""
if not labels:
return False
try:
bboxes = [l.get_window_extent() for l in labels]
overlaps = [b.count_overlaps(bboxes) for b in bboxes]
return max(overlaps) > 1
except RuntimeError:
# Issue on macos backend raises an error in the above code
return False
def axes_ticklabels_overlap(ax):
"""Return booleans for whether the x and y ticklabels on an Axes overlap.
Parameters
----------
ax : matplotlib Axes
Returns
-------
x_overlap, y_overlap : booleans
True when the labels on that axis overlap.
"""
return (axis_ticklabels_overlap(ax.get_xticklabels()),
axis_ticklabels_overlap(ax.get_yticklabels()))
def locator_to_legend_entries(locator, limits, dtype):
"""Return levels and formatted levels for brief numeric legends."""
raw_levels = locator.tick_values(*limits).astype(dtype)
class dummy_axis:
def get_view_interval(self):
return limits
if isinstance(locator, mpl.ticker.LogLocator):
formatter = mpl.ticker.LogFormatter()
else:
formatter = mpl.ticker.ScalarFormatter()
formatter.axis = dummy_axis()
# TODO: The following two lines should be replaced
# once pinned matplotlib>=3.1.0 with:
# formatted_levels = formatter.format_ticks(raw_levels)
formatter.set_locs(raw_levels)
formatted_levels = [formatter(x) for x in raw_levels]
return raw_levels, formatted_levels
def relative_luminance(color):
"""Calculate the relative luminance of a color according to W3C standards
Parameters
----------
color : matplotlib color or sequence of matplotlib colors
Hex code, rgb-tuple, or html color name.
Returns
-------
luminance : float(s) between 0 and 1
"""
rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]
rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)
lum = rgb.dot([.2126, .7152, .0722])
try:
return lum.item()
except ValueError:
return lum
def to_utf8(obj):
"""Return a string representing a Python object.
Strings (i.e. type ``str``) are returned unchanged.
Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.
For other objects, the method ``__str__()`` is called, and the result is
returned as a string.
Parameters
----------
obj : object
Any Python object
Returns
-------
s : str
UTF-8-decoded string representation of ``obj``
"""
if isinstance(obj, str):
return obj
try:
return obj.decode(encoding="utf-8")
except AttributeError: # obj is not bytes-like
return str(obj)
def _network(t=None, url='https://google.com'):
"""
Decorator that will skip a test if `url` is unreachable.
Parameters
----------
t : function, optional
url : str, optional
"""
import nose
if t is None:
return lambda x: _network(x, url=url)
def wrapper(*args, **kwargs):
# attempt to connect
try:
f = urlopen(url)
except (IOError, HTTPException):
raise nose.SkipTest()
else:
f.close()
return t(*args, **kwargs)
return wrapper
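# A hedged usage sketch for a few of the helpers above; the colors and offsets
# are illustrative values, not part of the library.
if __name__ == "__main__":
    print(desaturate("#ff0000", 0.5))            # red at half saturation
    print(set_hls_values("#ff0000", l=0.8))      # a lighter red
    _fig, _ax = plt.subplots()
    _ax.plot([0, 1], [0, 1])
    despine(ax=_ax, offset=5, trim=True)         # drop the top/right spines
    plt.close(_fig)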
| 29.12037
| 79
| 0.605034
|
cc870662a4e16e7d3e3f1453d9cebbe9e102cb34
| 5,201
|
py
|
Python
|
kubernetes/client/api/core_api.py
|
sthagen/kubernetes-client-python
|
3a183048d7d568ba5ea418bcfb8f61713908d3ea
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/api/core_api.py
|
sthagen/kubernetes-client-python
|
3a183048d7d568ba5ea418bcfb8f61713908d3ea
|
[
"Apache-2.0"
] | 3
|
2021-11-30T03:11:13.000Z
|
2022-02-09T03:39:41.000Z
|
kubernetes/client/api/core_api.py
|
sthagen/kubernetes-client-python
|
3a183048d7d568ba5ea418bcfb8f61713908d3ea
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CoreApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_versions(self, **kwargs): # noqa: E501
"""get_api_versions # noqa: E501
get available API versions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_versions(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIVersions
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_versions_with_http_info(**kwargs) # noqa: E501
def get_api_versions_with_http_info(self, **kwargs): # noqa: E501
"""get_api_versions # noqa: E501
get available API versions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_versions_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIVersions, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_versions" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/api/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIVersions', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
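    # Usage sketch (hedged; needs a reachable cluster and a configured kubeconfig):
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     print(client.CoreApi().get_api_versions().versions)
    #     # asynchronous variant, as described in the docstrings above:
    #     thread = client.CoreApi().get_api_versions(async_req=True)
    #     print(thread.get().versions)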
| 36.370629
| 124
| 0.59777
|
1829f2310b132819505a34d44774b0ec98f902d2
| 9,032
|
py
|
Python
|
admin_interface/migrations/0006_bytes_to_str.py
|
Mustafa-Abu-Ghazy/django-admin-interface
|
a04878a1b3220e9e33e15f06cc2b7d075e61542e
|
[
"MIT"
] | null | null | null |
admin_interface/migrations/0006_bytes_to_str.py
|
Mustafa-Abu-Ghazy/django-admin-interface
|
a04878a1b3220e9e33e15f06cc2b7d075e61542e
|
[
"MIT"
] | null | null | null |
admin_interface/migrations/0006_bytes_to_str.py
|
Mustafa-Abu-Ghazy/django-admin-interface
|
a04878a1b3220e9e33e15f06cc2b7d075e61542e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import colorfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("admin_interface", "0005_add_recent_actions_visible"),
]
operations = [
migrations.AlterField(
model_name="theme",
name="css_delete_button_background_color",
field=colorfield.fields.ColorField(
blank=True,
default="#BA2121",
help_text="#BA2121",
max_length=10,
verbose_name="background color",
),
),
migrations.AlterField(
model_name="theme",
name="css_delete_button_background_hover_color",
field=colorfield.fields.ColorField(
blank=True,
default="#A41515",
help_text="#A41515",
max_length=10,
verbose_name="background hover color",
),
),
migrations.AlterField(
model_name="theme",
name="css_delete_button_text_color",
field=colorfield.fields.ColorField(
blank=True,
default="#FFFFFF",
help_text="#FFFFFF",
max_length=10,
verbose_name="text color",
),
),
migrations.AlterField(
model_name="theme",
name="css_generic_link_color",
field=colorfield.fields.ColorField(
blank=True,
default="#0C3C26",
help_text="#0C3C26",
max_length=10,
verbose_name="link color",
),
),
migrations.AlterField(
model_name="theme",
name="css_generic_link_hover_color",
field=colorfield.fields.ColorField(
blank=True,
default="#156641",
help_text="#156641",
max_length=10,
verbose_name="link hover color",
),
),
migrations.AlterField(
model_name="theme",
name="css_header_background_color",
field=colorfield.fields.ColorField(
blank=True,
default="#0C4B33",
help_text="#0C4B33",
max_length=10,
verbose_name="background color",
),
),
migrations.AlterField(
model_name="theme",
name="css_header_link_color",
field=colorfield.fields.ColorField(
blank=True,
default="#FFFFFF",
help_text="#FFFFFF",
max_length=10,
verbose_name="link color",
),
),
migrations.AlterField(
model_name="theme",
name="css_header_link_hover_color",
field=colorfield.fields.ColorField(
blank=True,
default="#C9F0DD",
help_text="#C9F0DD",
max_length=10,
verbose_name="link hover color",
),
),
migrations.AlterField(
model_name="theme",
name="css_header_text_color",
field=colorfield.fields.ColorField(
blank=True,
default="#44B78B",
help_text="#44B78B",
max_length=10,
verbose_name="text color",
),
),
migrations.AlterField(
model_name="theme",
name="css_module_background_color",
field=colorfield.fields.ColorField(
blank=True,
default="#44B78B",
help_text="#44B78B",
max_length=10,
verbose_name="background color",
),
),
migrations.AlterField(
model_name="theme",
name="css_module_link_color",
field=colorfield.fields.ColorField(
blank=True,
default="#FFFFFF",
help_text="#FFFFFF",
max_length=10,
verbose_name="link color",
),
),
migrations.AlterField(
model_name="theme",
name="css_module_link_hover_color",
field=colorfield.fields.ColorField(
blank=True,
default="#C9F0DD",
help_text="#C9F0DD",
max_length=10,
verbose_name="link hover color",
),
),
migrations.AlterField(
model_name="theme",
name="css_module_rounded_corners",
field=models.BooleanField(default=True, verbose_name="rounded corners"),
),
migrations.AlterField(
model_name="theme",
name="css_module_text_color",
field=colorfield.fields.ColorField(
blank=True,
default="#FFFFFF",
help_text="#FFFFFF",
max_length=10,
verbose_name="text color",
),
),
migrations.AlterField(
model_name="theme",
name="css_save_button_background_color",
field=colorfield.fields.ColorField(
blank=True,
default="#0C4B33",
help_text="#0C4B33",
max_length=10,
verbose_name="background color",
),
),
migrations.AlterField(
model_name="theme",
name="css_save_button_background_hover_color",
field=colorfield.fields.ColorField(
blank=True,
default="#0C3C26",
help_text="#0C3C26",
max_length=10,
verbose_name="background hover color",
),
),
migrations.AlterField(
model_name="theme",
name="css_save_button_text_color",
field=colorfield.fields.ColorField(
blank=True,
default="#FFFFFF",
help_text="#FFFFFF",
max_length=10,
verbose_name="text color",
),
),
migrations.AlterField(
model_name="theme",
name="list_filter_dropdown",
field=models.BooleanField(default=False, verbose_name="use dropdown"),
),
migrations.AlterField(
model_name="theme",
name="logo_visible",
field=models.BooleanField(default=True, verbose_name="visible"),
),
migrations.AlterField(
model_name="theme",
name="name",
field=models.CharField(default="Django", max_length=50),
),
migrations.AlterField(
model_name="theme",
name="related_modal_active",
field=models.BooleanField(default=True, verbose_name="active"),
),
migrations.AlterField(
model_name="theme",
name="related_modal_background_color",
field=colorfield.fields.ColorField(
blank=True,
default="#000000",
help_text="#000000",
max_length=10,
verbose_name="background color",
),
),
migrations.AlterField(
model_name="theme",
name="related_modal_background_opacity",
field=models.FloatField(
choices=[
(0.1, "10%"),
(0.2, "20%"),
(0.3, "30%"),
(0.4, "40%"),
(0.5, "50%"),
(0.6, "60%"),
(0.7, "70%"),
(0.8, "80%"),
(0.9, "90%"),
],
default=0.2,
help_text="20%",
verbose_name="background opacity",
),
),
migrations.AlterField(
model_name="theme",
name="related_modal_rounded_corners",
field=models.BooleanField(default=True, verbose_name="rounded corners"),
),
migrations.AlterField(
model_name="theme",
name="title",
field=models.CharField(
blank=True, default="Django administration", max_length=50
),
),
migrations.AlterField(
model_name="theme",
name="title_color",
field=colorfield.fields.ColorField(
blank=True,
default="#F5DD5D",
help_text="#F5DD5D",
max_length=10,
verbose_name="title color",
),
),
migrations.AlterField(
model_name="theme",
name="title_visible",
field=models.BooleanField(default=True, verbose_name="visible"),
),
]
| 32.489209
| 84
| 0.481178
|
f9a84236d09bb8b64cea330c2a38530fdf6003c0
| 9,547
|
py
|
Python
|
models/backbone.py
|
GUOShuxuan/detr
|
80d45bf5df940ac6457e7cc9e3ccd6441518a903
|
[
"Apache-2.0"
] | null | null | null |
models/backbone.py
|
GUOShuxuan/detr
|
80d45bf5df940ac6457e7cc9e3ccd6441518a903
|
[
"Apache-2.0"
] | null | null | null |
models/backbone.py
|
GUOShuxuan/detr
|
80d45bf5df940ac6457e7cc9e3ccd6441518a903
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import os
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
## added for AutoNet backbone support
from sandbox.ptpoc.utils import deserialize_object, load_spec
import IPython
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:  # even when train_backbone is True, only layer2/3/4 stay trainable; layer1 and the stem are frozen
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
# IPython.embed()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
# input: torch.Size([2, 3, 604, 960])
        # xs['0'].size(): torch.Size([2, 2048, 19, 30]); xs is an OrderedDict
# IPython.embed()
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask) #x.size():torch.Size([2, 2048, 19, 30]) mask.size():[2, 19, 30])
# IPython.embed()
return out
class BackboneAutoNetBase(BackboneBase):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super(BackboneBase, self).__init__()
for name, parameter in backbone.named_parameters():
# print(name)
if not train_backbone: #or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
# IPython.embed()
# if return_interm_layers:
# return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
# else:
# return_layers = {'layer4': "0"}
self.body = backbone
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors) #torch.Size([2, 256, 38, 60])
# IPython.embed()
xs = {'0': xs}
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask) # torch.Size([2, 256, 38, 60])
# IPython.embed()
return out
class BackboneAutoNet(BackboneAutoNetBase):
# add autonet backbone
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
training_spec: str,
auto_checkpoint: str
):
if name.startswith('autonet'):
# training_spec = 'sandbox/williamz/detr/res_autonet/autonet_training_spec.yaml'
# training_spec = os.path.join(os.environ["HOME"],'datasets/specs/autonet_training_spec.yaml')
training_spec = load_spec(training_spec)
model = deserialize_object(training_spec["model"])
# autonet checkpoint
# checkpoint = 'sandbox/williamz/detr/res_autonet/final_epoch.checkpoint'
checkpoint = auto_checkpoint
# checkpoint = os.path.join(os.environ["HOME"],'datasets/autonet/final_epoch.checkpoint')
if checkpoint is not None and os.path.isfile(checkpoint) and is_main_process():
print(f'---------- Loading checkpoint for AutoNet -----')
loaded_states = torch.load(checkpoint)
model_state = loaded_states["model_state"]
model.load_state_dict(model_state, strict=False)
# backbone = model
else:
print(f'---------- No checkpoint for AutoNet -----')
# get drivenet
# IPython.embed()
modules = []
for block in model._blocks:
if 'drive2d' in block["task_name"]:
modules.append(getattr(model, block['name']))
backbone = nn.Sequential(*modules[:-1])
num_channels = 256
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
if name.startswith('resnet'):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
# pretrained=False, norm_layer=FrozenBatchNorm2d)
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
if args.backbone.startswith('resnet'):
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
elif args.backbone.startswith('autonet'):
backbone = BackboneAutoNet(args.backbone, train_backbone, return_interm_layers, args.dilation, args.training_spec, args.auto_checkpoint)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
# debug test for drivenet part
# inputs = torch.rand((4, 3, 544, 960))
# out = model(inputs)
# out.keys(): dict_keys(['drive2d', 'openroad', 'map', 'wait_sky'])
# out['drive2d'].keys(): ['1_cycle', '1_person', '1_vehicle']
# out['drive2d']['1_cycle'].keys(): dict_keys(['cov', 'bbox']) # 1, 4
# bbox = out['drive2d']['1_cycle']['bbox']
# cov = out['drive2d']['1_cycle']['cov']
# # module = getattr(model, 'drive2d')
# # drive2d = getattr(model, 'drive2d')
# # rebuild drive2d
# modules = []
# for block in model._blocks:
# if 'drive2d' in block["task_name"]:
# modules.append(getattr(model, block['name']))
# drivenet = nn.ModuleList(modules)
# f = open('/home/shuxuang/experiments/demos/detection-f/drive2d.txt', 'w')
# f.write(str(drivenet))
# # drivenet[0](inputs).size(): [4, 64, 136, 240]
# # drivenet[1](drivenet[0](inputs)).size(): ([4, 256, 34, 60])
# # dout = drivenet[2](drivenet[1](drivenet[0](inputs)))
# # d_bbox = dout['1_cycle']['bbox']
# d_cov = dout['1_cycle']['cov']
# torch.all(torch.eq(bbox, d_bbox)) #True
# torch.all(torch.eq(cov, d_cov)) # True
# drivnet = nn.Sequential(*modules)
# ddout = drivnet(inputs)
# dd_bbox = ddout['1_cycle']['bbox']
# dd_cov = ddout['1_cycle']['cov']
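# Usage sketch (hypothetical values; attribute names mirror what build_backbone
# and build_position_encoding read in this repo):
# from argparse import Namespace
# args = Namespace(backbone='resnet50', lr_backbone=1e-5, masks=False,
#                  dilation=False, hidden_dim=256, position_embedding='sine',
#                  training_spec=None, auto_checkpoint=None)
# model = build_backbone(args)
# out, pos = model(NestedTensor(torch.rand(2, 3, 544, 960),
#                               torch.zeros(2, 544, 960, dtype=torch.bool)))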
| 41.689956
| 171
| 0.62648
|
ff09d56f312c9b56f4723c7d2a1174d177cdb820
| 1,130
|
py
|
Python
|
misc/make-apidoc.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | 1
|
2019-03-31T09:56:11.000Z
|
2019-03-31T09:56:11.000Z
|
misc/make-apidoc.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | null | null | null |
misc/make-apidoc.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Launcher for API doc upload tool.
"""
import os
import sys
# Unchanged, running from checkout, use the parent directory, the nuitka
# package ought be there.
sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), "..")))
# isort:start
from nuitka.tools.quality.apidoc.__main__ import main
main()
| 31.388889
| 83
| 0.718584
|
2c317d60277bbe6cee39f90c753471ec43c003e6
| 767
|
py
|
Python
|
bin/clean_log_files.py
|
davidcorne/markdown-editor
|
1d6f2684f06dd7f350c68588aa3a6b5d61e3fdd5
|
[
"MIT"
] | null | null | null |
bin/clean_log_files.py
|
davidcorne/markdown-editor
|
1d6f2684f06dd7f350c68588aa3a6b5d61e3fdd5
|
[
"MIT"
] | null | null | null |
bin/clean_log_files.py
|
davidcorne/markdown-editor
|
1d6f2684f06dd7f350c68588aa3a6b5d61e3fdd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Written by: DGC
# python imports
import os
import logging
import sys
# local imports
sys.path.append(".")
sys.path.append("..")
import Log
# immediately stop logging
logging.getLogger("").handlers = []
#==============================================================================
def main():
log_directory = os.path.dirname(Log.log_file())
print("Removing: ")
for temp_file in os.listdir(log_directory):
if ("Markdown_Editor_" in temp_file and temp_file[-4:] == ".log"):
file_path = os.path.join(log_directory, temp_file)
print(file_path)
os.remove(file_path)
#==============================================================================
if (__name__ == "__main__"):
main()
| 25.566667
| 79
| 0.514993
|
cfa1c410b8f3ef96e7130028a58645925fc06877
| 2,170
|
py
|
Python
|
share/qt/extract_strings_qt.py
|
needycoin/needycore
|
05c0ce57f27d66c37696a9c5eb3c067120fd68b8
|
[
"MIT"
] | 1
|
2020-06-04T14:05:04.000Z
|
2020-06-04T14:05:04.000Z
|
share/qt/extract_strings_qt.py
|
needycoin/needycore
|
05c0ce57f27d66c37696a9c5eb3c067120fd68b8
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
needycoin/needycore
|
05c0ce57f27d66c37696a9c5eb3c067120fd68b8
|
[
"MIT"
] | 1
|
2020-07-13T17:00:15.000Z
|
2020-07-13T17:00:15.000Z
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/needycoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *needycoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("needycoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
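# Worked example for parse_po above: given a fragment such as
#     msgid "Hello"
#     msgstr ""
# it returns [(['"Hello"'], ['""'])] -- the raw quoted lines, which the loop
# above joins with '\n' and wraps in QT_TRANSLATE_NOOP (msgids equal to EMPTY
# are skipped).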
| 25.833333
| 105
| 0.620737
|
0ec4578ba3bfa1049ec9f68f568213567bfe363d
| 857
|
py
|
Python
|
python/mxnet/ndarray/random.py
|
saurabh3949/mxnet
|
e25074a469b45f2cbde68e2a0c8963daea93b66b
|
[
"Apache-2.0"
] | 4
|
2017-11-17T07:28:09.000Z
|
2019-07-23T06:24:16.000Z
|
python/mxnet/ndarray/random.py
|
saurabh3949/mxnet
|
e25074a469b45f2cbde68e2a0c8963daea93b66b
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/ndarray/random.py
|
saurabh3949/mxnet
|
e25074a469b45f2cbde68e2a0c8963daea93b66b
|
[
"Apache-2.0"
] | 2
|
2019-06-12T12:40:20.000Z
|
2020-11-03T14:33:14.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Random distribution generator NDArray API of MXNet."""
__all__ = []
| 42.85
| 62
| 0.767795
|
5a95f4834a853c917ee718bb2b595b99ee0b52fa
| 1,892
|
py
|
Python
|
tests/l0_retr_test.py
|
colligant/wrfxpy
|
eacce15cad55820d9fb82dac9597021e00eb99f8
|
[
"MIT"
] | null | null | null |
tests/l0_retr_test.py
|
colligant/wrfxpy
|
eacce15cad55820d9fb82dac9597021e00eb99f8
|
[
"MIT"
] | null | null | null |
tests/l0_retr_test.py
|
colligant/wrfxpy
|
eacce15cad55820d9fb82dac9597021e00eb99f8
|
[
"MIT"
] | 1
|
2020-11-23T23:40:43.000Z
|
2020-11-23T23:40:43.000Z
|
#
# Dalton Burke
#
# Test correct functionality of the retrieval of level0 files
import os
import subprocess
import datetime
import shutil
# Set root directory of wrfxpy as working directory
script_path = os.path.realpath(__file__)
# + 6 gets us to wrfxpy
index = script_path.find('wrfxpy/tests') + 6
os.chdir(script_path[:index])
# Path where the download files should go
local_path = 'tests/l0_test_ingest'
# Remove data from old tests
shutil.rmtree(local_path, ignore_errors=True)
current_time = datetime.datetime.utcnow()
ten_hours_ago = str(current_time - datetime.timedelta(hours=10)).replace(' ', '_')
five_hours_ago = str(current_time - datetime.timedelta(hours=5)).replace(' ', '_')
current_time = str(current_time).replace(' ', '_')
source_types = ['MODIS_AQUA', 'MODIS_TERRA', 'VIIRS_NPP']
# -----------------------------------------------------------------------
# Download all data sources from the last 5 hours
print "TESTING SOURCES FOR FILES IN LAST 5 HOURS\n"
for t in source_types:
print "\nRETRIEVING %s FILES FROM THE LAST 5 HOURS WITH CALL:" % t
print './level0_retr.sh %s %s %s %s \n' % (t, five_hours_ago, current_time, local_path)
subprocess.call(['./level0_retr.sh', t, five_hours_ago, current_time, local_path])
print "\nDONE RETRIEVING FILES FROM LAST 5 HOURS \n\n"
# -----------------------------------------------------------------------
# Download all data sources from the last 10 hours
# (some data we should already have, so those should be skipped)
print "TESTING SOURCES FOR FILES IN LAST 10 HOURS\n"
for t in source_types:
print "\nRETRIEVING %s FILES FROM THE LAST 10 HOURS WITH CALL:" % t
print './level0_retr.sh %s %s %s %s \n' % (t, ten_hours_ago, current_time, local_path)
subprocess.call(['./level0_retr.sh', t, ten_hours_ago, current_time, local_path])
print "\nDONE RETRIEVING FILES FROM LAST 10 HOURS"
| 31.016393
| 91
| 0.676533
|
d4e577c157a6a62760a3d078b1487ac6618cafc6
| 1,973
|
py
|
Python
|
_Spark_makeSource_DailyC.py
|
developeration/stock
|
d1df7e152fd1fc7b5f0446148276cd16928071bb
|
[
"MIT"
] | null | null | null |
_Spark_makeSource_DailyC.py
|
developeration/stock
|
d1df7e152fd1fc7b5f0446148276cd16928071bb
|
[
"MIT"
] | null | null | null |
_Spark_makeSource_DailyC.py
|
developeration/stock
|
d1df7e152fd1fc7b5f0446148276cd16928071bb
|
[
"MIT"
] | null | null | null |
from pyspark import SparkConf
from _Setting import StockSetting
import tushare as ts
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
import json
#spark-submit --master yarn --py-files ./_Setting.py --deploy-mode cluster ./_Spark_makeSource_DailyC.py
if __name__ == "__main__":
settings = StockSetting()
spark = SparkSession.builder \
.appName("_Spark_makeSource") \
.master("yarn") \
.config('spark.submit.pyFiles', '/work/dev/stock/_Setting.py') \
.getOrCreate()
# .appName("_Spark_makeSource_DailyC") \
# .getOrCreate()
# conf = SparkConf()
# conf.set("spark.default.parallelism","15")
sc = spark.sparkContext
pro = ts.pro_api(settings.tushareKey)
# data_local = pro.stock_basic(exchange='', list_status='L',market='主板')
# data_hadop = spark.createDataFrame(data_local)
savepath = settings.datasource_path+"stock_basic_main"
# data_hadop.write.mode("overwrite").format("json").save(savepath)
data_hadop = spark.read.format("json").load(savepath)
#Debug
#data_hadop.show()
def getdailydata(item):
try:
savepath = settings.datasource_moneyflow_path+item.ts_code
if(settings.file_exists(sc,savepath) == False):
print("moneyflow",item.ts_code)
data_daily_local = pro.moneyflow(ts_code=item.ts_code )
if data_daily_local.empty :
return
data_daily_hadop = spark.createDataFrame(data_daily_local)
data_daily_hadop.write.mode("overwrite").format("json").save(savepath)
except Exception as e:
print(item.ts_code,"moneyflow",e)
#data_hadop.foreach(getdailydata)
stock_list = data_hadop.collect()
for item in stock_list:
getdailydata(item)
print("Finished")
| 35.872727
| 107
| 0.657375
|
a5513efdfc8f1ec2e31f933562c3afc866a8dd62
| 1,180
|
py
|
Python
|
thrift/compiler/py/setup.py
|
jesboat/fbthrift
|
7d8e1dcec59024e526e6768d3d4a66f6c4abe5ac
|
[
"Apache-2.0"
] | 5
|
2015-11-23T00:26:06.000Z
|
2020-07-31T12:56:08.000Z
|
thrift/compiler/py/setup.py
|
jesboat/fbthrift
|
7d8e1dcec59024e526e6768d3d4a66f6c4abe5ac
|
[
"Apache-2.0"
] | 2
|
2017-05-10T15:43:34.000Z
|
2018-01-04T22:36:04.000Z
|
thrift/compiler/py/setup.py
|
jesboat/fbthrift
|
7d8e1dcec59024e526e6768d3d4a66f6c4abe5ac
|
[
"Apache-2.0"
] | 7
|
2017-09-01T01:30:25.000Z
|
2019-02-04T17:46:24.000Z
|
#!/usr/bin/env python
import sys
import shutil
try:
from setuptools import setup, Extension
except:
from distutils.core import setup, Extension, Command
def run_setup():
if sys.argv[1] == 'build':
shutil.copy('.libs/frontend.so', 'frontend.so')
setup(name = 'thrift-py',
version = '0.9.0',
description = 'Thrift python compiler',
author = ['Thrift Developers'],
author_email = ['dev@thrift.apache.org'],
url = 'http://thrift.apache.org',
license = 'Apache License 2.0',
packages = [
'thrift_compiler',
'thrift_compiler.generate',
],
package_dir = {'thrift_compiler' : '.'},
package_data = {'thrift_compiler':['frontend.so']},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Networking'
],
zip_safe = False,
)
run_setup()
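# Hedged usage note: presumably run from the compiler build tree as
# "python setup.py build" (which copies .libs/frontend.so next to this file,
# per run_setup above) and then "python setup.py install".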
| 29.5
| 59
| 0.561864
|
b8d4e9959fb21ceacb071fcf88fad10f793901d6
| 2,529
|
py
|
Python
|
homework/Testing with Examples (Data Format)/impl-temperature04.py
|
rvprasad/software-testing-course
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
[
"CC-BY-4.0"
] | 11
|
2018-02-08T05:23:28.000Z
|
2021-05-24T13:23:56.000Z
|
homework/Testing with Examples (Data Format)/impl-temperature04.py
|
rvprasad/software-testing-course
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
[
"CC-BY-4.0"
] | null | null | null |
homework/Testing with Examples (Data Format)/impl-temperature04.py
|
rvprasad/software-testing-course
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
[
"CC-BY-4.0"
] | 2
|
2020-09-15T08:51:22.000Z
|
2021-01-26T12:07:18.000Z
|
import re
class PhysicalInfo(object):
def set_date(self, date):
if not isinstance(date, str):
raise ValueError("date should be a string")
t = date.split("-")
if len(t) != 3:
raise ValueError("date should be in MM-DD-YYYY format")
if re.search(r'[^0-9\-]', date):
raise ValueError("date should contain only numbers and -")
year = int(t[2])
if year < 1900 or year > 2100:
raise ValueError("invalid year {0}".format(year))
is_leap = year % 4 == 0 and (year % 400 == 0 or year % 100 != 0)
month = int(t[0])
if month < 1 or month > 12:
raise ValueError("invalid month {0}".format(month))
day_limit = 31
if month in [4, 6, 7, 9, 11]:
day_limit = 30
elif month == 2:
if is_leap:
day_limit = 29
else:
day_limit = 28
day = int(t[1])
if day < 1 or day > day_limit:
raise ValueError("invalid day {0}".format(day))
self.date = date
def set_name(self, name):
if not isinstance(name, str):
raise ValueError("name should be a string")
tmp1 = name.lower()
if re.search(r'[^a-z0-9 -]', tmp1):
raise ValueError("name should contain letters, numbers, -, and space")
if len(tmp1.strip()) < 2 or len(tmp1.replace("-", '')) < 2:
raise ValueError("name should be at least two characters long")
if not re.search(r'[a-z]', tmp1):
raise ValueError("name should contain at least one character")
self.name = name
def set_gender(self, gender):
if gender != 'M' and gender != 'F':
raise ValueError("gender should be either M or F")
self.gender = gender
def set_height(self, height):
if not isinstance(height, int):
raise ValueError("height should be an integer")
if height < 17 or height > 84:
raise ValueError("height should be an integer between 17 and 84")
self.height = height
def set_temperature(self, temperature):
if not isinstance(temperature, float):
raise ValueError("temperature should be a float")
#if temperature < 95 or temperature > 104:
if temperature > 104:
raise ValueError("temperature should be a float between 95 and 104")
self.temperature = temperature
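# Usage sketch (not part of the exercise): values chosen so that every setter
# above accepts them; any out-of-range value raises ValueError instead.
if __name__ == "__main__":
    info = PhysicalInfo()
    info.set_date("02-29-2000")   # leap-year date, within the computed day_limit
    info.set_name("Jo-Ann")
    info.set_gender("F")
    info.set_height(65)
    info.set_temperature(98.6)
    print(info.date, info.name, info.gender, info.height, info.temperature)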
| 37.191176
| 82
| 0.546066
|
625ed42d7399daf03766381b06d76d1bdd3cc8e0
| 4,586
|
py
|
Python
|
src/CADRE/parameters.py
|
JustinSGray/OpenMDAO-CADRE
|
d8378a8a571179990531d8a409efe727cbdf2bb7
|
[
"Apache-2.0"
] | 1
|
2021-07-11T19:15:22.000Z
|
2021-07-11T19:15:22.000Z
|
src/CADRE/parameters.py
|
JustinSGray/OpenMDAO-CADRE
|
d8378a8a571179990531d8a409efe727cbdf2bb7
|
[
"Apache-2.0"
] | null | null | null |
src/CADRE/parameters.py
|
JustinSGray/OpenMDAO-CADRE
|
d8378a8a571179990531d8a409efe727cbdf2bb7
|
[
"Apache-2.0"
] | 1
|
2015-11-19T18:18:01.000Z
|
2015-11-19T18:18:01.000Z
|
''' Bspline module for CADRE '''
from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float, Array
from MBI.MBI import MBI
import numpy as np
class BsplineParameters(Component):
'''Creates a Bspline interpolant for several CADRE variables
so that their time histories can be shaped with m control points
instead of n time points.'''
def __init__(self, n, m):
super(BsplineParameters, self).__init__()
self.n = n
self.m = m
self.add('t1', Float(0.,
units='s',
desc='Start time',
iotype='in'))
self.add('t2', Float(43200.,
units='s',
desc='End time',
iotype='in'))
self.B = MBI(np.zeros(n),
[np.linspace(self.t1,self.t2,n)], [self.m], [4]).getJacobian(0,0)
self.Bdot = MBI(np.zeros(n),
[np.linspace(self.t1,self.t2,n)], [self.m], [4]).getJacobian(1,0)
self.BT = self.B.transpose()
self.BdotT = self.Bdot.transpose()
self.add('CP_P_comm', Array(np.zeros((self.m,)),
size=(self.m,),
dtype=float,
units='W',
desc='Communication power at the control points',
iotype='in'))
self.add('CP_gamma', Array(np.zeros((self.m,)),
size=(self.m,),
dtype=float,
units='rad',
desc='Satellite roll angle at control points',
iotype='in'))
self.add('CP_Isetpt', Array(np.zeros((12,self.m)),
size=(12,self.m),
dtype=float,
units='A',
desc='Currents of the solar panels at the control points',
iotype='in'))
self.add('P_comm', Array(np.ones((n,)),
size=(n,), dtype=float,
units='W',
desc='Communication power over time',
iotype='out'))
self.add('Gamma', Array(0.1*np.ones((n,)),
size=(n,),
dtype=float,
units='rad',
desc='Satellite roll angle over time',
iotype='out'))
self.add('Isetpt',Array(0.2*np.ones((12,n)),
size=(12,n),
dtype=float,
units="A",
desc="Currents of the solar panels over time",
iotype='out'))
def list_deriv_vars(self):
input_keys = ('CP_P_comm', 'CP_gamma', 'CP_Isetpt')
output_keys = ('P_comm', 'Gamma', 'Isetpt')
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
# Derivatives are simple
return
def execute(self):
""" Calculate output. """
self.P_comm = self.B.dot(self.CP_P_comm)
self.Gamma = self.B.dot(self.CP_gamma)
for k in range(12):
self.Isetpt[k, :] = self.B.dot(self.CP_Isetpt[k, :])
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian """
if 'CP_P_comm' in arg:
result['P_comm'] += self.B.dot(arg['CP_P_comm'])
if 'CP_gamma' in arg:
result['Gamma'] += self.B.dot(arg['CP_gamma'])
if 'CP_Isetpt' in arg:
for k in range(12):
result['Isetpt'][k, :] += self.B.dot(arg['CP_Isetpt'][k, :])
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian """
if 'P_comm' in arg and 'CP_P_comm' in result:
result['CP_P_comm'] += self.BT.dot(arg['P_comm'])
if 'Gamma' in arg and 'CP_gamma' in result:
result['CP_gamma'] += self.BT.dot(arg['Gamma'])
if 'Isetpt' in arg and 'CP_Isetpt' in result:
for k in range(12):
result['CP_Isetpt'][k, :] += self.BT.dot(arg['Isetpt'][k, :])
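# A minimal sketch of the interpolation idea used above (not CADRE test code):
# the Jacobian B is an (n x m) matrix, so an n-point time history is obtained by
# one matrix-vector product with the m control-point values, exactly as in
# execute(). A random matrix stands in for MBI(...).getJacobian(0, 0) here.
if __name__ == "__main__":
    n_demo, m_demo = 1500, 300
    B_demo = np.random.rand(n_demo, m_demo)          # placeholder for the real B-spline Jacobian
    CP_P_comm_demo = np.linspace(0.0, 25.0, m_demo)  # m control points for communication power
    P_comm_demo = B_demo.dot(CP_P_comm_demo)         # n-point time history
    assert P_comm_demo.shape == (n_demo,)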
| 36.688
| 94
| 0.441343
|
5de86698fbf5992b1c327a7a3e37baafc94f4b70
| 5,901
|
py
|
Python
|
service/auth/api.py
|
alan-turing-institute/science-gateway-counter
|
f27c5ad426f1808a81e7a531ed56be341e7ef683
|
[
"MIT"
] | null | null | null |
service/auth/api.py
|
alan-turing-institute/science-gateway-counter
|
f27c5ad426f1808a81e7a531ed56be341e7ef683
|
[
"MIT"
] | null | null | null |
service/auth/api.py
|
alan-turing-institute/science-gateway-counter
|
f27c5ad426f1808a81e7a531ed56be341e7ef683
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource, abort
from flask import request, make_response, jsonify
from service.database import db, bcrypt
from service.models import Organisation, User
DEFAULT_ORGANISATION_CREDIT = 400
DEFAULT_USER_CREDIT = 50
DEFAULT_ORGANISATION_NAME = 'Industry User'
class CounterApi(Resource):
"""
Counter Resource
"""
def get(self):
# extract the auth token
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
responseObject = {
'status': 'fail',
'message': 'Bearer token malformed.'
}
return make_response(jsonify(responseObject), 401)
else:
auth_token = ''
if auth_token:
resp = Organisation.decode_auth_token(auth_token)
user_id = resp
# fetch organisation information
organisation = Organisation.query.first()
if organisation:
organisation_credit = organisation.credit
organisation_tally = organisation.tally
else:
organisation_credit = DEFAULT_ORGANISATION_CREDIT
organisation_tally = 0
# fetch user information
user = db.session.query(User).filter_by(id=user_id).first()
if user is None:
user_credit = DEFAULT_USER_CREDIT
user_tally = 0
else:
user_credit = user.credit
user_tally = user.tally
responseObject = {
'status': 'success',
'message': 'user credit',
'organisation':{
'credit': organisation_credit,
'tally': organisation_tally,
},
'user':{
'credit': user_credit,
'tally': user_tally,
}
}
return make_response(jsonify(responseObject), 200)
else:
responseObject = {
'status': 'fail',
'message': 'Provide a valid auth token.'
}
return make_response(jsonify(responseObject), 401)
def post(self):
# extract the auth token
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
responseObject = {
'status': 'fail',
'message': 'Bearer token malformed.'
}
return make_response(jsonify(responseObject), 401)
else:
auth_token = ''
if auth_token:
resp = Organisation.decode_auth_token(auth_token)
if not isinstance(resp, str):
user_id = resp
organisation = Organisation.query.first()
                # create the organisation record if we need to
if organisation is None:
organisation = Organisation(
name=DEFAULT_ORGANISATION_NAME,
credit=DEFAULT_ORGANISATION_CREDIT)
# create the user if we need to
user = db.session.query(User).filter_by(id=user_id).first()
if user is None:
user = User(organisation=organisation, credit=DEFAULT_USER_CREDIT)
if organisation.credit < 1 or user.credit < 1:
responseObject = {
'status': 'error',
                        'message': 'insufficient organisation or user credit remaining',
'organisation': {
'name': organisation.name,
'tally': organisation.tally,
'credit': organisation.credit
},
'user': {
'id': user_id,
'credit': user.credit,
'tally': user.tally,
}
}
return make_response(jsonify(responseObject), 200)
else:
# decrement credit
organisation.credit = organisation.credit - 1
user.credit = user.credit - 1
# increment tally
organisation.tally = organisation.tally + 1
user.tally = user.tally + 1
db.session.add(organisation)
db.session.add(user)
db.session.commit()
# TODO re-query to check persistence of updated fields
responseObject = {
'status': 'success',
'message': 'updated tally',
'organisation': {
'name': organisation.name,
'tally': organisation.tally,
'credit': organisation.credit
},
'user': {
'id': user_id,
'credit': user.credit,
'tally': user.tally,
}
}
return make_response(jsonify(responseObject), 200)
responseObject = {
'status': 'fail',
'message': resp
}
return make_response(jsonify(responseObject), 401)
else:
responseObject = {
'status': 'fail',
'message': 'Provide a valid auth token.'
}
return make_response(jsonify(responseObject), 401)
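# A minimal client-side sketch (not part of the service). The route URL below is
# an assumption for illustration; only the Bearer-token header and the JSON shape
# follow from the resource above.
#
#   import requests
#   headers = {"Authorization": "Bearer <auth_token>"}
#   r = requests.get("http://localhost:5000/counter", headers=headers)
#   print(r.json())    # {'status': 'success', 'organisation': {...}, 'user': {...}}
#   r = requests.post("http://localhost:5000/counter", headers=headers)
#   print(r.json())    # decrements credit and increments tally on success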
| 35.548193
| 86
| 0.469581
|
d17534ec4b4437f3848a1542dc81f17c37278d56
| 2,289
|
py
|
Python
|
tools/rvm.py
|
fyviezhao/Anim-NeRF
|
65f59a7993093e6530c05c0c47842f6f7866d7c4
|
[
"MIT"
] | 1
|
2022-03-28T09:30:22.000Z
|
2022-03-28T09:30:22.000Z
|
tools/rvm.py
|
fyviezhao/Anim-NeRF
|
65f59a7993093e6530c05c0c47842f6f7866d7c4
|
[
"MIT"
] | null | null | null |
tools/rvm.py
|
fyviezhao/Anim-NeRF
|
65f59a7993093e6530c05c0c47842f6f7866d7c4
|
[
"MIT"
] | null | null | null |
import os
import cv2
import pickle
import shutil
import argparse
import torch
import numpy as np
from tqdm import tqdm
import sys
sys.path.append('third_party/RobustVideoMatting')
from model import MattingNetwork
from inference_utils import ImageSequenceReader, ImageSequenceWriter
device = torch.device('cuda:0')
EXTS = ['jpg', 'jpeg', 'png']
def main(args):
segmentor = MattingNetwork(variant='resnet50').eval().to(device)
segmentor.load_state_dict(torch.load(args.ckpt_path))
images_folder = args.images_folder
    output_folder = args.output_folder
    os.makedirs(output_folder, exist_ok=True)  # make sure the output directory exists before writing masks
frame_IDs = os.listdir(images_folder)
frame_IDs = [id.split('.')[0] for id in frame_IDs if id.split('.')[-1] in EXTS]
frame_IDs.sort()
    # Prepend the first four frames (reversed) so the recurrent states are warmed up
    # before the sequence proper; those frames are simply processed twice.
    frame_IDs = frame_IDs[:4][::-1] + frame_IDs
    rec = [None] * 4  # initial recurrent states
    downsample_ratio = 1.0  # adjust based on the video resolution
for i in tqdm(range(len(frame_IDs))):
frame_ID = frame_IDs[i]
img_path = os.path.join(images_folder, '{}.png'.format(frame_ID))
img_masked_path = os.path.join(output_folder, '{}.png'.format(frame_ID))
img = cv2.imread(img_path)
src = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
src = torch.from_numpy(src).float() / 255.
src = src.permute(2, 0, 1).unsqueeze(0)
with torch.no_grad():
fgr, pha, *rec = segmentor(src.to(device), *rec, downsample_ratio) # Cycle the recurrent states.
pha = pha.permute(0, 2, 3, 1).cpu().numpy().squeeze(0)
mask = (pha > 0.5).astype(np.int32)
mask = (mask * 255).astype(np.uint8)
img_masked = np.concatenate([img, mask], axis=-1)
cv2.imwrite(img_masked_path, img_masked)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--images_folder', type=str,
help='the images folder for segmentation')
parser.add_argument('--output_folder', type=str,
help='the output folder to save results')
parser.add_argument('--ckpt_path', type=str, default='third_party/RobustVideoMatting/checkpoints/rvm_resnet50.pth',
help='the checkpoints for rvm')
args = parser.parse_args()
main(args)
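# Example invocation (paths are placeholders):
#   python tools/rvm.py \
#       --images_folder data/subject/images \
#       --output_folder data/subject/images_masked \
#       --ckpt_path third_party/RobustVideoMatting/checkpoints/rvm_resnet50.pth
# Each input <frame>.png is rewritten as a 4-channel image (BGR + binary alpha mask).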
| 35.765625
| 119
| 0.637396
|
676f0f3a778530b225a64817daa14cce75bcf4cf
| 317
|
py
|
Python
|
authentication/forms.py
|
RAGNAROSaa/-
|
833688d556ecc70570a9b464160271ace07380d9
|
[
"Apache-2.0"
] | 5
|
2016-09-25T02:59:13.000Z
|
2018-07-18T05:20:58.000Z
|
authentication/forms.py
|
RAGNAROSaa/-
|
833688d556ecc70570a9b464160271ace07380d9
|
[
"Apache-2.0"
] | 1
|
2016-12-01T01:11:53.000Z
|
2016-12-01T01:11:53.000Z
|
authentication/forms.py
|
RAGNAROSaa/-
|
833688d556ecc70570a9b464160271ace07380d9
|
[
"Apache-2.0"
] | 6
|
2016-09-24T02:42:57.000Z
|
2016-11-10T13:35:13.000Z
|
from django import forms
from django.contrib.auth import authenticate
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField()
def login(self):
user = authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
return user
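# A minimal usage sketch (assumed view code, not part of this module):
#
#   def login_view(request):
#       form = LoginForm(request.POST or None)
#       if form.is_valid():          # populates form.cleaned_data
#           user = form.login()      # returns an authenticated User or None
#           if user is not None:
#               ...                  # e.g. django.contrib.auth.login(request, user)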
| 26.416667
| 107
| 0.722397
|
67d74cb95423d800e53a6caf33db67e848c0e12a
| 6,229
|
py
|
Python
|
w3testrunner/third_party/talos/ffprocess_linux.py
|
formido/browsercontrol
|
a4259cf239cdfe439e37ac13c2b7b4329c42198b
|
[
"BSD-3-Clause"
] | null | null | null |
w3testrunner/third_party/talos/ffprocess_linux.py
|
formido/browsercontrol
|
a4259cf239cdfe439e37ac13c2b7b4329c42198b
|
[
"BSD-3-Clause"
] | null | null | null |
w3testrunner/third_party/talos/ffprocess_linux.py
|
formido/browsercontrol
|
a4259cf239cdfe439e37ac13c2b7b4329c42198b
|
[
"BSD-3-Clause"
] | null | null | null |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is standalone Firefox Windows performance test.
#
# The Initial Developer of the Original Code is Google Inc.
# Portions created by the Initial Developer are Copyright (C) 2006
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Annie Sullivan <annie.sullivan@gmail.com> (original author)
# Ben Hearsum <bhearsum@wittydomain.com> (OS independence)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import subprocess
import signal
import os
from select import select
import time
def GenerateFirefoxCommandLine(firefox_path, profile_dir, url):
"""Generates the command line for a process to run Firefox
Args:
firefox_path: String containing the path to the firefox exe to use
profile_dir: String containing the directory of the profile to run Firefox in
url: String containing url to start with.
"""
profile_arg = ''
if profile_dir:
profile_arg = '-profile %s' % profile_dir
cmd = '%s %s %s' % (firefox_path,
profile_arg,
url)
return cmd
def GetPidsByName(process_name):
"""Searches for processes containing a given string.
This function is UNIX specific.
Args:
process_name: The string to be searched for
Returns:
A list of PIDs containing the string. An empty list is returned if none are
found.
"""
# BT: new implementation using ps -C in order to filter better.
ps_output = subprocess.Popen(('ps', '--no-headers', '-o', 'pid,cmd',
'-C', process_name),
stdout=subprocess.PIPE).communicate()[0]
pids = []
for line in ps_output.splitlines():
line = line.strip()
pid, cmd = line.split(" ", 1)
if "<defunct>" in cmd:
continue
pids.append(int(pid))
return pids
# BT: original implementation, unused:
matchingPids = []
command = ['ps', 'ax']
handle = subprocess.Popen(command, stdout=subprocess.PIPE)
# wait for the process to terminate
handle.wait()
data = handle.stdout.read()
# find all matching processes and add them to the list
for line in data.splitlines():
if line.find(process_name) >= 0:
# splits by whitespace, the first one should be the pid
pid = int(line.split()[0])
matchingPids.append(pid)
return matchingPids
def ProcessesWithNameExist(*process_names):
"""Returns true if there are any processes running with the
given name. Useful to check whether a Firefox process is still running
Args:
process_names: String or strings containing the process name, i.e. "firefox"
Returns:
True if any processes with that name are running, False otherwise.
"""
for process_name in process_names:
pids = GetPidsByName(process_name)
if len(pids) > 0:
return True
return False
# BT: new function
def ProcessWithPidExists(pid):
lines = subprocess.Popen(('ps', '--no-headers', '-o', 'pid', 'ax'),
stdout=subprocess.PIPE).communicate()[0]
return pid in [int(pid) for pid in lines.split()]
def TerminateProcess(pid):
"""Helper function to terminate a process, given the pid
Args:
pid: integer process id of the process to terminate.
"""
try:
# BT: use ProcessWithPidExists instead of ProcessesWithNameExist
if ProcessWithPidExists(pid):
os.kill(pid, signal.SIGTERM)
# BT: lowered the delay
time.sleep(2)
if ProcessWithPidExists(pid):
os.kill(pid, signal.SIGKILL)
except OSError, (errno, strerror):
print 'WARNING: failed os.kill: %s : %s' % (errno, strerror)
def TerminateAllProcesses(*process_names):
"""Helper function to terminate all processes with the given process name
Args:
process_names: String or strings containing the process name, i.e. "firefox"
"""
# Get all the process ids of running instances of this process,
# and terminate them
for process_name in process_names:
pids = GetPidsByName(process_name)
for pid in pids:
TerminateProcess(pid)
def NonBlockingReadProcessOutput(handle):
"""Does a non-blocking read from the output of the process
with the given handle.
Args:
handle: The process handle returned from os.popen()
Returns:
A tuple (bytes, output) containing the number of output
bytes read, and the actual output.
"""
output = ""
num_avail = 0
# check for data
# select() does not seem to work well with pipes.
# after data is available once it *always* thinks there is data available
# readline() will continue to return an empty string however
# so we can use this behavior to work around the problem
while select([handle], [], [], 0)[0]:
line = handle.readline()
if line:
output += line
else:
break
# this statement is true for encodings that have 1byte/char
num_avail = len(output)
return (num_avail, output)
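# A minimal usage sketch (not part of talos), kept in this module's Python 2 style:
# launch Firefox with the helper above and poll its output without blocking.
#
#   import subprocess
#   cmd = GenerateFirefoxCommandLine('/usr/bin/firefox', '/tmp/profile', 'about:blank')
#   proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
#   while proc.poll() is None:
#       num_bytes, output = NonBlockingReadProcessOutput(proc.stdout)
#       if num_bytes:
#           print output
#       time.sleep(1)
#   TerminateAllProcesses('firefox')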
| 32.108247
| 81
| 0.69353
|
c7016dcb800e6d01ef6ec50b9aca62be58228d0b
| 1,628
|
py
|
Python
|
PythonLib/solvers/uflp.py
|
xNok/OR_NETWORK-AND-DISCRETE-LOCATION
|
0941e09a5a09322fe0aceca631c44f3288a74fe2
|
[
"MIT"
] | null | null | null |
PythonLib/solvers/uflp.py
|
xNok/OR_NETWORK-AND-DISCRETE-LOCATION
|
0941e09a5a09322fe0aceca631c44f3288a74fe2
|
[
"MIT"
] | null | null | null |
PythonLib/solvers/uflp.py
|
xNok/OR_NETWORK-AND-DISCRETE-LOCATION
|
0941e09a5a09322fe0aceca631c44f3288a74fe2
|
[
"MIT"
] | null | null | null |
from docplex.mp.model import Model
def uflp(I, J, f, c, name='UFLP'):
"""
Inputs:
I = Set of customers (1d-array)
J = Set of Facilities (1d-array)
        f = Fixed cost associated with each facility (1d-array)
        c = Cost of connecting element of I with J (2d-array)
    Outputs:
        m = docplex Model object
        X = decision variables related to the routing
        Y = decision variables related to open facilities
"""
###################
# create one model instance
m = Model(name=name)
###################
# Define variables
# x(i,j) equals 1 if arc ij is in the solution
X = m.binary_var_dict([(i,j)
for i in I
for j in J], name="X")
    # y(j) equals 1 if node j is in the solution
Y = m.binary_var_dict([(j)
for j in J], name="Y")
###################
# Define Objective
m.minimize(m.sum(X[i,j] * c[i][j] for i in I for j in J) \
+ m.sum(Y[j] * f[j] for j in J))
m.add_kpi(m.sum(X[i,j] * c[i][j] for i in I for j in J), "transportation cost")
m.add_kpi(m.sum(Y[j] * f[j] for j in J), "fixed cost")
###################
# Define constraints
    # constraint #1: each customer is assigned to a facility
for i in I:
m.add_constraint(m.sum(X[i,j] for j in J) == 1, ctname='demande_%s' % i)
    # constraint #2: customers can only be assigned to open facilities
for i in I:
for j in J:
m.add_constraint(X[i,j] <= Y[j], ctname='flow_%s_%s' % (i,j))
return m, X, Y
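# A minimal sketch showing how the builder above might be used. The data below is
# illustrative only, and solving requires a working docplex/CPLEX installation.
if __name__ == '__main__':
    I = [0, 1, 2]                        # 3 customers
    J = [0, 1]                           # 2 candidate facilities
    f = [100, 120]                       # fixed opening cost per facility
    c = [[10, 40],                       # c[i][j]: cost of serving customer i from facility j
         [25, 15],
         [30, 20]]
    model, X, Y = uflp(I, J, f, c)
    solution = model.solve()
    if solution:
        model.print_solution()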
| 31.921569
| 83
| 0.512285
|
86ec11d4a486f6d91bee29476d2b3777a70234a6
| 5,856
|
py
|
Python
|
dataset/voc.py
|
francescodisalvo05/MiB
|
01faa3e62b20c5629da0e4b5bed902ea76a6aaa3
|
[
"MIT"
] | null | null | null |
dataset/voc.py
|
francescodisalvo05/MiB
|
01faa3e62b20c5629da0e4b5bed902ea76a6aaa3
|
[
"MIT"
] | null | null | null |
dataset/voc.py
|
francescodisalvo05/MiB
|
01faa3e62b20c5629da0e4b5bed902ea76a6aaa3
|
[
"MIT"
] | null | null | null |
import os
import random
import torch.utils.data as data
from torch import distributed
import torchvision as tv
import numpy as np
from .utils import Subset, filter_images, group_images
from PIL import Image
classes = {
0: 'background',
1: 'aeroplane',
2: 'bicycle',
3: 'bird',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
8: 'cat',
9: 'chair',
10: 'cow',
11: 'diningtable',
12: 'dog',
13: 'horse',
14: 'motorbike',
15: 'person',
16: 'pottedplant',
17: 'sheep',
18: 'sofa',
19: 'train',
20: 'tvmonitor'
}
class VOCSegmentation(data.Dataset):
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
Args:
root (string): Root directory of the VOC Dataset.
image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
is_aug (bool, optional): If you want to use the augmented train set or not (default is True)
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
"""
def __init__(self,
root,
image_set='train',
is_aug=True,
transform=None):
self.root = os.path.expanduser(root)
self.year = "2012"
self.transform = transform
self.image_set = image_set
base_dir = "PascalVOC12"
voc_root = os.path.join(self.root, base_dir)
splits_dir = os.path.join(voc_root, 'splits')
if not os.path.isdir(voc_root):
            raise RuntimeError('Dataset not found or corrupted: '
                               'expected the PascalVOC12 folder under {}'.format(self.root))
if is_aug and image_set == 'train':
mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
assert os.path.exists(
mask_dir), "SegmentationClassAug not found"
split_f = os.path.join(splits_dir, 'train_aug.txt')
else:
split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
if not os.path.exists(split_f):
raise ValueError(
'Wrong image_set entered! Please use image_set="train" '
'or image_set="trainval" or image_set="val"')
        # strip the trailing newline from each line and split image/mask paths
with open(os.path.join(split_f), "r") as f:
file_names = [x[:-1].split(' ') for x in f.readlines()]
        # drop the leading slash, otherwise os.path.join would treat the path as absolute and discard voc_root
self.images = [(os.path.join(voc_root, x[0][1:]), os.path.join(voc_root, x[1][1:])) for x in file_names]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
img = Image.open(self.images[index][0]).convert('RGB')
target = Image.open(self.images[index][1])
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.images)
class VOCSegmentationIncremental(data.Dataset):
def __init__(self,
root,
train=True,
transform=None,
labels=None,
labels_old=None,
idxs_path=None,
masking=True,
overlap=True):
full_voc = VOCSegmentation(root, 'train' if train else 'val', is_aug=True, transform=None)
self.labels = []
self.labels_old = []
if labels is not None:
# store the labels
labels_old = labels_old if labels_old is not None else []
self.__strip_zero(labels)
self.__strip_zero(labels_old)
assert not any(l in labels_old for l in labels), "labels and labels_old must be disjoint sets"
self.labels = [0] + labels
self.labels_old = [0] + labels_old
self.order = [0] + labels_old + labels
# take index of images with at least one class in labels and all classes in labels+labels_old+[0,255]
if idxs_path is not None and os.path.exists(idxs_path):
idxs = np.load(idxs_path).tolist()
else:
idxs = filter_images(full_voc, labels, labels_old, overlap=overlap)
if idxs_path is not None:
np.save(idxs_path, np.array(idxs, dtype=int))
if train:
masking_value = 0
else:
masking_value = 255
self.inverted_order = {label: self.order.index(label) for label in self.order}
self.inverted_order[255] = masking_value
reorder_transform = tv.transforms.Lambda(
lambda t: t.apply_(lambda x: self.inverted_order[x] if x in self.inverted_order else masking_value))
if masking:
tmp_labels = self.labels + [255]
target_transform = tv.transforms.Lambda(
lambda t: t.apply_(lambda x: self.inverted_order[x] if x in tmp_labels else masking_value))
else:
target_transform = reorder_transform
# make the subset of the dataset
self.dataset = Subset(full_voc, idxs, transform, target_transform)
else:
self.dataset = full_voc
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
return self.dataset[index]
def __len__(self):
return len(self.dataset)
@staticmethod
def __strip_zero(labels):
while 0 in labels:
labels.remove(0)
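# A minimal usage sketch (not part of MiB itself): a 15-5 incremental split where
# classes 1-15 were learned previously and classes 16-20 are the new task. The
# root path is an assumption for illustration.
#
#   dataset = VOCSegmentationIncremental(
#       root='data',
#       train=True,
#       labels=list(range(16, 21)),      # new classes
#       labels_old=list(range(1, 16)),   # previously learned classes
#       masking=True,
#       overlap=True,
#   )
#   img, target = dataset[0]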
| 32.353591
| 116
| 0.569672
|
e4890933ee2a8d1b92ede30172b2815b6173624f
| 3,342
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_get_words_on_document_line_async.py
|
mrwbarg/azure-sdk-for-python
|
ecfd1093cd623040d1359444d76ac0b57a786f63
|
[
"MIT"
] | 2
|
2021-09-07T18:30:33.000Z
|
2021-11-23T02:50:57.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_get_words_on_document_line_async.py
|
mrwbarg/azure-sdk-for-python
|
ecfd1093cd623040d1359444d76ac0b57a786f63
|
[
"MIT"
] | 4
|
2021-10-06T16:39:52.000Z
|
2021-11-18T18:33:37.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_get_words_on_document_line_async.py
|
mrwbarg/azure-sdk-for-python
|
ecfd1093cd623040d1359444d76ac0b57a786f63
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_get_words_on_document_line_async.py
DESCRIPTION:
This sample demonstrates how to get the words contained in a DocumentLine.
Please note that `get_words` on DocumentLine is only available in SDK version
3.2.0b2 and later.
USAGE:
python sample_get_words_on_document_line_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
import asyncio
def format_bounding_region(bounding_regions):
if not bounding_regions:
return "N/A"
return ", ".join("Page #{}: {}".format(region.page_number, format_bounding_box(region.bounding_box)) for region in bounding_regions)
def format_bounding_box(bounding_box):
if not bounding_box:
return "N/A"
return ", ".join(["[{}, {}]".format(p.x, p.y) for p in bounding_box])
async def get_words_on_document_line_async():
path_to_sample_documents = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"..",
"..",
"./sample_forms/forms/Form_1.jpg",
)
)
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with document_analysis_client:
with open(path_to_sample_documents, "rb") as f:
poller = await document_analysis_client.begin_analyze_document(
"prebuilt-document", document=f
)
result = await poller.result()
for idx, page in enumerate(result.pages):
print("----Analyzing lines and words from page #{}----".format(idx + 1))
print(
"Page has width: {} and height: {}, measured with unit: {}".format(
page.width, page.height, page.unit
)
)
for line_idx, line in enumerate(page.lines):
words = line.get_words()
print(
"...Line # {} has word count {} and text '{}' within bounding box '{}'".format(
line_idx,
len(words),
line.content,
format_bounding_box(line.bounding_box),
)
)
for word in words:
print(
"......Word '{}' has a confidence of {}".format(
word.content, word.confidence
)
)
print("----------------------------------------")
async def main():
await get_words_on_document_line_async()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
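# Illustrative console output (values are made up; the format follows the print
# statements above):
#
#   ----Analyzing lines and words from page #1----
#   Page has width: 8.5 and height: 11.0, measured with unit: inch
#   ...Line # 0 has word count 2 and text 'Purchase Order' within bounding box '[0.5, 1.1], [3.3, 1.1], [3.3, 1.4], [0.5, 1.4]'
#   ......Word 'Purchase' has a confidence of 0.99
#   ......Word 'Order' has a confidence of 0.99
#   ----------------------------------------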
| 32.764706
| 136
| 0.585877
|
93e48f7d9df603bafdddc5903766549487355f3c
| 9,845
|
py
|
Python
|
powerdns_client/models/cryptokey.py
|
nrfta/python-powerdns-client
|
57dd0460995a5407c6f5c963553b4df0f4859667
|
[
"MIT"
] | 1
|
2021-04-05T21:37:17.000Z
|
2021-04-05T21:37:17.000Z
|
powerdns_client/models/cryptokey.py
|
nrfta/python-powerdns-client
|
57dd0460995a5407c6f5c963553b4df0f4859667
|
[
"MIT"
] | null | null | null |
powerdns_client/models/cryptokey.py
|
nrfta/python-powerdns-client
|
57dd0460995a5407c6f5c963553b4df0f4859667
|
[
"MIT"
] | 1
|
2021-12-18T04:33:58.000Z
|
2021-12-18T04:33:58.000Z
|
# coding: utf-8
"""
PowerDNS Authoritative HTTP API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.0.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Cryptokey(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'id': 'int',
'keytype': 'str',
'active': 'bool',
'published': 'bool',
'dnskey': 'str',
'ds': 'list[str]',
'privatekey': 'str',
'algorithm': 'str',
'bits': 'int'
}
attribute_map = {
'type': 'type',
'id': 'id',
'keytype': 'keytype',
'active': 'active',
'published': 'published',
'dnskey': 'dnskey',
'ds': 'ds',
'privatekey': 'privatekey',
'algorithm': 'algorithm',
'bits': 'bits'
}
def __init__(self, type=None, id=None, keytype=None, active=None, published=None, dnskey=None, ds=None, privatekey=None, algorithm=None, bits=None): # noqa: E501
"""Cryptokey - a model defined in Swagger""" # noqa: E501
self._type = None
self._id = None
self._keytype = None
self._active = None
self._published = None
self._dnskey = None
self._ds = None
self._privatekey = None
self._algorithm = None
self._bits = None
self.discriminator = None
if type is not None:
self.type = type
if id is not None:
self.id = id
if keytype is not None:
self.keytype = keytype
if active is not None:
self.active = active
if published is not None:
self.published = published
if dnskey is not None:
self.dnskey = dnskey
if ds is not None:
self.ds = ds
if privatekey is not None:
self.privatekey = privatekey
if algorithm is not None:
self.algorithm = algorithm
if bits is not None:
self.bits = bits
@property
def type(self):
"""Gets the type of this Cryptokey. # noqa: E501
set to \"Cryptokey\" # noqa: E501
:return: The type of this Cryptokey. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Cryptokey.
set to \"Cryptokey\" # noqa: E501
:param type: The type of this Cryptokey. # noqa: E501
:type: str
"""
self._type = type
@property
def id(self):
"""Gets the id of this Cryptokey. # noqa: E501
The internal identifier, read only # noqa: E501
:return: The id of this Cryptokey. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Cryptokey.
The internal identifier, read only # noqa: E501
:param id: The id of this Cryptokey. # noqa: E501
:type: int
"""
self._id = id
@property
def keytype(self):
"""Gets the keytype of this Cryptokey. # noqa: E501
:return: The keytype of this Cryptokey. # noqa: E501
:rtype: str
"""
return self._keytype
@keytype.setter
def keytype(self, keytype):
"""Sets the keytype of this Cryptokey.
:param keytype: The keytype of this Cryptokey. # noqa: E501
:type: str
"""
allowed_values = ["ksk", "zsk", "csk"] # noqa: E501
if keytype not in allowed_values:
raise ValueError(
"Invalid value for `keytype` ({0}), must be one of {1}" # noqa: E501
.format(keytype, allowed_values)
)
self._keytype = keytype
@property
def active(self):
"""Gets the active of this Cryptokey. # noqa: E501
Whether or not the key is in active use # noqa: E501
:return: The active of this Cryptokey. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this Cryptokey.
Whether or not the key is in active use # noqa: E501
:param active: The active of this Cryptokey. # noqa: E501
:type: bool
"""
self._active = active
@property
def published(self):
"""Gets the published of this Cryptokey. # noqa: E501
Whether or not the DNSKEY record is published in the zone # noqa: E501
:return: The published of this Cryptokey. # noqa: E501
:rtype: bool
"""
return self._published
@published.setter
def published(self, published):
"""Sets the published of this Cryptokey.
Whether or not the DNSKEY record is published in the zone # noqa: E501
:param published: The published of this Cryptokey. # noqa: E501
:type: bool
"""
self._published = published
@property
def dnskey(self):
"""Gets the dnskey of this Cryptokey. # noqa: E501
The DNSKEY record for this key # noqa: E501
:return: The dnskey of this Cryptokey. # noqa: E501
:rtype: str
"""
return self._dnskey
@dnskey.setter
def dnskey(self, dnskey):
"""Sets the dnskey of this Cryptokey.
The DNSKEY record for this key # noqa: E501
:param dnskey: The dnskey of this Cryptokey. # noqa: E501
:type: str
"""
self._dnskey = dnskey
@property
def ds(self):
"""Gets the ds of this Cryptokey. # noqa: E501
An array of DS records for this key # noqa: E501
:return: The ds of this Cryptokey. # noqa: E501
:rtype: list[str]
"""
return self._ds
@ds.setter
def ds(self, ds):
"""Sets the ds of this Cryptokey.
An array of DS records for this key # noqa: E501
:param ds: The ds of this Cryptokey. # noqa: E501
:type: list[str]
"""
self._ds = ds
@property
def privatekey(self):
"""Gets the privatekey of this Cryptokey. # noqa: E501
The private key in ISC format # noqa: E501
:return: The privatekey of this Cryptokey. # noqa: E501
:rtype: str
"""
return self._privatekey
@privatekey.setter
def privatekey(self, privatekey):
"""Sets the privatekey of this Cryptokey.
The private key in ISC format # noqa: E501
:param privatekey: The privatekey of this Cryptokey. # noqa: E501
:type: str
"""
self._privatekey = privatekey
@property
def algorithm(self):
"""Gets the algorithm of this Cryptokey. # noqa: E501
The name of the algorithm of the key, should be a mnemonic # noqa: E501
:return: The algorithm of this Cryptokey. # noqa: E501
:rtype: str
"""
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
"""Sets the algorithm of this Cryptokey.
The name of the algorithm of the key, should be a mnemonic # noqa: E501
:param algorithm: The algorithm of this Cryptokey. # noqa: E501
:type: str
"""
self._algorithm = algorithm
@property
def bits(self):
"""Gets the bits of this Cryptokey. # noqa: E501
The size of the key # noqa: E501
:return: The bits of this Cryptokey. # noqa: E501
:rtype: int
"""
return self._bits
@bits.setter
def bits(self, bits):
"""Sets the bits of this Cryptokey.
The size of the key # noqa: E501
:param bits: The bits of this Cryptokey. # noqa: E501
:type: int
"""
self._bits = bits
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Cryptokey, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Cryptokey):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
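# A minimal usage sketch (illustrative values, not taken from the PowerDNS docs):
#
#   key = Cryptokey(
#       type='Cryptokey',
#       keytype='csk',                # must be one of 'ksk', 'zsk', 'csk'
#       active=True,
#       published=True,
#       algorithm='ECDSAP256SHA256',
#       bits=256,
#   )
#   print(key.to_dict())              # plain dict, suitable for JSON serialisation
#   key.keytype = 'foo'               # raises ValueError: not in the allowed values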
| 26.323529
| 166
| 0.549518
|
b9f07a32840b99c95fe43dfb7009e598c69d89e4
| 44,774
|
py
|
Python
|
salt/modules/schedule.py
|
eiginn/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
salt/modules/schedule.py
|
eiginn/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
salt/modules/schedule.py
|
eiginn/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
Module for managing the Salt schedule on a minion
.. versionadded:: 2014.7.0
"""
import copy as pycopy
import datetime
import logging
import os
import salt.utils.event
import salt.utils.files
import salt.utils.odict
import salt.utils.yaml
try:
import dateutil.parser as dateutil_parser
_WHEN_SUPPORTED = True
_RANGE_SUPPORTED = True
except ImportError:
_WHEN_SUPPORTED = False
_RANGE_SUPPORTED = False
__proxyenabled__ = ["*"]
log = logging.getLogger(__name__)
__func_alias__ = {"list_": "list", "reload_": "reload"}
SCHEDULE_CONF = [
"name",
"maxrunning",
"function",
"splay",
"range",
"when",
"once",
"once_fmt",
"returner",
"jid_include",
"args",
"kwargs",
"_seconds",
"seconds",
"minutes",
"hours",
"days",
"enabled",
"return_job",
"metadata",
"cron",
"until",
"after",
"return_config",
"return_kwargs",
"run_on_start",
"skip_during_range",
"run_after_skip_range",
]
def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
"""
List the jobs currently scheduled on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.list
# Show all jobs including hidden internal jobs
salt '*' schedule.list show_all=True
# Hide disabled jobs from list of jobs
salt '*' schedule.list show_disabled=False
"""
schedule = {}
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{"func": "list", "where": where}, "manage_schedule"
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_list_complete", wait=30
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
ret["comment"] = "Event module not available. Schedule list failed."
ret["result"] = True
log.debug("Event module not available. Schedule list failed.")
return ret
_hidden = ["enabled", "skip_function", "skip_during_range"]
for job in list(schedule.keys()): # iterate over a copy since we will mutate it
if job in _hidden:
continue
# Default jobs added by salt begin with __
# by default hide them unless show_all is True.
if job.startswith("__") and not show_all:
del schedule[job]
continue
# if enabled is not included in the job,
# assume job is enabled.
if "enabled" not in schedule[job]:
schedule[job]["enabled"] = True
for item in pycopy.copy(schedule[job]):
if item not in SCHEDULE_CONF:
del schedule[job][item]
continue
if schedule[job][item] is None:
del schedule[job][item]
continue
if schedule[job][item] == "true":
schedule[job][item] = True
if schedule[job][item] == "false":
schedule[job][item] = False
# if the job is disabled and show_disabled is False, skip job
if not show_disabled and not schedule[job]["enabled"]:
del schedule[job]
continue
if "_seconds" in schedule[job]:
# remove _seconds from the listing
del schedule[job]["_seconds"]
if schedule:
if return_yaml:
tmp = {"schedule": schedule}
return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
return schedule
else:
return {"schedule": {}}
def is_enabled(name=None):
"""
List a Job only if its enabled
If job is not specified, indicate
if the scheduler is enabled or disabled.
.. versionadded:: 2015.5.3
CLI Example:
.. code-block:: bash
salt '*' schedule.is_enabled name=job_name
salt '*' schedule.is_enabled
"""
current_schedule = __salt__["schedule.list"](show_all=False, return_yaml=False)
if not name:
return current_schedule.get("enabled", True)
else:
if name in current_schedule:
return current_schedule[name]
else:
return {}
def purge(**kwargs):
"""
Purge all the jobs currently scheduled on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.purge
"""
ret = {"comment": [], "result": True}
for name in list_(show_all=True, return_yaml=False):
if name == "enabled":
continue
if name.startswith("__"):
continue
if "test" in kwargs and kwargs["test"]:
ret["result"] = True
ret["comment"].append(
"Job: {} would be deleted from schedule.".format(name)
)
else:
persist = kwargs.get("persist", True)
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{"name": name, "func": "delete", "persist": persist},
"manage_schedule",
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_delete_complete", wait=30
)
if event_ret and event_ret["complete"]:
_schedule_ret = event_ret["schedule"]
if name not in _schedule_ret:
ret["result"] = True
ret["comment"].append(
"Deleted job: {} from schedule.".format(name)
)
else:
ret["comment"].append(
"Failed to delete job {} from schedule.".format(
name
)
)
ret["result"] = True
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule add failed."
ret["result"] = True
return ret
def delete(name, **kwargs):
"""
Delete a job from the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.delete job1
"""
ret = {
"comment": "Failed to delete job {} from schedule.".format(name),
"result": False,
"changes": {},
}
if not name:
ret["comment"] = "Job name is required."
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be deleted from schedule.".format(name)
ret["result"] = True
else:
persist = kwargs.get("persist", True)
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {"name": name, "func": "delete", "persist": persist}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"where": "pillar",
"func": "delete",
"persist": False,
}
else:
ret["comment"] = "Job {} does not exist.".format(name)
return ret
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](event_data, "manage_schedule")
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_delete_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
if name not in schedule:
ret["result"] = True
ret["comment"] = "Deleted Job {} from schedule.".format(
name
)
ret["changes"][name] = "removed"
else:
ret[
"comment"
] = "Failed to delete job {} from schedule.".format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule add failed."
return ret
def build_schedule_item(name, **kwargs):
"""
Build a schedule job
CLI Example:
.. code-block:: bash
salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600
"""
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
return ret
schedule = {}
schedule[name] = salt.utils.odict.OrderedDict()
schedule[name]["function"] = kwargs["function"]
time_conflict = False
for item in ["seconds", "minutes", "hours", "days"]:
if item in kwargs and "when" in kwargs:
time_conflict = True
if item in kwargs and "cron" in kwargs:
time_conflict = True
if time_conflict:
ret["result"] = False
ret["comment"] = (
'Unable to use "seconds", "minutes", "hours", or "days" with "when" or'
' "cron" options.'
)
return ret
if "when" in kwargs and "cron" in kwargs:
ret["result"] = False
ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
for item in ["seconds", "minutes", "hours", "days"]:
if item in kwargs:
schedule[name][item] = kwargs[item]
if "return_job" in kwargs:
schedule[name]["return_job"] = kwargs["return_job"]
if "metadata" in kwargs:
schedule[name]["metadata"] = kwargs["metadata"]
if "job_args" in kwargs:
schedule[name]["args"] = kwargs["job_args"]
if "job_kwargs" in kwargs:
schedule[name]["kwargs"] = kwargs["job_kwargs"]
if "maxrunning" in kwargs:
schedule[name]["maxrunning"] = kwargs["maxrunning"]
else:
schedule[name]["maxrunning"] = 1
if "name" in kwargs:
schedule[name]["name"] = kwargs["name"]
else:
schedule[name]["name"] = name
if "enabled" in kwargs:
schedule[name]["enabled"] = kwargs["enabled"]
else:
schedule[name]["enabled"] = True
if "jid_include" not in kwargs or kwargs["jid_include"]:
schedule[name]["jid_include"] = True
if "splay" in kwargs:
if isinstance(kwargs["splay"], dict):
# Ensure ordering of start and end arguments
schedule[name]["splay"] = salt.utils.odict.OrderedDict()
schedule[name]["splay"]["start"] = kwargs["splay"]["start"]
schedule[name]["splay"]["end"] = kwargs["splay"]["end"]
else:
schedule[name]["splay"] = kwargs["splay"]
if "when" in kwargs:
if not _WHEN_SUPPORTED:
ret["result"] = False
ret["comment"] = 'Missing dateutil.parser, "when" is unavailable.'
return ret
else:
validate_when = kwargs["when"]
if not isinstance(validate_when, list):
validate_when = [validate_when]
for _when in validate_when:
try:
dateutil_parser.parse(_when)
except ValueError:
ret["result"] = False
ret["comment"] = 'Schedule item {} for "when" in invalid.'.format(
_when
)
return ret
for item in [
"range",
"when",
"once",
"once_fmt",
"cron",
"returner",
"after",
"return_config",
"return_kwargs",
"until",
"run_on_start",
"skip_during_range",
]:
if item in kwargs:
schedule[name][item] = kwargs[item]
return schedule[name]
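# For reference, a call such as
#     build_schedule_item('job1', function='test.ping', seconds=3600)
# returns (per the logic above) an OrderedDict roughly equivalent to:
#     {'function': 'test.ping', 'seconds': 3600, 'maxrunning': 1,
#      'name': 'job1', 'enabled': True, 'jid_include': True}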
def add(name, **kwargs):
"""
Add a job to the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.add job1 function='test.ping' seconds=3600
        # If the function takes arguments, pass them with job_args
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
"""
ret = {
"comment": "Failed to add job {} to schedule.".format(name),
"result": False,
"changes": {},
}
if name in list_(show_all=True, return_yaml=False):
ret["comment"] = "Job {} already exists in schedule.".format(name)
ret["result"] = False
return ret
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
time_conflict = False
for item in ["seconds", "minutes", "hours", "days"]:
if item in kwargs and "when" in kwargs:
time_conflict = True
if item in kwargs and "cron" in kwargs:
time_conflict = True
if time_conflict:
ret["comment"] = (
'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when"'
' or "cron" options.'
)
return ret
if "when" in kwargs and "cron" in kwargs:
ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
persist = kwargs.get("persist", True)
_new = build_schedule_item(name, **kwargs)
if "result" in _new and not _new["result"]:
return _new
schedule_data = {}
schedule_data[name] = _new
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be added to schedule.".format(name)
ret["result"] = True
else:
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{
"name": name,
"schedule": schedule_data,
"func": "add",
"persist": persist,
},
"manage_schedule",
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_add_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
if name in schedule:
ret["result"] = True
ret["comment"] = "Added job: {} to schedule.".format(name)
ret["changes"][name] = "added"
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule add failed."
return ret
def modify(name, **kwargs):
"""
Modify an existing job in the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600
"""
ret = {"comment": "", "changes": {}, "result": True}
time_conflict = False
for item in ["seconds", "minutes", "hours", "days"]:
if item in kwargs and "when" in kwargs:
time_conflict = True
if item in kwargs and "cron" in kwargs:
time_conflict = True
if time_conflict:
ret["result"] = False
ret["comment"] = (
'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when"'
" option."
)
return ret
if "when" in kwargs and "cron" in kwargs:
ret["result"] = False
ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
current_schedule = list_(show_all=True, return_yaml=False)
if name not in current_schedule:
ret["comment"] = "Job {} does not exist in schedule.".format(name)
ret["result"] = False
return ret
_current = current_schedule[name]
if "function" not in kwargs:
kwargs["function"] = _current.get("function")
# Remove the auto generated _seconds value
if "_seconds" in _current:
_current["seconds"] = _current.pop("_seconds")
    # Copy _current into _new, then update values from kwargs
_new = pycopy.deepcopy(_current)
_new.update(kwargs)
# Remove test from kwargs, it's not a valid schedule option
_new.pop("test", None)
if "result" in _new and not _new["result"]:
return _new
if _new == _current:
ret["comment"] = "Job {} in correct state".format(name)
return ret
ret["changes"][name] = {
"old": salt.utils.odict.OrderedDict(_current),
"new": salt.utils.odict.OrderedDict(_new),
}
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be modified in schedule.".format(name)
else:
persist = kwargs.get("persist", True)
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {
"name": name,
"schedule": _new,
"func": "modify",
"persist": persist,
}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"schedule": _new,
"where": "pillar",
"func": "modify",
"persist": False,
}
out = __salt__["event.fire"](event_data, "manage_schedule")
if out:
ret["comment"] = "Modified job: {} in schedule.".format(name)
else:
ret["comment"] = "Failed to modify job {} in schedule.".format(name)
ret["result"] = False
return ret
def run_job(name, force=False):
"""
Run a scheduled job on the minion immediately
CLI Example:
.. code-block:: bash
salt '*' schedule.run_job job1
salt '*' schedule.run_job job1 force=True
Force the job to run even if it is disabled.
"""
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
schedule = list_(show_all=True, return_yaml=False)
if name in schedule:
data = schedule[name]
if "enabled" in data and not data["enabled"] and not force:
ret["comment"] = "Job {} is disabled.".format(name)
else:
out = __salt__["event.fire"](
{"name": name, "func": "run_job"}, "manage_schedule"
)
if out:
ret["comment"] = "Scheduling Job {} on minion.".format(name)
else:
ret["comment"] = "Failed to run job {} on minion.".format(name)
ret["result"] = False
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
def enable_job(name, **kwargs):
"""
Enable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.enable_job job1
"""
ret = {"comment": [], "result": True, "changes": {}}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
if "test" in __opts__ and __opts__["test"]:
ret["comment"] = "Job: {} would be enabled in schedule.".format(name)
else:
persist = kwargs.get("persist", True)
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {"name": name, "func": "enable_job", "persist": persist}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"where": "pillar",
"func": "enable_job",
"persist": False,
}
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](event_data, "manage_schedule")
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_enabled_job_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
# check item exists in schedule and is enabled
if name in schedule and schedule[name]["enabled"]:
ret["result"] = True
ret["comment"] = "Enabled Job {} in schedule.".format(name)
ret["changes"][name] = "enabled"
else:
ret["result"] = False
ret[
"comment"
] = "Failed to enable job {} in schedule.".format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule enable job failed."
return ret
def disable_job(name, **kwargs):
"""
Disable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.disable_job job1
"""
ret = {"comment": [], "result": True, "changes": {}}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be disabled in schedule.".format(name)
else:
persist = kwargs.get("persist", True)
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {"name": name, "func": "disable_job", "persist": persist}
elif name in list_(show_all=True, where="pillar"):
event_data = {
"name": name,
"where": "pillar",
"func": "disable_job",
"persist": False,
}
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](event_data, "manage_schedule")
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_disabled_job_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
# check item exists in schedule and is enabled
if name in schedule and not schedule[name]["enabled"]:
ret["result"] = True
ret["comment"] = "Disabled Job {} in schedule.".format(name)
ret["changes"][name] = "disabled"
else:
ret["result"] = False
ret[
"comment"
] = "Failed to disable job {} in schedule.".format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule enable job failed."
return ret
def save(**kwargs):
"""
Save all scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.save
"""
ret = {"comment": [], "result": True}
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Schedule would be saved."
else:
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{"func": "save_schedule"}, "manage_schedule"
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_saved",
wait=30,
)
if event_ret and event_ret["complete"]:
ret["result"] = True
ret["comment"] = "Schedule (non-pillar items) saved."
else:
ret["result"] = False
ret["comment"] = "Failed to save schedule."
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule save failed."
return ret
def enable(**kwargs):
"""
Enable all scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.enable
"""
ret = {"comment": [], "changes": {}, "result": True}
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Schedule would be enabled."
else:
persist = kwargs.get("persist", True)
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{"func": "enable", "persist": persist}, "manage_schedule"
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_enabled_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
if "enabled" in schedule and schedule["enabled"]:
ret["result"] = True
ret["comment"] = "Enabled schedule on minion."
ret["changes"]["schedule"] = "enabled"
else:
ret["result"] = False
ret["comment"] = "Failed to enable schedule on minion."
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule enable job failed."
return ret
def disable(**kwargs):
"""
Disable all scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.disable
"""
ret = {"comment": [], "changes": {}, "result": True}
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Schedule would be disabled."
else:
persist = kwargs.get("persist", True)
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{"func": "disable", "persist": persist}, "manage_schedule"
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_disabled_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
if "enabled" in schedule and not schedule["enabled"]:
ret["result"] = True
ret["comment"] = "Disabled schedule on minion."
ret["changes"]["schedule"] = "disabled"
else:
ret["result"] = False
ret["comment"] = "Failed to disable schedule on minion."
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule disable job failed."
return ret
def reload_():
"""
Reload saved scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.reload
"""
ret = {"comment": [], "result": True}
# If there a schedule defined in pillar, refresh it.
if "schedule" in __pillar__:
out = __salt__["event.fire"]({}, "pillar_refresh")
if out:
ret["comment"].append("Reloaded schedule from pillar on minion.")
else:
ret["comment"].append("Failed to reload schedule from pillar on minion.")
ret["result"] = False
    # move this file into a configurable opt
sfn = "{}/{}/schedule.conf".format(
__opts__["config_dir"], os.path.dirname(__opts__["default_include"])
)
    if os.path.isfile(sfn):
        schedule = None  # keep the name bound even if the YAML load below fails
        with salt.utils.files.fopen(sfn, "rb") as fp_:
try:
schedule = salt.utils.yaml.safe_load(fp_)
except salt.utils.yaml.YAMLError as exc:
ret["comment"].append(
"Unable to read existing schedule file: {}".format(exc)
)
if schedule:
if "schedule" in schedule and schedule["schedule"]:
out = __salt__["event.fire"](
{"func": "reload", "schedule": schedule}, "manage_schedule"
)
if out:
ret["comment"].append(
"Reloaded schedule on minion from schedule.conf."
)
else:
ret["comment"].append(
"Failed to reload schedule on minion from schedule.conf."
)
ret["result"] = False
else:
ret["comment"].append(
"Failed to reload schedule on minion. Saved file is empty or"
" invalid."
)
ret["result"] = False
else:
ret["comment"].append(
"Failed to reload schedule on minion. Saved file is empty or invalid."
)
ret["result"] = False
return ret
def move(name, target, **kwargs):
"""
Move scheduled job to another minion or minions.
CLI Example:
.. code-block:: bash
salt '*' schedule.move jobname target
"""
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be moved from schedule.".format(name)
else:
opts_schedule = list_(show_all=True, where="opts", return_yaml=False)
pillar_schedule = list_(show_all=True, where="pillar", return_yaml=False)
if name in opts_schedule:
schedule_data = opts_schedule[name]
where = None
elif name in pillar_schedule:
schedule_data = pillar_schedule[name]
where = "pillar"
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
schedule_opts = []
for key, value in schedule_data.items():
temp = "{}={}".format(key, value)
schedule_opts.append(temp)
response = __salt__["publish.publish"](target, "schedule.add", schedule_opts)
        # Get errors and list of affected minions
errors = []
minions = []
for minion in response:
minions.append(minion)
if not response[minion]:
errors.append(minion)
# parse response
if not response:
ret["comment"] = "no servers answered the published schedule.add command"
return ret
elif len(errors) > 0:
ret["comment"] = "the following minions return False"
ret["minions"] = errors
return ret
else:
delete(name, where=where)
ret["result"] = True
ret["comment"] = "Moved Job {} from schedule.".format(name)
ret["minions"] = minions
return ret
return ret
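# Editorial note: the key difference from copy() below is that, once the target
# minions acknowledge the published schedule.add, move removes the job from the
# source schedule via delete(name, where=where); copy leaves the source job intact.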
def copy(name, target, **kwargs):
"""
Copy scheduled job to another minion or minions.
CLI Example:
.. code-block:: bash
salt '*' schedule.copy jobname target
"""
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be copied from schedule.".format(name)
else:
opts_schedule = list_(show_all=True, where="opts", return_yaml=False)
pillar_schedule = list_(show_all=True, where="pillar", return_yaml=False)
if name in opts_schedule:
schedule_data = opts_schedule[name]
elif name in pillar_schedule:
schedule_data = pillar_schedule[name]
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
schedule_opts = []
for key, value in schedule_data.items():
temp = "{}={}".format(key, value)
schedule_opts.append(temp)
response = __salt__["publish.publish"](target, "schedule.add", schedule_opts)
        # Get errors and list of affected minions
errors = []
minions = []
for minion in response:
minions.append(minion)
if not response[minion]:
errors.append(minion)
# parse response
if not response:
ret["comment"] = "no servers answered the published schedule.add command"
return ret
elif len(errors) > 0:
ret["comment"] = "the following minions return False"
ret["minions"] = errors
return ret
else:
ret["result"] = True
ret["comment"] = "Copied Job {} from schedule to minion(s).".format(name)
ret["minions"] = minions
return ret
return ret
def postpone_job(name, current_time, new_time, **kwargs):
"""
Postpone a job in the minion's schedule
    Current time and new time should be date strings;
    the default format is %Y-%m-%dT%H:%M:%S.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' schedule.postpone_job job current_time new_time
salt '*' schedule.postpone_job job current_time new_time time_fmt='%Y-%m-%dT%H:%M:%S'
"""
time_fmt = kwargs.get("time_fmt") or "%Y-%m-%dT%H:%M:%S"
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
return ret
if not current_time:
ret["comment"] = "Job current time is required."
ret["result"] = False
return ret
else:
try:
# Validate date string
datetime.datetime.strptime(current_time, time_fmt)
except (TypeError, ValueError):
log.error("Date string could not be parsed: %s, %s", new_time, time_fmt)
ret["comment"] = "Date string could not be parsed."
ret["result"] = False
return ret
if not new_time:
ret["comment"] = "Job new_time is required."
ret["result"] = False
return ret
else:
try:
# Validate date string
datetime.datetime.strptime(new_time, time_fmt)
except (TypeError, ValueError):
log.error("Date string could not be parsed: %s, %s", new_time, time_fmt)
ret["comment"] = "Date string could not be parsed."
ret["result"] = False
return ret
if "test" in __opts__ and __opts__["test"]:
ret["comment"] = "Job: {} would be postponed in schedule.".format(name)
else:
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {
"name": name,
"time": current_time,
"new_time": new_time,
"time_fmt": time_fmt,
"func": "postpone_job",
}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"time": current_time,
"new_time": new_time,
"time_fmt": time_fmt,
"where": "pillar",
"func": "postpone_job",
}
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](event_data, "manage_schedule")
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_postpone_job_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
# check item exists in schedule and is enabled
if name in schedule and schedule[name]["enabled"]:
ret["result"] = True
ret["comment"] = "Postponed Job {} in schedule.".format(
name
)
else:
ret["result"] = False
ret[
"comment"
] = "Failed to postpone job {} in schedule.".format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule postpone job failed."
return ret
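# A hedged CLI sketch (hypothetical job name and timestamps) using the default
# time_fmt of "%Y-%m-%dT%H:%M:%S":
#
#     salt 'minion1' schedule.postpone_job job1 2021-01-01T00:00:00 2021-01-02T00:00:00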
def skip_job(name, current_time, **kwargs):
"""
Skip a job in the minion's schedule at specified time.
    Time to skip should be specified as a date string;
    the default format is %Y-%m-%dT%H:%M:%S.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' schedule.skip_job job time
"""
time_fmt = kwargs.get("time_fmt") or "%Y-%m-%dT%H:%M:%S"
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
if not current_time:
ret["comment"] = "Job time is required."
ret["result"] = False
else:
# Validate date string
try:
datetime.datetime.strptime(current_time, time_fmt)
except (TypeError, ValueError):
log.error("Date string could not be parsed: %s, %s", current_time, time_fmt)
ret["comment"] = "Date string could not be parsed."
ret["result"] = False
return ret
if "test" in __opts__ and __opts__["test"]:
ret["comment"] = "Job: {} would be skipped in schedule.".format(name)
else:
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {
"name": name,
"time": current_time,
"time_fmt": time_fmt,
"func": "skip_job",
}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"time": current_time,
"time_fmt": time_fmt,
"where": "pillar",
"func": "skip_job",
}
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](event_data, "manage_schedule")
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_skip_job_complete",
wait=30,
)
if event_ret and event_ret["complete"]:
schedule = event_ret["schedule"]
# check item exists in schedule and is enabled
if name in schedule and schedule[name]["enabled"]:
ret["result"] = True
ret["comment"] = "Added Skip Job {} in schedule.".format(
name
)
else:
ret["result"] = False
ret[
"comment"
] = "Failed to skip job {} in schedule.".format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret["comment"] = "Event module not available. Schedule skip job failed."
return ret
def show_next_fire_time(name, **kwargs):
"""
    Show the next fire time for a scheduled job
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' schedule.show_next_fire_time job_name
"""
ret = {"result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
    event_ret = None
    try:
event_data = {"name": name, "func": "get_next_fire_time"}
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](event_data, "manage_schedule")
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_next_fire_time_complete",
wait=30,
)
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
ret[
"comment"
] = "Event module not available. Schedule show next fire time failed."
ret["result"] = True
return ret
if "next_fire_time" in event_ret:
ret["next_fire_time"] = event_ret["next_fire_time"]
else:
ret["comment"] = "next fire time not available."
return ret
def job_status(name, time_fmt="%Y-%m-%dT%H:%M:%S"):
"""
Show the information for a particular job.
CLI Example:
.. code-block:: bash
salt '*' schedule.job_status job_name
"""
def convert_datetime_objects_in_dict_to_string(data_dict, time_fmt):
return {
key: value.strftime(time_fmt)
if isinstance(value, datetime.datetime)
else value
for key, value in data_dict.items()
}
schedule = {}
try:
with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
res = __salt__["event.fire"](
{"func": "job_status", "name": name, "fire_event": True},
"manage_schedule",
)
if res:
event_ret = event_bus.get_event(
tag="/salt/minion/minion_schedule_job_status_complete", wait=30
)
data = event_ret.get("data", {})
return convert_datetime_objects_in_dict_to_string(data, time_fmt)
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
ret["comment"] = "Event module not available. Schedule list failed."
ret["result"] = True
log.debug("Event module not available. Schedule list failed.")
return ret
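# Illustration (hypothetical key name): the helper above renders datetime values
# with time_fmt and passes everything else through, e.g.
# {"last_run": datetime.datetime(2021, 1, 1)} -> {"last_run": "2021-01-01T00:00:00"}.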
| 32.397974
| 101
| 0.51521
|
0d73ef7c7561d1d7cf7848df5799ad6e9ef34323
| 4,590
|
py
|
Python
|
rest_framework_mongoengine/tests/test_serializers.py
|
Careerleaf/django-rest-framework-mongoengine
|
fc28dbf7af760528f6f7247e567328df46458799
|
[
"MIT"
] | null | null | null |
rest_framework_mongoengine/tests/test_serializers.py
|
Careerleaf/django-rest-framework-mongoengine
|
fc28dbf7af760528f6f7247e567328df46458799
|
[
"MIT"
] | null | null | null |
rest_framework_mongoengine/tests/test_serializers.py
|
Careerleaf/django-rest-framework-mongoengine
|
fc28dbf7af760528f6f7247e567328df46458799
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import mongoengine as me
from unittest import TestCase
from bson import objectid
from rest_framework_mongoengine.serializers import MongoEngineModelSerializer
from rest_framework import serializers as s
class Job(me.Document):
title = me.StringField()
status = me.StringField(choices=('draft', 'published'))
notes = me.StringField(required=False)
on = me.DateTimeField(default=datetime.utcnow)
weight = me.IntField(default=0)
class JobSerializer(MongoEngineModelSerializer):
id = s.Field()
title = s.CharField()
status = s.ChoiceField(read_only=True)
sort_weight = s.IntegerField(source='weight')
class Meta:
model = Job
fields = ('id', 'title','status', 'sort_weight')
class TestReadonlyRestore(TestCase):
def test_restore_object(self):
job = Job(title='original title', status='draft', notes='secure')
data = {
'title': 'updated title ...',
'status': 'published', # this one is read only
'notes': 'hacked', # this field should not update
            'sort_weight': 10 # mapped to a field with different name
}
serializer = JobSerializer(job, data=data, partial=True)
self.assertTrue(serializer.is_valid())
obj = serializer.object
self.assertEqual(data['title'], obj.title)
self.assertEqual('draft', obj.status)
self.assertEqual('secure', obj.notes)
self.assertEqual(10, obj.weight)
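# Note on the assertions above: 'status' stays 'draft' because the serializer marks
# it read_only, 'notes' is untouched because it is not listed in Meta.fields, and
# 'sort_weight' maps onto the model's 'weight' field via source='weight'.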
# Testing restoring embedded property
class Location(me.EmbeddedDocument):
city = me.StringField()
# list of
class Category(me.EmbeddedDocument):
id = me.StringField()
counter = me.IntField(default=0, required=True)
class Secret(me.EmbeddedDocument):
key = me.StringField()
class SomeObject(me.Document):
name = me.StringField()
loc = me.EmbeddedDocumentField('Location')
categories = me.ListField(me.EmbeddedDocumentField(Category))
codes = me.ListField(me.EmbeddedDocumentField(Secret))
class LocationSerializer(MongoEngineModelSerializer):
city = s.CharField()
class Meta:
model = Location
class CategorySerializer(MongoEngineModelSerializer):
id = s.CharField(max_length=24)
class Meta:
model = Category
fields = ('id',)
class SomeObjectSerializer(MongoEngineModelSerializer):
location = LocationSerializer(source='loc')
categories = CategorySerializer(many=True, allow_add_remove=True)
class Meta:
model = SomeObject
fields = ('name', 'location', 'categories')
class TestRestoreEmbedded(TestCase):
def setUp(self):
self.data = {
            'name': 'some name',
'location': {
'city': 'Toronto'
},
'categories': [{'id': 'cat1'}, {'id': 'category_2', 'counter': 666}],
'codes': [{'key': 'mykey1'}]
}
def test_restore_new(self):
serializer = SomeObjectSerializer(data=self.data)
self.assertTrue(serializer.is_valid())
obj = serializer.object
self.assertEqual(self.data['name'], obj.name )
self.assertEqual('Toronto', obj.loc.city )
self.assertEqual(2, len(obj.categories))
self.assertEqual('category_2', obj.categories[1].id)
# counter is not listed in serializer fields, cannot be updated
self.assertEqual(0, obj.categories[1].counter)
# codes are not listed, should not be updatable
self.assertEqual(0, len(obj.codes))
def test_restore_update(self):
data = self.data
instance = SomeObject(
name='original',
loc=Location(city="New York"),
categories=[Category(id='orig1', counter=777)],
codes=[Secret(key='confidential123')]
)
serializer = SomeObjectSerializer(instance, data=data, partial=True)
# self.assertTrue(serializer.is_valid())
if not serializer.is_valid():
            print('errors: %s' % serializer._errors)
assert False, 'errors'
obj = serializer.object
self.assertEqual(data['name'], obj.name )
self.assertEqual('Toronto', obj.loc.city )
# codes is not listed, should not be updatable
        self.assertEqual(1, len(obj.codes))
self.assertEqual('confidential123', obj.codes[0].key) # should keep original val
self.assertEqual(2, len(obj.categories))
self.assertEqual('category_2', obj.categories[1].id)
self.assertEqual(0, obj.categories[1].counter)
| 30
| 88
| 0.641394
|
833dd20b5a3ca24a89bcf9ea536cbba27fe9e76a
| 1,431
|
py
|
Python
|
tensorflow/contrib/quantization/__init__.py
|
jdehotin/TensorFlow
|
a6c5f8e4e013e54fed8dfcf49fb6de365f018022
|
[
"Apache-2.0"
] | 680
|
2016-12-03T14:38:28.000Z
|
2022-02-16T04:06:45.000Z
|
tensorflow/contrib/quantization/__init__.py
|
alainrk/tensorflow
|
314d9cd9b607460f8bfea80fc828b1521ca18443
|
[
"Apache-2.0"
] | 38
|
2016-11-17T08:43:51.000Z
|
2019-11-12T12:27:04.000Z
|
tensorflow/contrib/quantization/__init__.py
|
alainrk/tensorflow
|
314d9cd9b607460f8bfea80fc828b1521ca18443
|
[
"Apache-2.0"
] | 250
|
2016-12-05T10:37:17.000Z
|
2022-03-18T21:26:55.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building quantized models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,g-bad-import-order
from tensorflow.contrib.quantization.python import array_ops as quantized_array_ops
from tensorflow.contrib.quantization.python.math_ops import *
from tensorflow.contrib.quantization.python.nn_ops import *
from tensorflow.contrib.quantization.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.contrib.quantization.ops.gen_array_ops import dequantize
from tensorflow.contrib.quantization.ops.gen_array_ops import quantize_v2
from tensorflow.contrib.quantization.ops.gen_array_ops import quantized_concat
| 46.16129
| 88
| 0.773585
|
40f8edfcdd18eeef63a20b8590172b5d186fbf8c
| 27,514
|
py
|
Python
|
tensorflow/python/ops/map_fn.py
|
Mithilesh1609/tensorflow
|
63f70b5611d7f50512ea26295d26016c2704901b
|
[
"Apache-2.0"
] | 8
|
2020-07-29T18:50:45.000Z
|
2021-07-25T07:06:43.000Z
|
tensorflow/python/ops/map_fn.py
|
3ecurityy/tensorflow
|
f8c0e68a8aa5d575a19129ec67c9ed6262652082
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/ops/map_fn.py
|
3ecurityy/tensorflow
|
f8c0e68a8aa5d575a19129ec67c9ed6262652082
|
[
"Apache-2.0"
] | 11
|
2020-05-31T13:14:56.000Z
|
2021-12-14T04:39:25.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["map_fn"])
@deprecation.deprecated_args(None, "Use fn_output_signature instead", "dtype")
def map_fn(fn,
elems,
dtype=None,
parallel_iterations=None,
back_prop=True,
swap_memory=False,
infer_shape=True,
name=None,
fn_output_signature=None):
"""Transforms `elems` by applying `fn` to each element unstacked on axis 0.
See also `tf.scan`.
`map_fn` unstacks `elems` on axis 0 to obtain a sequence of elements;
calls `fn` to transform each element; and then stacks the transformed
values back together.
#### Mapping functions with single-Tensor inputs and outputs
If `elems` is a single tensor and `fn`'s signature is `tf.Tensor->tf.Tensor`,
then `map_fn(fn, elems)` is equivalent to
`tf.stack([fn(elem) for elem in tf.unstack(elems)])`. E.g.:
>>> tf.map_fn(fn=lambda t: tf.range(t, t + 3), elems=tf.constant([3, 5, 2]))
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[3, 4, 5],
[5, 6, 7],
[2, 3, 4]], dtype=int32)>
`map_fn(fn, elems).shape = [elems.shape[0]] + fn(elems[0]).shape`.
#### Mapping functions with multi-arity inputs and outputs
`map_fn` also supports functions with multi-arity inputs and outputs:
* If `elems` is a tuple (or nested structure) of tensors, then those tensors
must all have the same outer-dimension size (`num_elems`); and `fn` is
used to transform each tuple (or structure) of corresponding slices from
`elems`. E.g., if `elems` is a tuple `(t1, t2, t3)`, then `fn` is used to
transform each tuple of slices `(t1[i], t2[i], t3[i])`
(where `0 <= i < num_elems`).
* If `fn` returns a tuple (or nested structure) of tensors, then the
result is formed by stacking corresponding elements from those structures.
#### Specifying `fn`'s output signature
If `fn`'s input and output signatures are different, then the output
signature must be specified using `fn_output_signature`. (The input and
  output signatures differ if their structures, dtypes, or tensor types do
not match). E.g.:
>>> tf.map_fn(fn=tf.strings.length, # input & output have different dtypes
... elems=tf.constant(["hello", "moon"]),
... fn_output_signature=tf.int32)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([5, 4], dtype=int32)>
>>> tf.map_fn(fn=tf.strings.join, # input & output have different structures
... elems=[tf.constant(['The', 'A']), tf.constant(['Dog', 'Cat'])],
... fn_output_signature=tf.string)
<tf.Tensor: shape=(2,), dtype=string,
numpy=array([b'TheDog', b'ACat'], dtype=object)>
`fn_output_signature` can be specified using any of the following:
* A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)
* A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)
* A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)
* A (possibly nested) tuple, list, or dict containing the above types.
#### RaggedTensors
`map_fn` supports `tf.RaggedTensor` inputs and outputs. In particular:
* If `elems` is a `RaggedTensor`, then `fn` will be called with each
row of that ragged tensor.
* If `elems` has only one ragged dimension, then the values passed to
`fn` will be `tf.Tensor`s.
* If `elems` has multiple ragged dimensions, then the values passed to
`fn` will be `tf.RaggedTensor`s with one fewer ragged dimension.
* If the result of `map_fn` should be a `RaggedTensor`, then use a
`tf.RaggedTensorSpec` to specify `fn_output_signature`.
* If `fn` returns `tf.Tensor`s with varying sizes, then use a
`tf.RaggedTensorSpec` with `ragged_rank=0` to combine them into a
single ragged tensor (which will have ragged_rank=1).
* If `fn` returns `tf.RaggedTensor`s, then use a `tf.RaggedTensorSpec`
with the same `ragged_rank`.
>>> # Example: RaggedTensor input
>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
>>> tf.map_fn(tf.reduce_sum, rt, fn_output_signature=tf.int32)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([6, 0, 9, 6], dtype=int32)>
>>> # Example: RaggedTensor output
>>> elems = tf.constant([3, 5, 0, 2])
>>> tf.map_fn(tf.range, elems,
... fn_output_signature=tf.RaggedTensorSpec(shape=[None],
... dtype=tf.int32))
<tf.RaggedTensor [[0, 1, 2], [0, 1, 2, 3, 4], [], [0, 1]]>
Note: `map_fn` should only be used if you need to map a function over the
*rows* of a `RaggedTensor`. If you wish to map a function over the
individual values, then you should use:
* `tf.ragged.map_flat_values(fn, rt)`
(if fn is expressible as TensorFlow ops)
* `rt.with_flat_values(map_fn(fn, rt.flat_values))`
(otherwise)
E.g.:
>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
>>> tf.ragged.map_flat_values(lambda x: x + 2, rt)
<tf.RaggedTensor [[3, 4, 5], [], [6, 7], [8]]>
#### SparseTensors
`map_fn` supports `tf.sparse.SparseTensor` inputs and outputs. In particular:
* If `elems` is a `SparseTensor`, then `fn` will be called with each row
of that sparse tensor. In particular, the value passed to `fn` will be a
`tf.sparse.SparseTensor` with one fewer dimension than `elems`.
* If the result of `map_fn` should be a `SparseTensor`, then use a
`tf.SparseTensorSpec` to specify `fn_output_signature`. The individual
`SparseTensor`s returned by `fn` will be stacked into a single
`SparseTensor` with one more dimension.
>>> # Example: SparseTensor input
>>> st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4])
>>> tf.map_fn(tf.sparse.reduce_sum, st, fn_output_signature=tf.int32)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([2, 0, 7, 0], dtype=int32)>
>>> # Example: SparseTensor output
>>> tf.sparse.to_dense(
... tf.map_fn(tf.sparse.eye, tf.constant([2, 3]),
... fn_output_signature=tf.SparseTensorSpec(None, tf.float32)))
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]],
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]], dtype=float32)>
Note: `map_fn` should only be used if you need to map a function over the
*rows* of a `SparseTensor`. If you wish to map a function over the nonzero
values, then you should use:
* If the function is expressible as TensorFlow ops, use:
```python
tf.sparse.SparseTensor(st.indices, fn(st.values), st.dense_shape)
```
* Otherwise, use:
```python
tf.sparse.SparseTensor(st.indices, tf.map_fn(fn, st.values),
st.dense_shape)
```
#### `map_fn` vs. vectorized operations
`map_fn` will apply the operations used by `fn` to each element of `elems`,
resulting in `O(elems.shape[0])` total operations. This is somewhat
mitigated by the fact that `map_fn` can process elements in parallel.
However, a transform expressed using `map_fn` is still typically less
efficient than an equivalent transform expressed using vectorized operations.
`map_fn` should typically only be used if one of the following is true:
* It is difficult or expensive to express the desired transform with
vectorized operations.
* `fn` creates large intermediate values, so an equivalent vectorized
transform would take too much memory.
* Processing elements in parallel is more efficient than an equivalent
vectorized transform.
* Efficiency of the transform is not critical, and using `map_fn` is
more readable.
E.g., the example given above that maps `fn=lambda t: tf.range(t, t + 3)`
across `elems` could be rewritten more efficiently using vectorized ops:
>>> elems = tf.constant([3, 5, 2])
>>> tf.range(3) + tf.expand_dims(elems, 1)
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[3, 4, 5],
[5, 6, 7],
[2, 3, 4]], dtype=int32)>
In some cases, `tf.vectorized_map` can be used to automatically convert a
  function to a vectorized equivalent.
#### Eager execution
When executing eagerly, `map_fn` does not execute in parallel even if
`parallel_iterations` is set to a value > 1. You can still get the
performance benefits of running a function in parallel by using the
`tf.function` decorator:
>>> fn=lambda t: tf.range(t, t + 3)
>>> @tf.function
... def func(elems):
... return tf.map_fn(fn, elems, parallel_iterations=3)
>>> func(tf.constant([3, 5, 2]))
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[3, 4, 5],
[5, 6, 7],
[2, 3, 4]], dtype=int32)>
Note that if you use the `tf.function` decorator, any non-TensorFlow Python
code that you may have written in your function won't get executed. See
`tf.function` for more details. The recommendation would be to debug without
`tf.function` but switch to it to get performance benefits of running `map_fn`
in parallel.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`. Its output must have the
same structure as `fn_output_signature` if one is provided; otherwise it
must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unstacked along their first dimension. `fn` will be applied to the
nested sequence of the resulting slices. `elems` may include ragged and
sparse tensors.
dtype: Deprecated: Equivalent to `fn_output_signature`.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel. When graph building, the default value is 10. While executing
eagerly, the default value is set to 1.
back_prop: (optional) False disables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
fn_output_signature: The output signature of `fn`. Must be specified if
`fn`'s input and output signatures are different (i.e., if their
structures, dtypes, or tensor types do not match).
`fn_output_signature` can be specified using any of the following:
* A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)
* A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)
* A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)
* A (possibly nested) tuple, list, or dict containing the above types.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor stacks the
results of applying `fn` to tensors unstacked from `elems` along the first
dimension, from first to last. The result may include ragged and sparse
tensors.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `fn_output_signature` do not match.
ValueError: if the lengths of the output of `fn` and `fn_output_signature`
do not match.
Examples:
>>> elems = np.array([1, 2, 3, 4, 5, 6])
>>> tf.map_fn(lambda x: x * x, elems)
<tf.Tensor: shape=(6,), dtype=int64, numpy=array([ 1, 4, 9, 16, 25, 36])>
>>> elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
>>> tf.map_fn(lambda x: x[0] * x[1], elems, fn_output_signature=tf.int64)
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([-1, 2, -3])>
>>> elems = np.array([1, 2, 3])
>>> tf.map_fn(lambda x: (x, -x), elems,
... fn_output_signature=(tf.int64, tf.int64))
(<tf.Tensor: shape=(3,), dtype=int64, numpy=array([1, 2, 3])>,
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([-1, -2, -3])>)
"""
# This function uses a `while_loop` to call `fn` on each value of the input
# tensor(s) (unstacked on dimension 0). The following sequence of variables
# are used to transform the input tensor(s) (`elems`) into the output
# tensor(s) (`result`):
#
# - Preparing and unstacking input values for the while_loop:
# - elems: The input tensor(s) to map_fn. May include composite tensors.
# - elems_flat: Flattened list of tensors from elems (using nest.flatten)
# May include composite tensors.
# - elems_batchable: Concatenation of "batchable tensor lists" for each
# tensor in elems_flat. This "boxes" composite tensors
# into sliceable tf.Tensor objects. For more info see:
# TensorSpec._to_batched_tensor_list
# - elems_batchable_ta: List of TensorArrays used to unstack each Tensor
# in elems_batchable into elems_value_batchable.
#
# - Calling `fn` on each unstacked value in the body of the while_loop:
# - elems_value_batchable: Single unstacked value from elems_batchable.
# - elems_value_flat: Single unstacked value from elems_flat,
# constructed from elems_value_batchable (using
# TensorSpec._from_tensor_list).
# - elems_value: Single unstacked value from elems (the input to fn).
# - result_value: Result of calling `fn(elems_value)`. May contain
# composite tensors.
# - result_value_flat: Flattened list of tensors from result_value.
# May contain composite tensors.
# - result_value_batchable: Concatenation of batchable tensor lists for
# each tensor in result_value_flat
# (using TensorSpec._to_tensor_list).
#
# - Collecting and stacking output values from the while_loop:
  #   - result_batchable_ta: List of TensorArrays used to stack each tensor
  #         in result_value_batchable into result_batchable.
# - result_batchable: Stacked tensors from result_batchable_ta.
  #   - result_flat: Flat list of tensors for the result, constructed from
  #       result_batchable (using TensorSpec._from_tensor_list).
  #   - result: Structured result value packed from result_flat
# (using nest.pack_sequence_as).
if fn_output_signature is None:
fn_output_signature = dtype
if not callable(fn):
raise TypeError("fn must be callable.")
in_graph_mode = not context.executing_eagerly()
# Set the default number of parallel_iterations depending on graph/eager mode.
if in_graph_mode and not parallel_iterations:
parallel_iterations = 10
elif not in_graph_mode and not parallel_iterations:
parallel_iterations = 1
elif not in_graph_mode and parallel_iterations > 1:
logging.log_first_n(
logging.WARN, "Setting parallel_iterations > 1 has no "
"effect when executing eagerly. Consider calling map_fn"
" with tf.function to execute fn in "
"parallel.", 1)
parallel_iterations = 1
# Flatten the input tensors, and get the TypeSpec for each one.
elems_flat = nest.flatten(elems)
elems_flat_signature = [type_spec.type_spec_from_value(e) for e in elems_flat]
elems_unflatten = lambda x: nest.pack_sequence_as(elems, x)
# Flatten fn's output signature.
if fn_output_signature is None:
# If fn_output_signature was not specified, then assume that it matches the
# input signature.
result_flat_signature = [
_most_general_compatible_type(s)._unbatch() # pylint: disable=protected-access
for s in elems_flat_signature
]
result_unflatten = elems_unflatten
else:
result_flat_signature = [
_dtype_to_spec(d) for d in nest.flatten(fn_output_signature)
]
result_unflatten = lambda x: nest.pack_sequence_as(fn_output_signature, x)
with ops.name_scope(name, "map", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
elems_flat = [
ops.convert_to_tensor_or_composite(t, name="elem") for t in elems_flat
]
# Check that inputs are not scalars.
elems_static_shape = elems_flat[0].shape
if elems_static_shape.ndims is not None and elems_static_shape.ndims < 1:
if len(elems_flat) == 1:
raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar")
else:
raise ValueError(
"elements in elems must be 1+ dimensional Tensors, not scalars"
)
# Box any composite tensors into tensor lists.
elems_batchable = _elems_flat_to_batchable(elems_flat)
# Find the number of iterations, n. (may be known statically.)
n_static = tensor_shape.Dimension(
tensor_shape.dimension_value(
elems_batchable[0].get_shape().with_rank_at_least(1)[0]))
for tensor in elems_batchable[1:]:
n_static.merge_with(
tensor_shape.Dimension(
tensor_shape.dimension_value(
tensor.get_shape().with_rank_at_least(1)[0])))
n = n_static.value or array_ops.shape(elems_batchable[0])[0]
# Convert elems to tensor array.
# TODO(edloper): Should we set infer_shape=False for composite tensors?
elems_batchable_ta = [
tensor_array_ops.TensorArray(
dtype=t.dtype, size=n, dynamic_size=False, infer_shape=True)
for t in elems_batchable
]
# Unpack elements
elems_batchable_ta = [
ta.unstack(t) for (ta, t) in zip(elems_batchable_ta, elems_batchable)
]
i = constant_op.constant(0)
# Prepare result tensor array.
# TODO(edloper): Should we set infer_shape=False for composite tensors?
result_batchable_dtype = _result_flat_signature_to_batchable_dtype(
result_flat_signature)
result_batchable_ta = [
tensor_array_ops.TensorArray(
dtype=dt, size=n, dynamic_size=False, infer_shape=infer_shape)
for dt in result_batchable_dtype
]
def compute(i, tas):
"""The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if fn_output_signature and result_value structure don't match
ValueType: if fn_output_signature and result_value lengths don't match
"""
elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]
elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable,
elems_flat_signature)
elems_value = elems_unflatten(elems_value_flat)
ag_ctx = autograph_ctx.control_status_ctx()
autographed_fn = autograph.tf_convert(fn, ag_ctx)
result_value = autographed_fn(elems_value)
nest.assert_same_structure(fn_output_signature or elems, result_value)
result_value_flat = nest.flatten(result_value)
result_value_batchable = _result_value_flat_to_batchable(
result_value_flat, result_flat_signature)
tas = [
ta.write(i, value) for (ta, value) in zip(tas, result_value_batchable)
]
return (i + 1, tas)
_, r_a = control_flow_ops.while_loop(
lambda i, _: i < n,
compute, (i, result_batchable_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
result_batchable = [r.stack() for r in r_a]
# Update each output tensor w/ static shape info about the outer dimension.
for r in result_batchable:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
result_flat = _result_batchable_to_flat(result_batchable,
result_flat_signature)
result = result_unflatten(result_flat)
return result
def _dtype_to_spec(d):
if not isinstance(d, type_spec.TypeSpec):
d = tensor_spec.TensorSpec(None, d)
return d
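# For example, a bare dtype such as tf.int32 is wrapped as TensorSpec(None, tf.int32),
# while an existing TypeSpec (TensorSpec, RaggedTensorSpec, ...) is returned unchanged.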
def _most_general_compatible_type(spec):
"""Returns the most general TypeSpec compatible with `spec`."""
# TODO(edloper): Consider adding most_general_compatible_type to TypeSpec API
if isinstance(spec, tensor_spec.TensorSpec):
return tensor_spec.TensorSpec(None, spec.dtype)
elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
# pylint: disable=protected-access
return ragged_tensor.RaggedTensorSpec(None, spec._dtype, spec._ragged_rank,
spec._row_splits_dtype)
elif isinstance(spec, sparse_tensor.SparseTensorSpec):
# pylint: disable=protected-access
return sparse_tensor.SparseTensorSpec(None, spec.dtype)
else:
return spec
def _result_flat_signature_to_batchable_dtype(result_flat_signature):
"""Converts result_flat_signature -> result_batchable_dtype."""
components = []
for spec in result_flat_signature:
if not isinstance(spec, type_spec.BatchableTypeSpec):
raise TypeError("map_fn can not generate %s outputs" % (spec,))
# pylint: disable=protected-access
components.extend([s.dtype for s in spec._flat_tensor_specs])
return components
def _elems_flat_to_batchable(elems_flat):
"""Converts elems_flat -> elems_batchable."""
elems_batchable = []
for elems_tensor in elems_flat:
spec = type_spec.type_spec_from_value(elems_tensor)
if not isinstance(spec, type_spec.BatchableTypeSpec):
raise TypeError("map_fn can not consume %s inputs: got %r" %
(spec, elems_tensor))
# pylint: disable=protected-access
elems_batchable.extend(spec._to_batched_tensor_list(elems_tensor))
return elems_batchable
def _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature):
"""Converts elems_value_batchable -> elems_value_flat."""
elems_value_flat = []
i = 0
for spec in elems_flat_signature:
# pylint: disable=protected-access
spec = spec._unbatch()
tensor_list = elems_value_batchable[i:i + len(spec._flat_tensor_specs)]
elems_value_flat.append(spec._from_compatible_tensor_list(tensor_list))
i += len(tensor_list)
assert i == len(elems_value_batchable)
return elems_value_flat
def _result_value_flat_to_batchable(result_value_flat, result_flat_signature):
"""Converts result_value_flat -> result_value_batchable."""
result_value_batchable = []
for (r_value, r_spec) in zip(result_value_flat, result_flat_signature):
if isinstance(r_spec, tensor_spec.TensorSpec):
result_value_batchable.append(r_value)
else:
if not r_spec.is_compatible_with(r_value):
raise ValueError(
"Error in map_fn:\n Expected `fn` to return a:\n %s\n"
" But it returned a:\n %s\n (value=%s)\n"
" To fix, update the `fn_output_signature` (or `dtype`) "
"argument to `map_fn`." %
(r_spec, type_spec.type_spec_from_value(r_value), r_value))
result_value_batchable.extend(r_spec._to_tensor_list(r_value)) # pylint: disable=protected-access
return result_value_batchable
def _result_batchable_to_flat(result_batchable, result_flat_signature):
"""Converts result_batchable -> result_flat."""
result_flat = []
i = 0
for spec in result_flat_signature:
# pylint: disable=protected-access
num_tensors = len(spec._flat_tensor_specs)
result_flat.append(
spec._batch(None)._from_compatible_tensor_list(
result_batchable[i:i + num_tensors]))
i += num_tensors
assert i == len(result_batchable)
return result_flat
@tf_export("map_fn", v1=[])
@deprecation.deprecated_arg_values(
None,
"""back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))""",
warn_once=True,
back_prop=False)
@deprecation.deprecated_args(None, "Use fn_output_signature instead", "dtype")
def map_fn_v2(fn,
elems,
dtype=None,
parallel_iterations=None,
back_prop=True,
swap_memory=False,
infer_shape=True,
name=None,
fn_output_signature=None):
"""Transform `elems` by applying `fn` to each element unstacked on axis 0."""
if fn_output_signature is None:
fn_output_signature = dtype
return map_fn(
fn=fn,
elems=elems,
fn_output_signature=fn_output_signature,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
infer_shape=infer_shape,
name=name)
# Docstring for v2 is the same as v1, except that back_prop is deprecated.
map_fn_v2.__doc__ = re.sub(
r"( back_prop: \(optional\) )(.*)",
r"\1Deprecated: prefer using `tf.stop_gradient` instead. \2",
map_fn.__doc__)
assert "prefer using `tf.stop_gradient` instead" in map_fn_v2.__doc__
| 42.199387
| 104
| 0.680054
|
30c69c20cd3deafad62d472339b84377d8396a95
| 9,798
|
py
|
Python
|
tests/wallet/test_wallet_store.py
|
Stor-Network/stor-blockchain
|
3c3cd1a3b99592e88160107ca5b81afc0937b992
|
[
"Apache-2.0"
] | 19
|
2021-06-29T20:06:09.000Z
|
2022-02-09T04:33:00.000Z
|
tests/wallet/test_wallet_store.py
|
Stor-Network/stor-blockchain
|
3c3cd1a3b99592e88160107ca5b81afc0937b992
|
[
"Apache-2.0"
] | 8
|
2021-07-04T03:21:51.000Z
|
2021-12-27T07:56:09.000Z
|
tests/wallet/test_wallet_store.py
|
Stor-Network/stor-blockchain
|
3c3cd1a3b99592e88160107ca5b81afc0937b992
|
[
"Apache-2.0"
] | 6
|
2021-10-04T17:15:30.000Z
|
2022-03-15T08:40:01.000Z
|
# TODO: write tests for other stores
# import asyncio
# from pathlib import Path
# from secrets import token_bytes
# import aiosqlite
# import pytest
# from stor.util.ints import uint32, uint64, uint128
# from stor.wallet.wallet_coin_record import WalletCoinRecord
# from stor.wallet.util.wallet_types import WalletType
# from stor.types.coin import Coin
#
#
# @pytest.fixture(scope="module")
# def event_loop():
# loop = asyncio.get_event_loop()
# yield loop
#
#
# class TestWalletStore:
# @pytest.mark.asyncio
# async def test_store(self):
# db_filename = Path("blockchain_wallet_store_test.db")
#
# if db_filename.exists():
# db_filename.unlink()
#
# db_connection = await aiosqlite.connect(db_filename)
# store = await WalletStore.create(db_connection)
# try:
# coin_1 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_2 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_3 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_4 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# record_replaced = WalletCoinRecord(coin_1, uint32(8), uint32(0),
# False, True, WalletType.STANDARD_WALLET, 0)
# record_1 = WalletCoinRecord(coin_1, uint32(4), uint32(0), False,
# True, WalletType.STANDARD_WALLET, 0)
# record_2 = WalletCoinRecord(coin_2, uint32(5), uint32(0),
# False, True, WalletType.STANDARD_WALLET, 0)
# record_3 = WalletCoinRecord(
# coin_3,
# uint32(5),
# uint32(10),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 0,
# )
# record_4 = WalletCoinRecord(
# coin_4,
# uint32(5),
# uint32(15),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 0,
# )
#
# # Test add (replace) and get
# assert await store.get_coin_record(coin_1.name()) is None
# await store.add_coin_record(record_replaced)
# await store.add_coin_record(record_1)
# await store.add_coin_record(record_2)
# await store.add_coin_record(record_3)
# await store.add_coin_record(record_4)
# assert await store.get_coin_record(coin_1.name()) == record_1
#
# # Test persistence
# await db_connection.close()
# db_connection = await aiosqlite.connect(db_filename)
# store = await WalletStore.create(db_connection)
# assert await store.get_coin_record(coin_1.name()) == record_1
#
# # Test set spent
# await store.set_spent(coin_1.name(), uint32(12))
# assert (await store.get_coin_record(coin_1.name())).spent
# assert (await store.get_coin_record(coin_1.name())).spent_block_index == 12
#
# # No coins at height 3
# assert len(await store.get_unspent_coins_at_height(3)) == 0
# assert len(await store.get_unspent_coins_at_height(4)) == 1
# assert len(await store.get_unspent_coins_at_height(5)) == 4
# assert len(await store.get_unspent_coins_at_height(11)) == 3
# assert len(await store.get_unspent_coins_at_height(12)) == 2
# assert len(await store.get_unspent_coins_at_height(15)) == 1
# assert len(await store.get_unspent_coins_at_height(16)) == 1
# assert len(await store.get_unspent_coins_at_height()) == 1
#
# assert len(await store.get_unspent_coins_for_wallet(0)) == 1
# assert len(await store.get_unspent_coins_for_wallet(1)) == 0
#
# coin_5 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# record_5 = WalletCoinRecord(
# coin_5,
# uint32(5),
# uint32(15),
# False,
# False,
# WalletType.STANDARD_WALLET,
# 1,
# )
# await store.add_coin_record(record_5)
# assert len(await store.get_unspent_coins_for_wallet(1)) == 1
#
# assert len(await store.get_spendable_for_index(100, 1)) == 1
# assert len(await store.get_spendable_for_index(100, 0)) == 1
# assert len(await store.get_spendable_for_index(0, 0)) == 0
#
# coin_6 = Coin(token_bytes(32), coin_4.puzzle_hash, uint64(12312))
# await store.add_coin_record(record_5)
# record_6 = WalletCoinRecord(
# coin_6,
# uint32(5),
# uint32(15),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 2,
# )
# await store.add_coin_record(record_6)
# assert len(await store.get_coin_records_by_puzzle_hash(record_6.coin.puzzle_hash)) == 2 # 4 and 6
# assert len(await store.get_coin_records_by_puzzle_hash(token_bytes(32))) == 0
#
# assert await store.get_coin_record_by_coin_id(coin_6.name()) == record_6
# assert await store.get_coin_record_by_coin_id(token_bytes(32)) is None
#
# # BLOCKS
# assert len(await store.get_lca_path()) == 0
#
# # NOT lca block
# br_1 = BlockRecord(
# token_bytes(32),
# token_bytes(32),
# uint32(0),
# uint128(100),
# None,
# None,
# None,
# None,
# uint64(0),
# )
# assert await store.get_block_record(br_1.header_hash) is None
# await store.add_block_record(br_1, False)
# assert len(await store.get_lca_path()) == 0
# assert await store.get_block_record(br_1.header_hash) == br_1
#
# # LCA genesis
# await store.add_block_record(br_1, True)
# assert await store.get_block_record(br_1.header_hash) == br_1
# assert len(await store.get_lca_path()) == 1
# assert (await store.get_lca_path())[br_1.header_hash] == br_1
#
# br_2 = BlockRecord(
# token_bytes(32),
# token_bytes(32),
# uint32(1),
# uint128(100),
# None,
# None,
# None,
# None,
# uint64(0),
# )
# await store.add_block_record(br_2, False)
# assert len(await store.get_lca_path()) == 1
# await store.add_block_to_path(br_2.header_hash)
# assert len(await store.get_lca_path()) == 2
# assert (await store.get_lca_path())[br_2.header_hash] == br_2
#
# br_3 = BlockRecord(
# token_bytes(32),
# token_bytes(32),
# uint32(2),
# uint128(100),
# None,
# None,
# None,
# None,
# uint64(0),
# )
# await store.add_block_record(br_3, True)
# assert len(await store.get_lca_path()) == 3
# await store.remove_block_records_from_path(1)
# assert len(await store.get_lca_path()) == 2
#
# await store.rollback_lca_to_block(0)
# assert len(await store.get_unspent_coins_at_height()) == 0
#
# coin_7 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_8 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_9 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# coin_10 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
# record_7 = WalletCoinRecord(coin_7, uint32(0), uint32(1), True, False, WalletType.STANDARD_WALLET, 1)
# record_8 = WalletCoinRecord(coin_8, uint32(1), uint32(2), True, False, WalletType.STANDARD_WALLET, 1)
# record_9 = WalletCoinRecord(coin_9, uint32(2), uint32(3), True, False, WalletType.STANDARD_WALLET, 1)
# record_10 = WalletCoinRecord(
# coin_10,
# uint32(3),
# uint32(4),
# True,
# False,
# WalletType.STANDARD_WALLET,
# 1,
# )
#
# await store.add_coin_record(record_7)
# await store.add_coin_record(record_8)
# await store.add_coin_record(record_9)
# await store.add_coin_record(record_10)
# assert len(await store.get_unspent_coins_at_height(0)) == 1
# assert len(await store.get_unspent_coins_at_height(1)) == 1
# assert len(await store.get_unspent_coins_at_height(2)) == 1
# assert len(await store.get_unspent_coins_at_height(3)) == 1
# assert len(await store.get_unspent_coins_at_height(4)) == 0
#
# await store.add_block_record(br_2, True)
# await store.add_block_record(br_3, True)
#
# await store.rollback_lca_to_block(1)
#
# assert len(await store.get_unspent_coins_at_height(0)) == 1
# assert len(await store.get_unspent_coins_at_height(1)) == 1
# assert len(await store.get_unspent_coins_at_height(2)) == 1
# assert len(await store.get_unspent_coins_at_height(3)) == 1
# assert len(await store.get_unspent_coins_at_height(4)) == 1
#
# except AssertionError:
# await db_connection.close()
# raise
# await db_connection.close()
| 42.415584
| 115
| 0.554807
|
910575af3ef295d329853394944fed12da2e13d0
| 868
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/3_features/numtrees_45/rule_36.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/3_features/numtrees_45/rule_36.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/3_features/numtrees_45/rule_36.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj): #obj[0]: Coupon, obj[1]: Education, obj[2]: Occupation
# {"feature": "Occupation", "instances": 23, "metric_value": 0.8281, "depth": 1}
if obj[2]<=5:
# {"feature": "Coupon", "instances": 12, "metric_value": 0.4138, "depth": 2}
if obj[0]>0:
return 'True'
elif obj[0]<=0:
# {"feature": "Education", "instances": 3, "metric_value": 0.9183, "depth": 3}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[2]>5:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.994, "depth": 2}
if obj[0]<=3:
# {"feature": "Education", "instances": 10, "metric_value": 0.971, "depth": 3}
if obj[1]<=2:
return 'False'
elif obj[1]>2:
return 'True'
else: return 'True'
elif obj[0]>3:
return 'False'
else: return 'False'
else: return 'True'
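# Example (hypothetical feature vector [Coupon, Education, Occupation]):
# findDecision([1, 0, 3]) takes the obj[2] <= 5 branch, then obj[0] > 0, and returns 'True'.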
| 31
| 81
| 0.587558
|
65e3f3af76395ed6257d4a10ffa5a758f13fb622
| 119
|
py
|
Python
|
PIP/Minor Assignment 3/A3Q9.py
|
ankitrajbiswal/SEM_5
|
db716e242e77149a4091e0e564356ddc724aeff0
|
[
"Apache-2.0"
] | 10
|
2021-04-24T11:46:48.000Z
|
2022-01-17T05:14:37.000Z
|
PIP/Minor Assignment 3/A3Q9.py
|
ankitrajbiswal/SEM_5
|
db716e242e77149a4091e0e564356ddc724aeff0
|
[
"Apache-2.0"
] | 2
|
2021-06-28T11:51:50.000Z
|
2021-11-01T08:21:53.000Z
|
PIP/Minor Assignment 3/A3Q9.py
|
ankitrajbiswal/SEM_5
|
db716e242e77149a4091e0e564356ddc724aeff0
|
[
"Apache-2.0"
] | 16
|
2021-04-24T11:46:58.000Z
|
2022-03-02T05:08:19.000Z
|
def printSum(n):
    s = 0
    for i in n:
        s = s + int(i)
    print(s)

n = input("Enter the number")
printSum(n)
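# Example (Python 3 input() returns a string): entering 123 prints 6, i.e. 1 + 2 + 3.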
| 17
| 28
| 0.521008
|
bca0161f142480bcbc52dd96803edecc387e8c64
| 8,711
|
py
|
Python
|
neurom/check/tests/test_runner.py
|
mgeplf/NeuroM
|
e21c01979de3db643c309b6bf2fe0b5dc9363c3a
|
[
"BSD-3-Clause"
] | null | null | null |
neurom/check/tests/test_runner.py
|
mgeplf/NeuroM
|
e21c01979de3db643c309b6bf2fe0b5dc9363c3a
|
[
"BSD-3-Clause"
] | 3
|
2019-11-15T05:22:14.000Z
|
2019-12-09T01:56:24.000Z
|
neurom/check/tests/test_runner.py
|
NeuroDataDesign/NeuroM
|
61a7b5de0c3bf3c07d6eb3270c28d21be6ea7865
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from copy import copy
from nose import tools as nt
from neurom.check.runner import CheckRunner
from neurom.exceptions import ConfigError
_path = os.path.dirname(os.path.abspath(__file__))
SWC_PATH = os.path.join(_path, '../../../test_data/swc/')
NRN_PATH_0 = os.path.join(SWC_PATH, 'Neuron.swc')
NRN_PATH_1 = os.path.join(SWC_PATH, 'Neuron_zero_length_sections.swc')
NRN_PATH_2 = os.path.join(SWC_PATH, 'Single_apical.swc')
NRN_PATH_3 = os.path.join(SWC_PATH, 'Single_basal.swc')
NRN_PATH_4 = os.path.join(SWC_PATH, 'Single_axon.swc')
NRN_PATH_5 = os.path.join(SWC_PATH, 'Single_apical_no_soma.swc')
CONFIG = {
'checks': {
'structural_checks': [
'is_single_tree',
'has_soma_points',
'has_sequential_ids',
'has_increasing_ids',
'has_valid_soma',
'has_valid_neurites'
],
'neuron_checks': [
'has_basal_dendrite',
'has_axon',
'has_apical_dendrite',
'has_all_nonzero_segment_lengths',
'has_all_nonzero_section_lengths',
'has_all_nonzero_neurite_radii',
'has_nonzero_soma_radius'
]
},
'options': {
'has_nonzero_soma_radius': 0.0,
"has_all_nonzero_neurite_radii": 0.007,
"has_all_nonzero_segment_lengths": 0.01,
"has_all_nonzero_section_lengths": [0.01]
},
}
CONFIG_COLOR = copy(CONFIG)
CONFIG_COLOR['color'] = True
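# Note: copy() here is shallow; that is safe because only the new top-level 'color'
# key differs, and the nested 'checks'/'options' dicts shared with CONFIG are never mutated.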
REF_0 = {
'files': {
NRN_PATH_0: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": True,
"Has axon": True,
"Has apical dendrite": True,
"Has all nonzero segment lengths": True,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": True
}
},
"STATUS": "PASS"
}
REF_1 = {
'files': {
NRN_PATH_1: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": True,
"Has axon": True,
"Has apical dendrite": True,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": False,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_2 = {
'files': {
NRN_PATH_2: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": False,
"Has axon": False,
"Has apical dendrite": True,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_3 = {
'files': {
NRN_PATH_3: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": True,
"Has axon": False,
"Has apical dendrite": False,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": False,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_4 = {
'files': {
NRN_PATH_4: {
"Is single tree": True,
"Has soma points": True,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": True,
"Has valid neurites": True,
"Has basal dendrite": False,
"Has axon": True,
"Has apical dendrite": False,
"Has all nonzero segment lengths": False,
"Has all nonzero section lengths": True,
"Has all nonzero neurite radii": True,
"Has nonzero soma radius": True,
"ALL": False
}
},
"STATUS": "FAIL"
}
REF_5 = {
'files': {
NRN_PATH_5: {
"Is single tree": True,
"Has soma points": False,
"Has sequential ids": True,
"Has increasing ids": True,
"Has valid soma": False,
"Has valid neurites": False,
"ALL": False
}
},
"STATUS": "FAIL"
}
def test_ok_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_0)
nt.assert_equal(summ, REF_0)
def test_ok_neuron_color():
checker = CheckRunner(CONFIG_COLOR)
summ = checker.run(NRN_PATH_0)
nt.assert_equal(summ, REF_0)
def test_zero_length_sections_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_1)
nt.assert_equal(summ, REF_1)
def test_single_apical_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_2)
nt.assert_equal(summ, REF_2)
def test_single_basal_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_3)
nt.assert_equal(summ, REF_3)
def test_single_axon_neuron():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_4)
nt.assert_equal(summ, REF_4)
def test_single_apical_no_soma():
checker = CheckRunner(CONFIG)
summ = checker.run(NRN_PATH_5)
nt.assert_equal(summ, REF_5)
def test_directory_input():
checker = CheckRunner(CONFIG)
summ = checker.run(SWC_PATH)
nt.eq_(summ['files'][NRN_PATH_0]['Has axon'], True)
nt.eq_(summ['files'][NRN_PATH_2]['Has axon'], False)
@nt.raises(IOError)
def test_invalid_data_path_raises_IOError():
checker = CheckRunner(CONFIG)
_ = checker.run('foo/bar/baz')
def test__sanitize_config():
# fails if missing 'checks'
nt.assert_raises(ConfigError, CheckRunner._sanitize_config, {})
# creates minimal config
new_config = CheckRunner._sanitize_config({'checks': {}})
nt.eq_(new_config, {'checks':
{'structural_checks': [],
'neuron_checks': [],
},
'options': {},
'color': False,
})
# makes no changes to already filled out config
new_config = CheckRunner._sanitize_config(CONFIG)
nt.eq_(CONFIG, new_config)
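# Hedged usage sketch (not part of the original test module): it only restates
# how the tests above drive CheckRunner with the CONFIG defined at the top of
# this file, and is wrapped in a function so it never runs on import.
def _example_check_runner_usage():
    checker = CheckRunner(CONFIG)
    summary = checker.run(SWC_PATH)      # accepts a single file or a directory
    failing = [path for path, results in summary['files'].items()
               if not results['ALL']]    # per-check booleans plus an "ALL" flag
    return summary['STATUS'] == 'PASS', failing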
| 31.561594
| 86
| 0.601538
|
34abaf2123b0874dd559738c633638d6ca60ad3c
| 4,614
|
py
|
Python
|
tests/test_blueprints_container.py
|
celery/bootsteps
|
f2e788edb182d54037c5f2b9fad28dc81f701f8e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_blueprints_container.py
|
celery/bootsteps
|
f2e788edb182d54037c5f2b9fad28dc81f701f8e
|
[
"BSD-3-Clause"
] | 1
|
2019-10-24T16:46:50.000Z
|
2019-10-24T16:46:50.000Z
|
tests/test_blueprints_container.py
|
celery/bootsteps
|
f2e788edb182d54037c5f2b9fad28dc81f701f8e
|
[
"BSD-3-Clause"
] | 1
|
2019-09-29T03:36:17.000Z
|
2019-09-29T03:36:17.000Z
|
from unittest.mock import Mock
import pytest
from eliot.testing import LoggedAction
from bootsteps import BlueprintContainer, Step
from tests.assertions import (
assert_log_message_field_equals,
assert_logged_action_failed,
assert_logged_action_succeeded,
)
from tests.mocks import TrioCoroutineMock, create_mock_step, create_start_stop_mock_step
def test_blueprint_container_dependencies_graph(logger):
mock_step1 = create_mock_step("step1")
mock_step2 = create_mock_step("step2", requires={mock_step1})
mock_step3 = create_mock_step("step3", last=True)
mock_step4 = create_mock_step("step4", required_by={mock_step2})
mock_step5 = create_mock_step("step5", include_if=False)
mock_bootsteps = [mock_step1, mock_step4, mock_step2, mock_step3, mock_step5]
class MyBlueprintContainer(BlueprintContainer):
bootsteps = mock_bootsteps
mock_bootsteps.remove(mock_step5)
assert list(MyBlueprintContainer.blueprint._steps.nodes) == mock_bootsteps
assert set(MyBlueprintContainer.blueprint._steps.edges) == {
(mock_step2, mock_step1),
(mock_step2, mock_step4),
(mock_step3, mock_step1),
(mock_step3, mock_step4),
(mock_step3, mock_step2),
}
logged_actions = LoggedAction.of_type(
logger.messages, "bootsteps:blueprint:building_dependency_graph"
)
logged_action = logged_actions[0]
assert_log_message_field_equals(
logged_action.start_message, "name", MyBlueprintContainer.blueprint.name
)
assert_log_message_field_equals(
logged_action.end_message, "name", MyBlueprintContainer.blueprint.name
)
assert_log_message_field_equals(
logged_action.end_message,
"graph",
lambda value: value.nodes == MyBlueprintContainer.blueprint._steps.nodes
and value.edges == MyBlueprintContainer.blueprint._steps.edges,
)
assert_logged_action_succeeded(logged_action)
def test_blueprint_container_dependencies_graph_with_two_last_steps(logger):
mock_step1 = create_mock_step("step1", last=True)
mock_step2 = create_mock_step("step2", requires={mock_step1})
mock_step3 = create_mock_step("step3", last=True)
mock_bootsteps = [mock_step1, mock_step2, mock_step3]
class MyBlueprintContainer(BlueprintContainer):
bootsteps = mock_bootsteps
with pytest.raises(ValueError, match="Only one boot step can be last. Found 2."):
MyBlueprintContainer.blueprint
logged_actions = LoggedAction.of_type(
logger.messages, "bootsteps:blueprint:building_dependency_graph"
)
logged_action = logged_actions[0]
assert_log_message_field_equals(
logged_action.start_message, "name", MyBlueprintContainer.name
)
assert_logged_action_failed(logged_action)
assert_log_message_field_equals(
logged_action.end_message, "reason", "Only one boot step can be last. Found 2."
)
assert_log_message_field_equals(
logged_action.end_message, "exception", "builtins.ValueError"
)
def test_blueprint_container_dependencies_graph_with_circular_dependencies(logger):
# Can't use the create_mock_step helper here because of the circular dependency
mock_step2 = Mock(name="step2", spec=Step)
mock_step1 = Mock(name="step1", spec=Step)
mock_step1.requires = {mock_step2}
mock_step1.required_by = set()
mock_step1.last = True
mock_step2.requires = {mock_step1}
mock_step2.required_by = set()
mock_step2.last = False
mock_bootsteps = [mock_step1, mock_step2]
class MyBlueprintContainer(BlueprintContainer):
bootsteps = mock_bootsteps
with pytest.raises(ValueError, match="Circular dependencies found."):
MyBlueprintContainer.blueprint
def test_blueprint_container_dependencies_graph_with_no_circular_dependencies_other_step_not_included(
logger
):
# Can't use the create_mock_step helper here because of the circular dependency
mock_step2 = Mock(name="step2", spec=Step)
mock_step1 = Mock(name="step1", spec=Step)
mock_step1.requires = {mock_step2}
mock_step1.required_by = set()
mock_step1.last = True
mock_step2.include_if.return_value = True
mock_step2.requires = {mock_step1}
mock_step2.required_by = set()
mock_step2.last = False
mock_step2.include_if.return_value = False
mock_bootsteps = [mock_step1, mock_step2]
class MyBlueprintContainer(BlueprintContainer):
bootsteps = mock_bootsteps
try:
MyBlueprintContainer.blueprint
except ValueError:
pytest.fail("Circular dependencies found")
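# Hedged sketch (not part of the original module): the tests above drive the
# dependency graph purely through the `requires`, `required_by`, `last` and
# `include_if` attributes of each Step. The unexecuted helper below mirrors the
# Mock(spec=Step) pattern used in the circular-dependency test above.
def _example_blueprint_graph():
    step_a = Mock(name="a", spec=Step)
    step_a.requires = set()
    step_a.required_by = set()
    step_a.last = False
    step_a.include_if.return_value = True
    step_b = Mock(name="b", spec=Step)
    step_b.requires = {step_a}            # b depends on a
    step_b.required_by = set()
    step_b.last = True                    # exactly one step may be last
    step_b.include_if.return_value = True
    class ExampleContainer(BlueprintContainer):
        bootsteps = [step_a, step_b]
    # Accessing .blueprint builds the graph; as asserted above, edges point
    # from a step to the steps it depends on.
    return set(ExampleContainer.blueprint._steps.edges)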
| 33.926471
| 102
| 0.747291
|
475872cef6090c7295bdf050dcb74b11587a58f1
| 9,773
|
py
|
Python
|
runtime/python/Lib/unittest/test/test_break.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
Thonny/Lib/unittest/test/test_break.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
Thonny/Lib/unittest/test/test_break.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 53
|
2019-03-12T16:50:21.000Z
|
2022-03-15T23:16:18.000Z
|
import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
class TestBreak(unittest.TestCase):
int_handler = None
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
if self.int_handler is not None:
signal.signal(signal.SIGINT, self.int_handler)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
self.assertNotIn(result, unittest.signals._results)
unittest.registerResult(result)
try:
self.assertIn(result, unittest.signals._results)
finally:
unittest.removeResult(result)
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
# Can't use skipIf decorator because the signal handler may have
# been changed after defining this method.
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
self.skipTest("test requires SIGINT to not be ignored")
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# Can't use skipIf decorator because the signal handler may have
# been changed after defining this method.
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
self.skipTest("test requires SIGINT to not be ignored")
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
        gc.collect()
        gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.tb_locals = False
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'tb_locals': False,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'tb_locals': False,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
class TestBreakDefaultIntHandler(TestBreak):
int_handler = signal.default_int_handler
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
class TestBreakSignalIgnored(TestBreak):
int_handler = signal.SIG_IGN
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
class TestBreakSignalDefault(TestBreak):
int_handler = signal.SIG_DFL
if __name__ == "__main__":
unittest.main()
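# Hedged sketch (not part of the original test module): it restates the
# interrupt protocol exercised above -- install a SIGINT handler, register each
# TestResult, and let the first Ctrl-C set shouldStop instead of raising. It
# uses only APIs exercised in this file and is never executed on import.
def _example_graceful_interrupt():
    result = unittest.TestResult()
    unittest.installHandler()             # replaces the SIGINT handler
    unittest.registerResult(result)       # weakly tracks the result
    try:
        os.kill(os.getpid(), signal.SIGINT)   # first interrupt is absorbed
    except KeyboardInterrupt:
        raise AssertionError("first SIGINT should not raise")
    assert result.shouldStop              # the run is asked to stop instead
    unittest.removeResult(result)
    unittest.removeHandler()              # restore the previous handler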
| 34.779359
| 80
| 0.597769
|
e958e64f6b51e6a1aeab18669eaf6df61c48347f
| 8,273
|
py
|
Python
|
conary/build/derive.py
|
sassoftware/conary
|
d418968acd5e11ee17ed6d91ca395ea10a040222
|
[
"Apache-2.0"
] | 43
|
2015-03-31T01:37:10.000Z
|
2021-11-14T16:26:48.000Z
|
conary/build/derive.py
|
sassoftware/conary
|
d418968acd5e11ee17ed6d91ca395ea10a040222
|
[
"Apache-2.0"
] | 9
|
2015-06-10T16:39:41.000Z
|
2020-01-27T16:35:01.000Z
|
conary/build/derive.py
|
sassoftware/conary
|
d418968acd5e11ee17ed6d91ca395ea10a040222
|
[
"Apache-2.0"
] | 9
|
2015-04-07T08:12:37.000Z
|
2020-01-26T09:54:18.000Z
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the functions which derive a package and commit the
resulting packages to the repository.
"""
import os
import stat
from conary.cmds import branch
from conary import checkin
from conary import state
from conary.conaryclient import cmdline
from conary.lib import log, util
from conary.versions import Label
from conary.repository.changeset import ChangesetExploder
class DeriveCallback(checkin.CheckinCallback):
def setUpdateJob(self, *args, **kw):
# stifle update announcement for extract
pass
def derive(repos, cfg, targetLabel, troveSpec, checkoutDir=None,
extract=False, info=False, callback=None):
"""
Performs all the commands necessary to create a derived recipe.
First it shadows the package, then it creates a checkout of the shadow
and converts the checkout to a derived recipe package.
    Finally, if extract is True, it installs a version of the binary
    package into a root.
@param repos: trovesource to search for and derive packages from
@param cfg: configuration to use when deriving the package
@type cfg: ConaryConfiguration object
@param targetLabel: label to derive from
@type targetLabel: versions.Label
@param checkoutDir: directory to create the checkout in. If None,
defaults to currentDir + packageName.
@param extract: If True, creates a subdirectory of the checkout named
_ROOT_ with the contents of the binary of the derived
package.
    @param info: If True, only display information about the shadow
                 operation that would be performed, without actually
                 performing it.
@param callback:
"""
origDir = os.getcwd()
try:
if callback is None:
callback = DeriveCallback()
if isinstance(troveSpec, tuple):
troveName, versionSpec, flavor = troveSpec
versionSpec = str(versionSpec)
troveSpec = cmdline.toTroveSpec(troveName, versionSpec, flavor)
else:
troveName, versionSpec, flavor = cmdline.parseTroveSpec(troveSpec)
if isinstance(targetLabel, str):
targetLabel = Label(targetLabel)
troveName, versionSpec, flavor = cmdline.parseTroveSpec(troveSpec)
result = repos.findTrove(cfg.buildLabel,
(troveName, versionSpec, flavor),
cfg.flavor)
# findTrove shouldn't return multiple items for one package anymore
# when a flavor is specified.
troveToDerive, = result
        # Displaying output on the screen provides a record of what
        # operations were performed. Since this command is an aggregate
        # of several commands, that seems appropriate, rather than
        # simply using a progress callback.
log.info('Shadowing %s=%s[%s] onto %s' % (troveToDerive[0],
troveToDerive[1],
troveToDerive[2],
targetLabel))
if info:
cfg.interactive = False
error = branch.branch(repos, cfg, str(targetLabel),
['%s=%s[%s]'%troveToDerive],
makeShadow=True, sourceOnly=True,
binaryOnly=False, allowEmptyShadow=True,
info=info)
if info or error:
return
shadowedVersion = troveToDerive[1].createShadow(targetLabel)
shadowedVersion = shadowedVersion.getSourceVersion(False)
troveName = troveName.split(':')[0]
checkoutDir = checkoutDir or troveName
checkin.checkout(repos, cfg, checkoutDir,
["%s=%s" % (troveName, shadowedVersion)],
callback=callback)
os.chdir(checkoutDir)
nvfs = repos.getTrovesBySource(troveToDerive[0]+':source',
troveToDerive[1].getSourceVersion())
trvs = repos.getTroves(nvfs)
hasCapsule = [ x for x in trvs if x.troveInfo.capsule.type() ]
if hasCapsule:
derivedRecipeType = 'DerivedCapsuleRecipe'
removeText = ''
else:
derivedRecipeType = 'DerivedPackageRecipe'
removeText = \
"""
# This appliance uses PHP as a command interpreter but does
# not include a web server, so remove the file that creates
# a dependency on the web server
r.Remove('/etc/httpd/conf.d/php.conf')
"""
log.info('Rewriting recipe file')
recipeName = troveName + '.recipe'
className = util.convertPackageNameToClassName(troveName)
derivedRecipe = """
class %(className)sRecipe(%(recipeBaseClass)s):
name = '%(name)s'
version = '%(version)s'
def setup(r):
'''
In this recipe, you can make modifications to the package.
Examples:
# This appliance has high-memory-use PHP scripts
r.Replace('memory_limit = 8M', 'memory_limit = 32M', '/etc/php.ini')
%(removeText)s
# This appliance requires that a few binaries be replaced
# with binaries built from a custom archive that includes
# a Makefile that honors the DESTDIR variable for its
# install target.
r.addArchive('foo.tar.gz')
r.Make()
r.MakeInstall()
# This appliance requires an extra configuration file
r.Create('/etc/myconfigfile', contents='some data')
'''
""" % dict(className=className,
name=troveName,
version=shadowedVersion.trailingRevision().getVersion(),
recipeBaseClass=derivedRecipeType,
removeText=removeText)
open(recipeName, 'w').write(derivedRecipe)
log.info('Removing extra files from checkout')
conaryState = state.ConaryStateFromFile('CONARY', repos)
sourceState = conaryState.getSourceState()
# clear the factory since we don't care about how the parent trove was
# created
sourceState.setFactory('')
addRecipe=True
for (pathId, path, fileId, version) in list(sourceState.iterFileList()):
if path == recipeName:
addRecipe = False
continue
sourceState.removeFile(pathId)
if util.exists(path):
statInfo = os.lstat(path)
try:
if statInfo.st_mode & stat.S_IFDIR:
os.rmdir(path)
else:
os.unlink(path)
            except OSError as e:
log.warning("cannot remove %s: %s" % (path, e.strerror))
conaryState.write('CONARY')
if addRecipe:
checkin.addFiles([recipeName])
if extract:
log.info('extracting files from %s=%s[%s]' % (troveToDerive))
# extract to _ROOT_
extractDir = os.path.join(os.getcwd(), '_ROOT_')
ts = [ (troveToDerive[0], (None, None),
(troveToDerive[1], troveToDerive[2]), True) ]
cs = repos.createChangeSet(ts, recurse = True)
ChangesetExploder(cs, extractDir)
# extract to _OLD_ROOT_
secondDir = os.path.join(os.getcwd(), '_OLD_ROOT_')
cs = repos.createChangeSet(ts, recurse = True)
ChangesetExploder(cs, secondDir)
finally:
# restore the original directory before we started
os.chdir(origDir)
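# Hedged usage sketch (not part of the original module): derive() expects an
# already-configured repository client and ConaryConfiguration; `repos` and
# `cfg` below are placeholders for such objects, and the label and trove spec
# are made up for illustration. Nothing here runs on import.
def _example_derive(repos, cfg):
    derive(repos, cfg,
           targetLabel=Label('example.rpath.org@rpl:devel'),
           troveSpec='mypackage=conary.rpath.com@rpl:2',
           checkoutDir='mypackage-derived',
           extract=True)    # also explode the binary into _ROOT_ / _OLD_ROOT_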
| 38.658879
| 80
| 0.60365
|
b8a65323c4d835428a8c5ea7deb999b86ba3460c
| 3,755
|
py
|
Python
|
plugins/quetz_conda_suggest/tests/test_quetz_conda_suggest.py
|
maresb/quetz
|
55313ca9c2ae04577d23a1dddb38c045b4a056f4
|
[
"BSD-3-Clause"
] | 108
|
2020-09-16T16:15:01.000Z
|
2022-03-29T02:49:31.000Z
|
plugins/quetz_conda_suggest/tests/test_quetz_conda_suggest.py
|
maresb/quetz
|
55313ca9c2ae04577d23a1dddb38c045b4a056f4
|
[
"BSD-3-Clause"
] | 317
|
2020-09-07T18:37:33.000Z
|
2022-03-25T13:10:41.000Z
|
plugins/quetz_conda_suggest/tests/test_quetz_conda_suggest.py
|
LaudateCorpus1/quetz
|
339018ee3c35ae6700bea611d16a9924a33a0606
|
[
"BSD-3-Clause"
] | 36
|
2020-09-07T22:01:27.000Z
|
2022-03-26T17:06:07.000Z
|
import io
import shutil
import tarfile
import tempfile
from contextlib import contextmanager
from unittest import mock
import pytest
from quetz_conda_suggest import db_models
from quetz.condainfo import CondaInfo
def test_conda_suggest_endpoint_without_upload(client, channel, subdir):
response = client.get(
f"/api/channels/{channel.name}/{subdir}/conda-suggest"
) # noqa
assert response.status_code == 200
assert response.content == b'null'
assert response.json() == None # noqa: E711
def test_post_add_package_version(package_version, db, config):
filename = "test-package-0.1-0.tar.bz2"
with tempfile.SpooledTemporaryFile(mode='wb') as target:
with open(filename, 'rb') as fid:
shutil.copyfileobj(fid, target)
target.seek(0)
condainfo = CondaInfo(target, filename)
@contextmanager
def get_db():
yield db
from quetz_conda_suggest import main
with mock.patch("quetz_conda_suggest.main.get_db_manager", get_db):
main.post_add_package_version(package_version, condainfo)
meta = db.query(db_models.CondaSuggestMetadata).first()
assert meta.data == '{}'
# modify `files` and re-save
condainfo.files = [
b'bin/test-bin\n',
b'include/tpkg.h\n',
b'include/tpkg_utils.h\n',
b'lib/cmake/test-package/tpkgConfig.cmake\n',
b'lib/cmake/test-package/tpkgConfigVersion.cmake\n',
b'lib/libtpkg.so\n',
b'lib/pkgconfig/libtpkg.pc\n',
]
with mock.patch("quetz_conda_suggest.main.get_db_manager", get_db):
main.post_add_package_version(package_version, condainfo)
meta = db.query(db_models.CondaSuggestMetadata).all()
assert len(meta) == 1
assert meta[0].data == '{"test-bin": "test-package"}'
@pytest.fixture
def plugins():
return ["quetz-conda_suggest"]
def test_conda_suggest_endpoint_with_upload(
client,
db,
channel,
package,
subdir,
config,
profile,
):
response = client.get("/api/dummylogin/madhurt")
filename = "test-package-0.1-0.tar.bz2"
@contextmanager
def get_db():
yield db
# extract existing data
tar = tarfile.open(name=filename, mode='r:bz2')
existing_files = tar.getmembers()
existing_files_data = {}
for each_file in existing_files:
each_file_data = tar.extractfile(each_file).read()
existing_files_data[each_file] = each_file_data
tar.close()
# write content in `info/files`
files_data = [
'bin/test-bin\n',
'include/tpkg.h\n',
'include/tpkg_utils.h\n',
'lib/cmake/test-package/tpkgConfig.cmake\n',
'lib/cmake/test-package/tpkgConfigVersion.cmake\n',
'lib/libtpkg.so\n',
'lib/pkgconfig/libtpkg.pc\n',
]
files_content = "".join(files_data)
b = files_content.encode("utf-8").strip()
t = tarfile.TarInfo("info/files")
t.size = len(b)
# re-create archive with updated `info/files`
tar = tarfile.open(name=filename, mode='w:bz2')
for each_file, each_file_data in existing_files_data.items():
tar.addfile(each_file, io.BytesIO(each_file_data))
tar.addfile(t, io.BytesIO(b))
tar.close()
with mock.patch("quetz_conda_suggest.main.get_db_manager", get_db):
url = f'/api/channels/{channel.name}/files/'
files = {'files': (filename, open(filename, 'rb'))}
response = client.post(url, files=files)
assert response.status_code == 201
response = client.get(
f"/api/channels/{channel.name}/{subdir}/conda-suggest"
) # noqa
assert response.status_code == 200
assert response.headers['content-length'] == '22'
assert response.content == b'test-bin:test-package\n'
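# Hedged sketch (not part of the original tests): the endpoint asserted above
# returns one "binary-name:package-name" pair per line, built from the bin/
# entries recorded in each package's info/files. The helper below only parses
# that response format and is illustrative; nothing here runs on import.
def _example_parse_conda_suggest(body: bytes) -> dict:
    mapping = {}
    for line in body.decode().splitlines():
        if line:
            binary, package = line.split(":", 1)
            mapping[binary] = package
    return mapping
# e.g. _example_parse_conda_suggest(b'test-bin:test-package\n')
# -> {'test-bin': 'test-package'}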
| 28.884615
| 72
| 0.665246
|
39a45b8b0cbdefb279eec45fe60a0167c18cb7f9
| 2,148
|
py
|
Python
|
svgwrite/elementfactory.py
|
bntre/py-harmony
|
c849b8be863f620b4e7c6661e0e40c1414d6b17c
|
[
"CC0-1.0"
] | null | null | null |
svgwrite/elementfactory.py
|
bntre/py-harmony
|
c849b8be863f620b4e7c6661e0e40c1414d6b17c
|
[
"CC0-1.0"
] | null | null | null |
svgwrite/elementfactory.py
|
bntre/py-harmony
|
c849b8be863f620b4e7c6661e0e40c1414d6b17c
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozman@gmx.at>
# Purpose: element factory
# Created: 15.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
from svgwrite import container
from svgwrite import shapes
from svgwrite import path
from svgwrite import image
from svgwrite import text
from svgwrite import gradients
from svgwrite import pattern
from svgwrite import masking
from svgwrite import animate
from svgwrite import filters
factoryelements = {
'g': container.Group,
'svg': container.SVG,
'defs': container.Defs,
'symbol': container.Symbol,
'marker': container.Marker,
'use': container.Use,
'a': container.Hyperlink,
'script': container.Script,
'style': container.Style,
'line': shapes.Line,
'rect': shapes.Rect,
'circle': shapes.Circle,
'ellipse': shapes.Ellipse,
'polyline': shapes.Polyline,
'polygon': shapes.Polygon,
'path': path.Path,
'image': image.Image,
'text': text.Text,
'tspan': text.TSpan,
'tref': text.TRef,
'textPath': text.TextPath,
'textArea': text.TextArea,
'linearGradient': gradients.LinearGradient,
'radialGradient': gradients.RadialGradient,
'pattern': pattern.Pattern,
'clipPath': masking.ClipPath,
'mask': masking.Mask,
'animate': animate.Animate,
'set': animate.Set,
'animateColor': animate.AnimateColor,
'animateMotion': animate.AnimateMotion,
'animateTransform': animate.AnimateTransform,
'filter': filters.Filter,
}
class ElementBuilder(object):
def __init__(self, cls, factory):
self.cls = cls
self.factory = factory
def __call__(self, *args, **kwargs):
# inject creator object - inherit _parameter from factory
kwargs['factory'] = self.factory
# create an object of type 'cls'
return self.cls(*args, **kwargs)
class ElementFactory(object):
def __getattr__(self, name):
if name in factoryelements:
return ElementBuilder(factoryelements[name], self)
else:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__.__name__, name))
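# Hedged usage sketch (not part of the original module): in svgwrite this
# factory is mixed into drawing/container classes, so element creation reads as
# attribute access. The unexecuted helper below only demonstrates the dispatch
# mechanism shown above; building real elements additionally requires a factory
# that carries SVG profile parameters, so this is a sketch, not a full drawing.
def _example_factory_dispatch():
    factory = ElementFactory()
    builder = factory.line              # looked up in factoryelements
    assert builder.cls is shapes.Line   # unknown names raise AttributeError
    assert builder.factory is factory   # injected as kwargs['factory'] on call
    return builder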
| 28.64
| 96
| 0.675047
|
bab1144813ba0fec2fb34b151d08023eb6d23705
| 18,157
|
py
|
Python
|
swagger_client/api/account_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/api/account_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/api/account_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class AccountApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_account(self, access_id, account_id, **kwargs): # noqa: E501
"""Delete account # noqa: E501
Delete the account identified by **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account(access_id, account_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for the account to delete (required)
:param int account_id: The **id** for the account to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_account_with_http_info(access_id, account_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_with_http_info(access_id, account_id, **kwargs) # noqa: E501
return data
def delete_account_with_http_info(self, access_id, account_id, **kwargs): # noqa: E501
"""Delete account # noqa: E501
Delete the account identified by **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_with_http_info(access_id, account_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for the account to delete (required)
:param int account_id: The **id** for the account to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_id', 'account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_id' is set
if ('access_id' not in params or
params['access_id'] is None):
raise ValueError("Missing the required parameter `access_id` when calling `delete_account`") # noqa: E501
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_account`") # noqa: E501
collection_formats = {}
path_params = {}
if 'access_id' in params:
path_params['accessId'] = params['access_id'] # noqa: E501
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/accesses/{accessId}/accounts/{accountId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account(self, access_id, account_id, **kwargs): # noqa: E501
"""Get account # noqa: E501
Returns the account identified by **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account(access_id, account_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for the account to retrieve (required)
:param int account_id: The **id** for the account to retrieve (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_with_http_info(access_id, account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_with_http_info(access_id, account_id, **kwargs) # noqa: E501
return data
def get_account_with_http_info(self, access_id, account_id, **kwargs): # noqa: E501
"""Get account # noqa: E501
Returns the account identified by **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_with_http_info(access_id, account_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for the account to retrieve (required)
:param int account_id: The **id** for the account to retrieve (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_id', 'account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_id' is set
if ('access_id' not in params or
params['access_id'] is None):
raise ValueError("Missing the required parameter `access_id` when calling `get_account`") # noqa: E501
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account`") # noqa: E501
collection_formats = {}
path_params = {}
if 'access_id' in params:
path_params['accessId'] = params['access_id'] # noqa: E501
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/accesses/{accessId}/accounts/{accountId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_accounts(self, access_id, **kwargs): # noqa: E501
"""List accounts # noqa: E501
Retrieve all accounts for the current user under the **accessId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_accounts(access_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **id** for the access for which to retrieve all accounts (required)
:return: list[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_accounts_with_http_info(access_id, **kwargs) # noqa: E501
else:
(data) = self.get_accounts_with_http_info(access_id, **kwargs) # noqa: E501
return data
def get_accounts_with_http_info(self, access_id, **kwargs): # noqa: E501
"""List accounts # noqa: E501
Retrieve all accounts for the current user under the **accessId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_accounts_with_http_info(access_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **id** for the access for which to retrieve all accounts (required)
:return: list[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_accounts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_id' is set
if ('access_id' not in params or
params['access_id'] is None):
raise ValueError("Missing the required parameter `access_id` when calling `get_accounts`") # noqa: E501
collection_formats = {}
path_params = {}
if 'access_id' in params:
path_params['accessId'] = params['access_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/accesses/{accessId}/accounts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_account(self, access_id, account_id, name, **kwargs): # noqa: E501
"""Update account name # noqa: E501
Update the account name used in AHOI. Name must be URL encoded. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_account(access_id, account_id, name, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for which the user-defined account name should be altered (required)
:param int account_id: The **id** for which the user-defined account name should be altered (required)
:param str name: The new URL-encoded name (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_account_with_http_info(access_id, account_id, name, **kwargs) # noqa: E501
else:
(data) = self.update_account_with_http_info(access_id, account_id, name, **kwargs) # noqa: E501
return data
def update_account_with_http_info(self, access_id, account_id, name, **kwargs): # noqa: E501
"""Update account name # noqa: E501
Update the account name used in AHOI. Name must be URL encoded. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_account_with_http_info(access_id, account_id, name, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for which the user-defined account name should be altered (required)
:param int account_id: The **id** for which the user-defined account name should be altered (required)
:param str name: The new URL-encoded name (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_id', 'account_id', 'name'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_id' is set
if ('access_id' not in params or
params['access_id'] is None):
raise ValueError("Missing the required parameter `access_id` when calling `update_account`") # noqa: E501
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `update_account`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `update_account`") # noqa: E501
collection_formats = {}
path_params = {}
if 'access_id' in params:
path_params['accessId'] = params['access_id'] # noqa: E501
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/accesses/{accessId}/accounts/{accountId}/userdefinedname/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
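# Hedged usage sketch (not part of the generated module): it restates the two
# calling conventions described in the docstrings above. Note that the
# generated `async` keyword argument collides with the reserved word introduced
# in Python 3.7, so this module (and the sketch) assumes an older interpreter.
# `configured_client` and `access_id` are placeholders; nothing runs on import.
def _example_account_api(configured_client, access_id):
    api = AccountApi(api_client=configured_client)
    accounts = api.get_accounts(access_id)            # synchronous call
    thread = api.get_accounts(access_id, async=True)  # returns a request thread
    accounts_async = thread.get()
    return accounts, accounts_async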
| 40.986456
| 277
| 0.611445
|
b0592da7eda0ba99e9752b4f9f904b040939e2a5
| 12,658
|
py
|
Python
|
pydass_vasp/electronic_structure/dos.py
|
terencezl/pydass_vasp
|
77b5e285d6e9755f8f170159b7818a090a364917
|
[
"MIT"
] | 9
|
2015-11-13T15:30:07.000Z
|
2020-06-02T12:54:55.000Z
|
pydass_vasp/electronic_structure/dos.py
|
dlnguyen/pydass_vasp
|
77b5e285d6e9755f8f170159b7818a090a364917
|
[
"MIT"
] | 2
|
2016-02-26T16:49:48.000Z
|
2018-05-23T02:22:37.000Z
|
pydass_vasp/electronic_structure/dos.py
|
dlnguyen/pydass_vasp
|
77b5e285d6e9755f8f170159b7818a090a364917
|
[
"MIT"
] | 3
|
2018-10-01T17:45:19.000Z
|
2021-04-16T21:08:43.000Z
|
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .helpers import determine_tag_value, figs_assert, initiate_figs, plot_helper_settings
from ..xml_utils import parse
def get_tdos(filepath='DOSCAR', ISPIN=None, Ef=None, plot=False, xlim=None, ylim_upper=None, on_figs=None):
"""
Get the total density of states, with consideration of spin-polarization.
Accepts file type 'DOSCAR', or 'vasprun.xml'.
Parameters
----------
filepath: string
filepath, default to 'DOSCAR'
For DOSCAR-type file, can be any string containing 'DOSCAR'.
For vasprun.xml-type file, can be any string ending with '.xml'.
ISPIN: int
user specified ISPIN
        If not given, for a DOSCAR-type file, it is inferred from 'OUTCAR'/'INCAR'.
Ef: float
user specified Ef
plot: bool
whether to plot the data, default to False
xlim: list
        the range of the x-axis, as a list of 2 values
ylim_upper: int/float
        the upper limit of the y-axis (of the spin-combined plot if ISPIN == 2)
on_figs: list/int
the current figure numbers to plot to, default to new figures
Returns
-------
a dict, containing
'data': a pandas dataframe
'ax': the axes reference
"""
# get data
if re.match(r".*\.xml", filepath):
root = parse(filepath)
NEDOS = int(root.find("./parameters/separator[@name='dos']/i[@name='NEDOS']").text)
Ef = float(root.find("./calculation/dos/i[@name='efermi']").text)
if ISPIN:
print("Using user specified ISPIN.")
else:
ISPIN = int(root.find(
"./parameters/separator[@name='electronic']/separator[@name='electronic spin']/i[@name='ISPIN']").text)
if ISPIN == 1:
data = np.zeros((NEDOS, 3))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/total/array/set/set[@comment='spin 1']/r")):
data[n_step] = elem.text.split()
elif ISPIN == 2:
data1 = np.zeros((NEDOS, 3))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/total/array/set/set[@comment='spin 1']/r")):
data1[n_step] = elem.text.split()
data2 = np.zeros((NEDOS, 3))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/total/array/set/set[@comment='spin 2']/r")):
data2[n_step] = elem.text.split()
elif re.match(r".*DOSCAR.*", filepath):
with open(filepath, 'r') as f:
DOSCAR = f.readlines()
for i in range(len(DOSCAR)):
DOSCAR[i] = DOSCAR[i].split()
NEDOS = int(DOSCAR[5][2])
Ef = float(DOSCAR[5][3])
if ISPIN:
print("Using user specified ISPIN.")
else:
ISPIN = determine_tag_value('ISPIN', filepath)
data = np.array(DOSCAR[6:6 + NEDOS], dtype=float)
if ISPIN == 2:
data1 = data[:, [0, 1, 3]]
data2 = data[:, [0, 2, 4]]
# confluence and data organizing
if ISPIN == 1:
col_names = ['E', 'tot', 'tot_integrated']
data[:, 0] -= Ef
return_dict = {'data': pd.DataFrame(**{'columns': col_names, 'data': data})}
elif ISPIN == 2:
col_names1 = ['E', 'tot_up', 'tot_integrated_up']
col_names2 = ['E', 'tot_down', 'tot_integrated_down']
data1[:, 0] -= Ef
data2[:, 0] -= Ef
return_dict = {'data_spin_up': pd.DataFrame(**{'columns': col_names1, 'data': data1}),
'data_spin_down': pd.DataFrame(**{'columns': col_names2, 'data': data2}),
}
if plot:
# start plotting
figs_assert(on_figs, ISPIN, 'tdos')
if ISPIN == 1:
initiate_figs(on_figs)
plt.plot(data[:, 0], data[:, 1])
ax = plt.gca()
plot_helper_settings((xlim, [0, ylim_upper]), 'tdos')
return_dict.update({'ax': ax})
elif ISPIN == 2:
# Plot the combined TDOS
initiate_figs(on_figs)
plt.plot(data1[:, 0], data1[:, 1] + data2[:, 1], label='spin up + down')
ax1 = plt.gca()
plot_helper_settings((xlim, [0, ylim_upper]), 'tdos')
# Plot the separated TDOS
initiate_figs(on_figs)
plt.plot(data1[:, 0], data1[:, 1], label='spin up')
plt.plot(data2[:, 0], -data2[:, 1], label='spin down')
ax2 = plt.gca()
ylim_upper_sp = None
ylim_lower_sp = None
if ylim_upper:
ylim_upper_sp = ylim_upper/2.
ylim_lower_sp = -ylim_upper_sp
plot_helper_settings((xlim, [ylim_lower_sp, ylim_upper_sp]), 'tdos')
return_dict.update({'ax_spin_combined': ax1, 'ax_spin_separated': ax2})
return return_dict
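# Hedged usage sketch (not part of the original module): a minimal call to
# get_tdos() as documented above. The filename and plot range are placeholders,
# and the call is wrapped in a function so nothing runs on import.
def _example_get_tdos():
    result = get_tdos('vasprun.xml', plot=True, xlim=[-10, 5])
    if 'data' in result:                 # ISPIN == 1: single spin channel
        df = result['data']              # columns: E, tot, tot_integrated
    else:                                # ISPIN == 2: separate spin channels
        df = result['data_spin_up']
    return df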
def get_ldos(atom, filepath='DOSCAR', ISPIN=None, LORBIT=None, Ef=None, plot=False, xlim=None, ylim_upper=None,
on_figs=None):
"""
Get the local projected density of states, with consideration of spin-polarization.
Accepts file type 'DOSCAR', or 'vasprun.xml'.
Parameters
----------
atom: int
the atom number in DOSCAR/POSCAR interested, counting from 1
filepath: string
filepath, default to 'DOSCAR'
For DOSCAR-type file, can be any string containing 'DOSCAR'.
For vasprun.xml-type file, can be any string ending with '.xml'.
ISPIN: int
user specified ISPIN
        If not given, for a DOSCAR-type file, it is inferred from 'OUTCAR'/'INCAR'.
LORBIT: int
user specified LORBIT
        If not given, for both DOSCAR- and vasprun.xml-type files, it is inferred from 'OUTCAR'/'INCAR',
        because the LORBIT value recorded in vasprun.xml is unreliable.
Ef: float
user specified Ef
plot: bool
whether to plot the data, default to False
xlim: list
        the range of the x-axis, as a list of 2 values
ylim_upper: int/float
        the upper limit of the y-axis (of the spin-combined plot if ISPIN == 2)
on_figs: list/int
the current figure numbers to plot to, default to new figures
Returns
-------
a dict, containing
'data': a dataframe
'ax': the axes reference
"""
# get data
if re.match(r".*\.xml", filepath):
root = parse(filepath)
NEDOS = int(root.find("./parameters/separator[@name='dos']/i[@name='NEDOS']").text)
Ef = float(root.find("./calculation/dos/i[@name='efermi']").text)
if ISPIN:
print("Using user specified ISPIN.")
else:
ISPIN = int(root.find(
"./parameters/separator[@name='electronic']/separator[@name='electronic spin']/i[@name='ISPIN']").text)
# vasprun.xml's LORBIT is not correct
if LORBIT:
print("Using user specified LORBIT.")
else:
LORBIT = determine_tag_value('LORBIT', filepath)
if ISPIN == 1:
if LORBIT == 10 or LORBIT == 0:
data = np.zeros((NEDOS, 4))
elif LORBIT == 11 or LORBIT == 1:
data = np.zeros((NEDOS, 10))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/partial/array/set/set[@comment='ion " + str(
atom) + "']/set[@comment='spin 1']/r")):
data[n_step] = elem.text.split()
elif ISPIN == 2:
if LORBIT == 10 or LORBIT == 0:
data1 = np.zeros((NEDOS, 4))
data2 = np.zeros((NEDOS, 4))
elif LORBIT == 11 or LORBIT == 1:
data1 = np.zeros((NEDOS, 10))
data2 = np.zeros((NEDOS, 10))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/partial/array/set/set[@comment='ion " + str(
atom) + "']/set[@comment='spin 1']/r")):
data1[n_step] = elem.text.split()
for n_step, elem in enumerate(root.findall(
"./calculation/dos/partial/array/set/set[@comment='ion " + str(
atom) + "']/set[@comment='spin 2']/r")):
data2[n_step] = elem.text.split()
elif re.match(r".*DOSCAR.*", filepath):
with open(filepath, 'r') as f:
DOSCAR = f.readlines()
for i in range(len(DOSCAR)):
DOSCAR[i] = DOSCAR[i].split()
NEDOS = int(DOSCAR[5][2])
Ef = float(DOSCAR[5][3])
if ISPIN:
print("Using user specified ISPIN.")
else:
ISPIN = determine_tag_value('ISPIN', filepath)
if LORBIT:
print("Using user specified LORBIT.")
else:
LORBIT = determine_tag_value('LORBIT', filepath)
data = np.array(DOSCAR[(6 + (NEDOS + 1) * atom):(6 + (NEDOS + 1) * atom + NEDOS)], dtype=float)
if ISPIN == 2:
if LORBIT == 10 or LORBIT == 0:
data1 = data[:, [0, 1, 3, 5]]
data2 = data[:, [0, 2, 4, 6]]
elif LORBIT == 11 or LORBIT == 1:
data1 = data[:, [0, 1, 3, 5, 7, 9, 11, 13, 15, 17]]
data2 = data[:, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]]
# confluence and data organizing
if ISPIN == 1:
if LORBIT == 10 or LORBIT == 0:
col_names = ['E', 's', 'p', 'd']
elif LORBIT == 11 or LORBIT == 1:
col_names = ['E', 's', 'p_y', 'p_z', 'p_x', 'd_xy', 'd_yz', 'd_z2', 'd_xz', 'd_x2y2']
data[:, 0] -= Ef
return_dict = {'data': pd.DataFrame(**{'columns': col_names, 'data': data})}
elif ISPIN == 2:
if LORBIT == 10 or LORBIT == 0:
col_names1 = ['E', 's_up', 'p_up', 'd_up']
col_names2 = ['E', 's_down', 'p_down', 'd_down']
elif LORBIT == 11 or LORBIT == 1:
col_names1 = ['E', 's_up', 'p_y_up', 'p_z_up', 'p_x_up', 'd_xy_up', 'd_yz_up', 'd_z2_up', 'd_xz_up',
'd_x2y2_up']
col_names2 = ['E', 's_down', 'p_y_down', 'p_z_down', 'p_x_down', 'd_xy_down', 'd_yz_down', 'd_z2_down',
'd_xz_down', 'd_x2y2_down']
data1[:, 0] -= Ef
data2[:, 0] -= Ef
return_dict = {'data_spin_up': pd.DataFrame(**{'columns': col_names1, 'data': data1}),
'data_spin_down': pd.DataFrame(**{'columns': col_names2, 'data': data2}),
}
if plot:
# start plotting
figs_assert(on_figs, ISPIN, 'ldos')
if ISPIN == 1:
initiate_figs(on_figs)
if LORBIT == 10 or LORBIT == 0:
for i in range(1, 4):
plt.plot(data[:, 0], data[:, i], label=col_names[i])
elif LORBIT == 11 or LORBIT == 1:
for i in range(1, 10):
plt.plot(data[:, 0], data[:, i], label=col_names[i])
ax = plt.gca()
plot_helper_settings((xlim, [0, ylim_upper]), 'ldos')
return_dict.update({'ax': ax})
elif ISPIN == 2:
# plot spin combined
initiate_figs(on_figs)
if LORBIT == 10 or LORBIT == 0:
for i in range(1, 4):
plt.plot(data1[:, 0], data1[:, i] + data2[:, i], label=col_names1[i] + ' + ' + col_names2[i])
elif LORBIT == 11 or LORBIT == 1:
for i in range(1, 10):
plt.plot(data1[:, 0], data1[:, i] + data2[:, i], label=col_names1[i] + ' + ' + col_names2[i])
ax1 = plt.gca()
plot_helper_settings((xlim, [0, ylim_upper]), 'ldos')
# plot spin separated
initiate_figs(on_figs)
if LORBIT == 10 or LORBIT == 0:
for i in range(1, 4):
plt.plot(data1[:, 0], data1[:, i], label=col_names1[i])
plt.plot(data2[:, 0], -data2[:, i], label=col_names2[i])
elif LORBIT == 11 or LORBIT == 1:
for i in range(1, 10):
plt.plot(data1[:, 0], data1[:, i], label=col_names1[i])
plt.plot(data2[:, 0], -data2[:, i], label=col_names2[i])
ax2 = plt.gca()
ylim_upper_sp = None
ylim_lower_sp = None
if ylim_upper:
ylim_upper_sp = ylim_upper/2.
ylim_lower_sp = -ylim_upper_sp
plot_helper_settings((xlim, [ylim_lower_sp, ylim_upper_sp]), 'ldos')
return_dict.update({'ax_spin_combined': ax1, 'ax_spin_separated': ax2})
return return_dict
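# Hedged sketch (not part of the original module): get_ldos() adds a 1-based
# atom index and orbital-resolved columns whose names depend on LORBIT, as
# documented above. The filename is a placeholder and nothing runs on import.
def _example_get_ldos():
    result = get_ldos(atom=1, filepath='vasprun.xml')
    # LORBIT 10/0 -> s, p, d columns; LORBIT 11/1 -> s, p_y, p_z, p_x, d_xy, ...
    return result.get('data', result.get('data_spin_up'))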
| 40.570513
| 119
| 0.523937
|
903331cee03bdc293e9600b4a758b4eed8e7b089
| 11,451
|
py
|
Python
|
tensorflow/python/saved_model/function_deserialization.py
|
mickyLing/tensorflow
|
170d8a86c72a1b4f025d53c3df2992b954effbbd
|
[
"Apache-2.0"
] | 1
|
2019-02-01T09:52:07.000Z
|
2019-02-01T09:52:07.000Z
|
tensorflow/python/saved_model/function_deserialization.py
|
mickyLing/tensorflow
|
170d8a86c72a1b4f025d53c3df2992b954effbbd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/saved_model/function_deserialization.py
|
mickyLing/tensorflow
|
170d8a86c72a1b4f025d53c3df2992b954effbbd
|
[
"Apache-2.0"
] | 1
|
2021-08-08T19:12:44.000Z
|
2021-08-08T19:12:44.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.framework import function_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
def _is_tensor(t):
return isinstance(t, (ops.Tensor, resource_variable_ops.ResourceVariable))
def _inputs_compatible(args, stored_inputs):
"""Checks whether function arguments are compatible with parameters."""
if len(args) != len(stored_inputs):
return False
for arg, stored_input in zip(args, stored_inputs):
if not function_lib.is_same_structure(arg, stored_input):
return False
flattened_arg = nest.flatten(arg)
flattened_stored_input = nest.flatten(stored_input)
for a, b in zip(flattened_arg, flattened_stored_input):
if _is_tensor(a):
if not isinstance(b, tensor_spec.TensorSpec):
return False
if a.dtype != b.dtype or not b.shape.is_compatible_with(a.shape):
return False
else:
if a != b:
return False
return True
def _deserialize_function_spec(function_spec_proto, coder):
"""Deserialize a FunctionSpec object from its proto representation."""
fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
is_method = function_spec_proto.is_method
args_to_prepend = coder.decode_proto(function_spec_proto.args_to_prepend)
kwargs_to_include = coder.decode_proto(function_spec_proto.kwargs_to_include)
input_signature = coder.decode_proto(function_spec_proto.input_signature)
return function_lib.FunctionSpec(fullargspec, is_method, args_to_prepend,
kwargs_to_include, input_signature)
def recreate_concrete_function(saved_concrete_function, concrete_functions):
"""Recreates a user-facing concrete function."""
coder = nested_structure_coder.StructureCoder()
concrete_function = concrete_functions[saved_concrete_function.name]
input_signature = coder.decode_proto(
saved_concrete_function.canonicalized_input_signature)
input_signature_args, input_signature_kwargs = input_signature
if input_signature_kwargs:
raise ValueError("Restoring concrete function with non-empty kwargs (%s)." %
input_signature_kwargs)
# pylint: disable=protected-access
# Set metadata required for the concrete function to accept keyword and
# positional arguments in __call__. Normally this is set in
# get_concrete_function.
concrete_function._arg_keywords = [spec.name for spec in input_signature_args]
# TODO(allenl): Should we preserve the number of allowed positional arguments?
concrete_function._num_positional_args = len(input_signature_args)
# pylint: enable=protected-access
concrete_function.add_to_graph()
return concrete_function
class RestoredFunction(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(mdan): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function, name, autograph=False)
self._concrete_functions = concrete_functions
# TODO(vbardiovsky): This does not propagate to stateful and stateless
# functions of the RestoredFunction, which will have seen only defunned
# restored_function_body(*args, **kwargs). Therefore get_concrete_function()
# called on RestoredFunction will not work properly.
self._function_spec = function_spec
def _list_all_concrete_functions_for_serialization(self):
return self._concrete_functions
def get_concrete_function(self, *args, **kwargs):
raise NotImplementedError()
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`.
Returns:
A `Function`.
"""
# TODO(andresp): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
coder = nested_structure_coder.StructureCoder()
function_spec = _deserialize_function_spec(saved_function.function_spec,
coder)
def restored_function_body(*args, **kwargs):
"""Calls a restored function."""
# TODO(allenl): Functions saved with input_signatures should revive with
# input_signatures.
try:
canonicalized_inputs = function_spec.canonicalize_function_inputs(
*args, **kwargs)
except ValueError as e:
raise ValueError(
"Cannot canonicalize input args %r and kwargs %r. Error: %r." %
(args, kwargs, e))
debug_considered_signatures = []
for concrete_function in saved_function.concrete_function:
function_obj = concrete_functions[concrete_function.name]
canonicalized_original_inputs = coder.decode_proto(
concrete_function.canonicalized_input_signature)
debug_considered_signatures.append(canonicalized_original_inputs)
if _inputs_compatible(canonicalized_inputs,
canonicalized_original_inputs):
flattened_inputs = nest.flatten(canonicalized_inputs)
filtered_inputs = [t for t in flattened_inputs if _is_tensor(t)]
return function_obj._call_flat(filtered_inputs) # pylint: disable=protected-access
raise AssertionError(
"Could not find matching function to call for canonicalized inputs %r. "
"Only existing signatures are %r."
% (canonicalized_inputs, debug_considered_signatures))
cfs = [concrete_functions[f.name] for f in saved_function.concrete_function]
return RestoredFunction(restored_function_body,
restored_function_body.__name__, function_spec, cfs)
def load_function_def_library(library):
"""Load a set of functions as concrete functions without captured inputs.
Functions names are manipulated during load such that they do not overlap
with previously created ones.
Args:
library: FunctionDefLibrary proto message.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
ValueError: if functions dependencies have a cycle.
"""
functions = {}
for fdef in _sort_function_defs(library):
copy = _fix_fdef(fdef, functions)
func_graph = function_def_lib.function_def_to_graph(copy)
func = function_lib.ConcreteFunction(func_graph)
func.add_to_graph()
functions[fdef.signature.name] = func
# Also register the gradients in the current root context.
with ops.init_scope():
func._register_gradient() # pylint: disable=protected-access
return functions
def _sort_function_defs(library):
"""Return a topologic sort of FunctionDefs in a library."""
edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for fdef in library.function:
for dep in _list_function_deps(fdef):
edges[dep].append(fdef.signature.name)
in_count[fdef.signature.name] += 1
ready = [
fdef.signature.name
for fdef in library.function
if in_count[fdef.signature.name] == 0
]
output = []
while ready:
node = ready.pop()
output.append(node)
for dest in edges[node]:
in_count[dest] -= 1
if not in_count[dest]:
ready.append(dest)
if len(output) != len(library.function):
failed_to_resolve = sorted(set(in_count.keys()) - set(output))
raise ValueError("There is a cyclic-dependency between functions. ",
"Could not resolve %r." % (failed_to_resolve,))
reverse = {fdef.signature.name: fdef for fdef in library.function}
return [reverse[x] for x in output]
def _fix_fdef(orig_fdef, functions):
"""Fixes a FunctionDef proto to be loaded in current context.
In particular, when loading a function library into an eager context, one
  must rename the functions to avoid conflicts with existing functions.
Args:
orig_fdef: FunctionDef proto to fix. It is not modified.
functions: map from function name to a ConcreteFunction instance.
Returns:
A fixed copy of the original FunctionDef.
"""
fdef = function_pb2.FunctionDef()
fdef.CopyFrom(orig_fdef)
for node_def in fdef.node_def:
if "_gradient_op_type" in node_def.attr:
if node_def.op in ["StatefulPartitionedCall", "PartitionedCall"]:
# TODO(andresp): This code assumes that the gradient registered for this
# function call is the default gradient for the function and not a
# custom one.
fname = node_def.attr["f"].func.name
node_def.attr["_gradient_op_type"].s = compat.as_bytes(
functions[fname]._gradient_name) # pylint: disable=protected-access
else:
logging.warning("Importing a function (%s) with ops with custom "
"gradients. Will likely fail if a gradient is "
"requested.", fdef.signature.name)
for _, attr_value in node_def.attr.items():
if attr_value.func.name:
attr_value.func.name = functions[attr_value.func.name].name
fdef.signature.name = _clean_function_name(fdef.signature.name)
return fdef
def _list_function_deps(fdef):
# TODO(andresp): Recurse into list attributes and into NameAttrList attrs both
# when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
return deps
def _clean_function_name(name):
"""Vanity function to keep the function names comprehensible."""
# Note: each time a function is wrapped into `function_lib.ConcreteFunction`
# its name becomes "__inference_<orig>_xyz".
match = re.search(r"^__inference_(.*)_\d+$", name)
if match:
return match.group(1)
else:
return name
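# ----------------------------------------------------------------------------
# Hedged illustration (not part of the TensorFlow source): what the cleanup
# above does to a wrapped function name.
# _clean_function_name("__inference_my_model_call_42")  -> "my_model_call"
# _clean_function_name("my_model_call")                 -> "my_model_call"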
| 37.792079
| 91
| 0.733648
|
13c685267fbbcc38a887ff06c4f3848259f98ac3
| 691
|
py
|
Python
|
config/lisa_config.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | 17
|
2018-09-09T10:56:58.000Z
|
2022-02-22T07:18:50.000Z
|
config/lisa_config.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | null | null | null |
config/lisa_config.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | 21
|
2018-09-19T11:07:10.000Z
|
2022-02-22T07:18:45.000Z
|
# import the necessary packages
import os
# initialize the base path for the LISA dataset
BASE_PATH = "lisa"
# build the path to the annotations file
ANNOT_PATH = os.path.sep.join([BASE_PATH, "allAnnotations.csv"])
# build the path to the output training and testing record files,
# along with the class labels file
TRAIN_RECORD = os.path.sep.join([BASE_PATH,
"records/training.record"])
TEST_RECORD = os.path.sep.join([BASE_PATH,
"records/testing.record"])
CLASSES_FILE = os.path.sep.join([BASE_PATH,
"records/classes.pbtxt"])
# initialize the test split size
TEST_SIZE = 0.25
# initialize the class labels dictionary
CLASSES = {"pedestrianCrossing": 1, "signalAhead": 2, "stop": 3}
| 30.043478
| 65
| 0.75398
|
9e34312fcb8aa63e72558ec64b9b4c820f2243d5
| 19,649
|
py
|
Python
|
process_data_script/rolling_mean_annual.py
|
idunnam/Thesis
|
a567a25aa037c949de285158804a6ee396fc0e6c
|
[
"MIT"
] | null | null | null |
process_data_script/rolling_mean_annual.py
|
idunnam/Thesis
|
a567a25aa037c949de285158804a6ee396fc0e6c
|
[
"MIT"
] | 1
|
2022-01-28T13:12:26.000Z
|
2022-01-28T13:12:26.000Z
|
process_data_script/rolling_mean_annual.py
|
idunnam/Thesis
|
a567a25aa037c949de285158804a6ee396fc0e6c
|
[
"MIT"
] | null | null | null |
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/ACCESS_anomaly_annual.nc')
HADGEM_cloud = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/HADGEM_anomaly_cloud_annual.nc')
HADGEM_SMB = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/HADGEM_anomaly_SMB_annual.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/HADGEM_anomaly_annual.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CSIRO_anomaly_annual.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/IPSL_anomaly_annual.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/MIROC5_anomaly_annual.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/NORESM_anomaly_annual.nc')
#CMIP6 models
CESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CESM_anomaly_annual.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CNRM_ESM2_anomaly_annual.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CNRM_CM6_anomaly_annual.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/MRI_anomaly_annual.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/UKMO_anomaly_annual.nc')
# Spatial mean over the domain followed by a 20-year centred rolling mean
ACCESS_time = ACCESS.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
HADGEM_time = HADGEM.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
HADGEM_cloud_time = HADGEM_cloud.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
HADGEM_SMB_time = HADGEM_SMB.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
CSIRO_time = CSIRO.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
IPSL_time = IPSL.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
MIROC5_time = MIROC5.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
NORESM_time = NORESM.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
CESM_time = CESM.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
CNRM_ESM2_time = CNRM_ESM2.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
CNRM_CM6_time = CNRM_CM6.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
MRI_time = MRI.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
UKMO_time = UKMO.mean(dim=['X10_105', 'Y21_199']).rolling(year=20,center= True).mean()
"""
#Rolling mean
ACCESS_r = ACCESS.rolling(year=20,center= True).mean()
HADGEM_r = HADGEM.rolling(year=20,center= True).mean()
HADGEM_cloud_r = HADGEM_cloud.rolling(year=20,center= True).mean()
CSIRO_r = CSIRO.rolling(year=20,center= True).mean()
IPSL_r = IPSL.rolling(year=20,center= True).mean()
MIROC5_r = MIROC5.rolling(year=20,center= True).mean()
NORESM_r = NORESM.rolling(year=20,center= True).mean()
CESM_r = CESM.rolling(year=20,center= True).mean()
CNRM_ESM2_r = CNRM_ESM2.rolling(year=20,center= True).mean()
CNRM_CM6_r = CNRM_CM6.rolling(year=20,center= True).mean()
MRI_r = MRI.rolling(year=20,center= True).mean()
UKMO_r = UKMO.rolling(year=20,center= True).mean()
"""
TAS = int(input('Enter TAS='))
# For each model, select the -/+10-year interval around the first year that reaches the chosen TAS warming level.
ACCESS_sel = ACCESS.sel(year= slice(str(ACCESS_time.year.where((ACCESS_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(ACCESS_time.year.where((ACCESS_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
HADGEM_sel = HADGEM.sel(year= slice(str(HADGEM_time.year.where((HADGEM_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(HADGEM_time.year.where((HADGEM_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
HADGEM_cloud_sel = HADGEM_cloud.sel(year= slice(str(HADGEM_cloud_time.year.where((HADGEM_cloud_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(HADGEM_cloud_time.year.where((HADGEM_cloud_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
HADGEM_SMB_sel = HADGEM_SMB.sel(year= slice(str(HADGEM_SMB_time.year.where((HADGEM_SMB_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(HADGEM_SMB_time.year.where((HADGEM_SMB_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
#if season == 'JJA':
# CSIRO_sel= CSIRO.sel(year=slice('2080','2100'))
#else:
CSIRO_sel = CSIRO.sel(year= slice(str(CSIRO_time.year.where((CSIRO_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(CSIRO_time.year.where((CSIRO_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
IPSL_sel = IPSL.sel(year= slice(str(IPSL_time.year.where((IPSL_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(IPSL_time.year.where((IPSL_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
MIROC5_sel = MIROC5.sel(year= slice(str(MIROC5_time.year.where((MIROC5_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(MIROC5_time.year.where((MIROC5_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
NORESM_sel = NORESM.sel(year= slice(str(NORESM_time.year.where((NORESM_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(NORESM_time.year.where((NORESM_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
CESM_sel = CESM.sel(year= slice(str(CESM_time.year.where((CESM_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(CESM_time.year.where((CESM_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
CNRM_ESM2_sel = CNRM_ESM2.sel(year= slice(str(CNRM_ESM2_time.year.where((CNRM_ESM2_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(CNRM_ESM2_time.year.where((CNRM_ESM2_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
CNRM_CM6_sel = CNRM_CM6.sel(year= slice(str(CNRM_CM6_time.year.where((CNRM_CM6_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(CNRM_CM6_time.year.where((CNRM_CM6_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
MRI_sel = MRI.sel(year= slice(str(MRI_time.year.where((MRI_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(MRI_time.year.where((MRI_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
UKMO_sel = UKMO.sel(year= slice(str(UKMO_time.year.where((UKMO_time.TT >=TAS)).dropna(dim='year')[0].values - 10),
str(UKMO_time.year.where((UKMO_time.TT >=TAS)).dropna(dim='year')[0].values + 10)))
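# ----------------------------------------------------------------------------
# Hedged refactoring sketch (not part of the original script): the per-model
# selections above all repeat the same pattern, so the idea can be written once
# as a helper. The function and variable names below are illustrative only.
def select_warming_window(ds, ds_time, tas, half_width=10):
    # first year at which the spatially averaged, 20-year rolling-mean TT anomaly reaches `tas`
    centre = ds_time.year.where(ds_time.TT >= tas).dropna(dim='year')[0].values
    # the corresponding -/+ `half_width` year slice of the full-resolution dataset
    return ds.sel(year=slice(str(centre - half_width), str(centre + half_width)))
# e.g. ACCESS_sel = select_warming_window(ACCESS, ACCESS_time, TAS)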
# Save the 20-year interval selected above for each model
ACCESS_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/ACCESS_rol_'+str(TAS)+'_annual.nc')
HADGEM_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/HADGEM_rol_'+str(TAS)+'_annual.nc')
HADGEM_cloud_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/HADGEM_cloud_rol_'+str(TAS)+'_annual.nc')
HADGEM_SMB_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/HADGEM_SMB_rol_'+str(TAS)+'_annual.nc')
CSIRO_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CSIRO_rol_'+str(TAS)+'_annual.nc')
IPSL_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/IPSL_rol_'+str(TAS)+'_annual.nc')
MIROC5_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/MIROC5_rol_'+str(TAS)+'_annual.nc')
NORESM_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/NORESM_rol_'+str(TAS)+'_annual.nc')
CESM_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CESM_rol_'+str(TAS)+'_annual.nc')
CNRM_ESM2_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CNRM_ESM2_rol_'+str(TAS)+'_annual.nc')
CNRM_CM6_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CNRM_CM6_rol_'+str(TAS)+'_annual.nc')
MRI_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/MRI_rol_'+str(TAS)+'_annual.nc')
UKMO_sel.to_netcdf('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/UKMO_rol_'+str(TAS)+'_annual.nc')
# Print, for each TAS threshold, the year each model reaches it, the 20-year interval, and the mean and std of TT over that interval
ACCESS = ACCESS.mean(dim=['X10_105', 'Y21_199'])
HADGEM = HADGEM.mean(dim=['X10_105', 'Y21_199'])
CSIRO = CSIRO.mean(dim=['X10_105', 'Y21_199'])
IPSL = IPSL.mean(dim=['X10_105', 'Y21_199'])
MIROC5 = MIROC5.mean(dim=['X10_105', 'Y21_199'])
NORESM = NORESM.mean(dim=['X10_105', 'Y21_199'])
CESM = CESM.mean(dim=['X10_105', 'Y21_199'])
CNRM_ESM2 = CNRM_ESM2.mean(dim=['X10_105', 'Y21_199'])
CNRM_CM6 = CNRM_CM6.mean(dim=['X10_105', 'Y21_199'])
MRI = MRI.mean(dim=['X10_105', 'Y21_199'])
UKMO = UKMO.mean(dim=['X10_105', 'Y21_199'])
ACCESS = ACCESS.rolling(year=20,center= True).mean()
HADGEM = HADGEM.rolling(year=20,center= True).mean()
CSIRO = CSIRO.rolling(year=20,center= True).mean()
IPSL = IPSL.rolling(year=20,center= True).mean()
MIROC5 = MIROC5.rolling(year=20,center= True).mean()
NORESM = NORESM.rolling(year=20,center= True).mean()
CESM = CESM.rolling(year=20,center= True).mean()
CNRM_ESM2 = CNRM_ESM2.rolling(year=20,center= True).mean()
CNRM_CM6 = CNRM_CM6.rolling(year=20,center= True).mean()
MRI = MRI.rolling(year=20,center= True).mean()
UKMO = UKMO.rolling(year=20,center= True).mean()
TAS = [1.5, 2.0 ,2.5, 3.0, 3.5, 4.0]
for i in range(0,6):
print('TAS:', TAS[i])
print('Model',' year', ' interval', ' mean', ' std')
print('--------------------------------------------------')
print('ACCESS :',np.int(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((ACCESS.sel(year=slice(
str(np.int(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((ACCESS.sel(year=slice(
str(np.int(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(ACCESS.year.where((ACCESS.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('HADGEM :',np.int(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((HADGEM.sel(year=slice(
str(np.int(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((HADGEM.sel(year=slice(
str(np.int(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(HADGEM.year.where((HADGEM.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('CSIRO :',np.int(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((CSIRO.sel(year=slice(
str(np.int(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((CSIRO.sel(year=slice(
str(np.int(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CSIRO.year.where((CSIRO.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('IPSL :',np.int(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((IPSL.sel(year=slice(
str(np.int(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((IPSL.sel(year=slice(
str(np.int(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(IPSL.year.where((IPSL.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('MIROC5 :',np.int(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((MIROC5.sel(year=slice(
str(np.int(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((MIROC5.sel(year=slice(
str(np.int(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(MIROC5.year.where((MIROC5.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('NORESM :',np.int(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((NORESM.sel(year=slice(
str(np.int(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((NORESM.sel(year=slice(
str(np.int(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(NORESM.year.where((NORESM.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('CESM :',np.int(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((CESM.sel(year=slice(
str(np.int(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((CESM.sel(year=slice(
str(np.int(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CESM.year.where((CESM.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('CNRM_ESM2:',np.int(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((CNRM_ESM2.sel(year=slice(
str(np.int(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((CNRM_ESM2.sel(year=slice(
str(np.int(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CNRM_ESM2.year.where((CNRM_ESM2.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('CNRM_CM6 :',np.int(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((CNRM_CM6.sel(year=slice(
str(np.int(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((CNRM_CM6.sel(year=slice(
str(np.int(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(CNRM_CM6.year.where((CNRM_CM6.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('MRI :',np.int(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((MRI.sel(year=slice(
str(np.int(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((MRI.sel(year=slice(
str(np.int(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(MRI.year.where((MRI.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('UKESM :',np.int(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values),
' (',
np.int(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values - 10),
'-',
np.int(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values + 10),
') ',
np.round((UKMO.sel(year=slice(
str(np.int(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT.mean()).values,2),
' ',
np.round(np.std((UKMO.sel(year=slice(
str(np.int(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values - 10)),
str(UKMO.year.where((UKMO.TT >=TAS[i])).dropna(dim='year')[0].values + 10))).TT)).values,2))
print('--------------------------------------------------')
print('--------------------------------------------------')
| 61.403125
| 147
| 0.608021
|
0b5eaeb1760454cb4b36ceb35fbb5f1cdaa8f01f
| 3,115
|
py
|
Python
|
paystackapi/transaction_split.py
|
eadwinCode/paystack-python
|
dde449e3c62d843d047ef99eb8eb4c8731cb88de
|
[
"MIT"
] | 89
|
2016-03-18T17:08:43.000Z
|
2022-03-27T09:56:27.000Z
|
paystackapi/transaction_split.py
|
eadwinCode/paystack-python
|
dde449e3c62d843d047ef99eb8eb4c8731cb88de
|
[
"MIT"
] | 46
|
2016-04-01T14:59:47.000Z
|
2022-03-31T17:18:12.000Z
|
paystackapi/transaction_split.py
|
eadwinCode/paystack-python
|
dde449e3c62d843d047ef99eb8eb4c8731cb88de
|
[
"MIT"
] | 38
|
2016-03-29T16:22:23.000Z
|
2022-03-27T09:57:19.000Z
|
"""Script used to define the paystack Plan class."""
from paystackapi.base import PayStackBase
class TransactionSplit(PayStackBase):
"""docstring for Transaction Split."""
@classmethod
def create(cls, **kwargs):
"""
Create a split payment on your integration
Args:
name: Name of the transaction split
type: The type of transaction split you want to create [ percentage | flat ]
currency: Any of NGN, GHS, ZAR, or USD
subaccounts: A list of object containing subaccount code and number of shares
bearer_type: Any of subaccount | account | all-proportional | all
bearer_subaccount: Subaccount code
**kwargs
Returns:
Json data from paystack API.
"""
return cls().requests.post('split', data=kwargs)
@classmethod
def list(cls, **kwargs):
"""
List/search for the transaction splits available on your integration.
Args:
perPage: records you want to retrieve per page (Integer)
page: what page you want to retrieve (Integer)
Returns:
JSON data from paystack's API.
"""
return cls().requests.get("split", qs=kwargs)
@classmethod
def fetch(cls, split_id):
"""
Get details of a split on your integration.
Args:
split_id: split ID
Returns:
Json data from paystack API.
"""
return cls().requests.get(f"split/{split_id}")
@classmethod
def update(cls, split_id, **kwargs):
"""
Update a transaction split details on your integration
Args:
split_id: split ID
name: Name of the transaction split
active: True or False
subaccounts: A list of object containing subaccount code and number of shares
bearer_type: Any of subaccount | account | all-proportional | all
bearer_subaccount: Subaccount code
**kwargs
Returns:
Json data from paystack API.
"""
return cls().requests.put(f"split/{split_id}", data=kwargs)
@classmethod
def add_or_update_split_subaccount(cls, split_id, **kwargs):
"""
Add a Subaccount to a Transaction Split, or update the share of an existing Subaccount in a Transaction Split
Args:
split_id: split ID
subaccount: This is the sub account code
share: This is the transaction share for the subaccount
Returns:
Json data from paystack API.
"""
return cls().requests.post(f"split/{split_id}/subaccount/add", data=kwargs)
@classmethod
def remove_split_subaccount(cls, split_id, **kwargs):
"""
Remove a subaccount from a transaction split
Args:
split_id: split ID
subaccount: This is the sub account code
Returns:
Json data from paystack API.
"""
return cls().requests.post(f"split/{split_id}/subaccount/remove", data=kwargs)
| 30.539216
| 117
| 0.6
|
1bece2e2659068cbf1198b77d77fa472941a1f3d
| 7,856
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200701/get_ip_allocation.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20200701/get_ip_allocation.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20200701/get_ip_allocation.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIpAllocationResult',
'AwaitableGetIpAllocationResult',
'get_ip_allocation',
]
@pulumi.output_type
class GetIpAllocationResult:
"""
IpAllocation resource.
"""
def __init__(__self__, allocation_tags=None, etag=None, id=None, ipam_allocation_id=None, location=None, name=None, prefix=None, prefix_length=None, prefix_type=None, subnet=None, tags=None, type=None, virtual_network=None):
if allocation_tags and not isinstance(allocation_tags, dict):
raise TypeError("Expected argument 'allocation_tags' to be a dict")
pulumi.set(__self__, "allocation_tags", allocation_tags)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ipam_allocation_id and not isinstance(ipam_allocation_id, str):
raise TypeError("Expected argument 'ipam_allocation_id' to be a str")
pulumi.set(__self__, "ipam_allocation_id", ipam_allocation_id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if prefix and not isinstance(prefix, str):
raise TypeError("Expected argument 'prefix' to be a str")
pulumi.set(__self__, "prefix", prefix)
if prefix_length and not isinstance(prefix_length, int):
raise TypeError("Expected argument 'prefix_length' to be a int")
pulumi.set(__self__, "prefix_length", prefix_length)
if prefix_type and not isinstance(prefix_type, str):
raise TypeError("Expected argument 'prefix_type' to be a str")
pulumi.set(__self__, "prefix_type", prefix_type)
if subnet and not isinstance(subnet, dict):
raise TypeError("Expected argument 'subnet' to be a dict")
pulumi.set(__self__, "subnet", subnet)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network and not isinstance(virtual_network, dict):
raise TypeError("Expected argument 'virtual_network' to be a dict")
pulumi.set(__self__, "virtual_network", virtual_network)
@property
@pulumi.getter(name="allocationTags")
def allocation_tags(self) -> Optional[Mapping[str, str]]:
"""
IpAllocation tags.
"""
return pulumi.get(self, "allocation_tags")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipamAllocationId")
def ipam_allocation_id(self) -> Optional[str]:
"""
The IPAM allocation ID.
"""
return pulumi.get(self, "ipam_allocation_id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
"""
The address prefix for the IpAllocation.
"""
return pulumi.get(self, "prefix")
@property
@pulumi.getter(name="prefixLength")
def prefix_length(self) -> Optional[int]:
"""
The address prefix length for the IpAllocation.
"""
return pulumi.get(self, "prefix_length")
@property
@pulumi.getter(name="prefixType")
def prefix_type(self) -> Optional[str]:
"""
The address prefix Type for the IpAllocation.
"""
return pulumi.get(self, "prefix_type")
@property
@pulumi.getter
def subnet(self) -> 'outputs.SubResourceResponse':
"""
The Subnet that using the prefix of this IpAllocation resource.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetwork")
def virtual_network(self) -> 'outputs.SubResourceResponse':
"""
The VirtualNetwork that using the prefix of this IpAllocation resource.
"""
return pulumi.get(self, "virtual_network")
class AwaitableGetIpAllocationResult(GetIpAllocationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIpAllocationResult(
allocation_tags=self.allocation_tags,
etag=self.etag,
id=self.id,
ipam_allocation_id=self.ipam_allocation_id,
location=self.location,
name=self.name,
prefix=self.prefix,
prefix_length=self.prefix_length,
prefix_type=self.prefix_type,
subnet=self.subnet,
tags=self.tags,
type=self.type,
virtual_network=self.virtual_network)
def get_ip_allocation(expand: Optional[str] = None,
ip_allocation_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIpAllocationResult:
"""
IpAllocation resource.
:param str expand: Expands referenced resources.
:param str ip_allocation_name: The name of the IpAllocation.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['ipAllocationName'] = ip_allocation_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200701:getIpAllocation', __args__, opts=opts, typ=GetIpAllocationResult).value
return AwaitableGetIpAllocationResult(
allocation_tags=__ret__.allocation_tags,
etag=__ret__.etag,
id=__ret__.id,
ipam_allocation_id=__ret__.ipam_allocation_id,
location=__ret__.location,
name=__ret__.name,
prefix=__ret__.prefix,
prefix_length=__ret__.prefix_length,
prefix_type=__ret__.prefix_type,
subnet=__ret__.subnet,
tags=__ret__.tags,
type=__ret__.type,
virtual_network=__ret__.virtual_network)
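# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK): how get_ip_allocation
# might be invoked from inside a Pulumi program. The resource group and
# IpAllocation names below are illustrative placeholders.
#
# result = get_ip_allocation(
#     resource_group_name="example-rg",
#     ip_allocation_name="example-ipalloc",
# )
# pulumi.export("ipAllocationPrefix", result.prefix)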
| 34.761062
| 228
| 0.635056
|
e63ea67ba2cdb5892ddd6714ac230748ce283df7
| 4,002
|
py
|
Python
|
trackchanges.py
|
farooqy/feizhonglaravel
|
93847038b021ccf449427066a755caaa260ac791
|
[
"MIT"
] | null | null | null |
trackchanges.py
|
farooqy/feizhonglaravel
|
93847038b021ccf449427066a755caaa260ac791
|
[
"MIT"
] | 5
|
2020-08-24T16:49:50.000Z
|
2022-02-26T18:43:43.000Z
|
trackchanges.py
|
farooqy/feizhonglaravel
|
93847038b021ccf449427066a755caaa260ac791
|
[
"MIT"
] | null | null | null |
import os
import json
import hashlib
def deunicodify_hook(pairs):
new_pairs = []
for key, value in pairs:
if isinstance(value, unicode):
value = value.encode('utf-8')
if isinstance(key, unicode):
key = key.encode('utf-8')
new_pairs.append((key, value))
return dict(new_pairs)
if(os.path.isfile('hash_file.json') is False):
print("[-] The hash file has not been found")
create_now = raw_input("Do you want to create now? (Y/N): ")
if(create_now is "Y"):
hash_file = open('hash_file.json', 'w')
if(hash_file is False):
print("[-] Failed to create the hash file")
else:
print("[+] Successfully created the hash file")
# close(hash_file)
else:
print("[-] Exiting tracker ")
exit(0)
hash_file = open('hash_file.json','r')
filedata = hash_file.read()
if(filedata is ""):
filedata = "{}"
data = json.loads(filedata, object_pairs_hook=deunicodify_hook)
# print(data)
directory = raw_input("Specific directory?: ")
copy_dir = raw_input("Copy to which directory?: ")
if(directory is ""):
directory = "."
elif(os.path.exists(directory) is False):
print("[+] The directory ",directory," Does not exist")
exit(0)
elif(os.path.exists(copy_dir) is False):
print("[+] The copy directory ",copy_dir," Does not exist")
exit(0)
applied_changes = False
print("")
print("")
exception_files_dir = [
"index.php",".htaccess"
]
for root, dirs, files in os.walk(directory):
for filename in files:
skip = False
for dir in exception_files_dir:
if(dir in root or dir == filename):
skip = True
if(skip):
print("[**] Exception file or directory. Skipping ....")
continue
# print('Filname: ',filename)
# _go_on = raw_input("Continue? (Y/N): ")
# if(_go_on is "N"):
# continue
# else:
if(filename in exception_files_dir):
print("[**] Found exceptional file. Skipping ...")
continue
file_content = open(root+"/"+filename, 'rb')
if(file_content is False):
print("[-] The file ", filename, " failed to open, possible permission error")
continue
index = root+filename
hash_object = hashlib.md5(file_content.read())
digest = hash_object.hexdigest()
# print("[+] ",filename, " ---> ",digest)
# write_json = raw_input("Write to json file? (y/n) : ")
# if(write_json is "Y"):
source = root+"/"
if directory in source:
sub_dir = source.split(directory)[1]
else:
sub_dir = ""
target_dr = copy_dir+sub_dir
if(os.path.exists(target_dr) is False):
os.system("mkdir -p "+target_dr)
if(index in data and data[index] == digest):
# print("[+] files are equal digest ",index)
continue
elif index in data:
print("[*] file change detected at ",index, ' new digest ',digest, ' ---> ',data[index])
print("[*] Tracking changes .... ")
data[index] = digest
applied_changes = True
else:
print("[*] New file has been discovered at ",filename," setting digest ---> ",digest)
# track = raw_input(" Track new file? (y/n): ")
# if(track is "y"):
data[index] = digest
os.system("cp "+root+"/"+filename+" "+target_dr+filename)
applied_changes = True
# elif track is "x":
# hash_file = open('hash_file.json', 'w')
# json.dump(data, hash_file)
# exit(0)
hash_file = open('hash_file.json', 'w')
json.dump(data, hash_file)
# exit(0)
if(applied_changes):
print("")
print("")
print("[***] Completed changes Successfully")
else:
print("")
print("")
print("[****] No changes detected. Process completed")
| 33.07438
| 100
| 0.55922
|
ba31ad3b491508ba28d7c89b2479c7a02abc77bb
| 21,044
|
py
|
Python
|
electrum/plugins/keepkey/keepkey.py
|
fujicoin/electrum-fjc-3.3.9
|
051fe8988058a64ee61a84aa28baf0f029982d73
|
[
"MIT"
] | 4
|
2017-07-10T00:10:05.000Z
|
2020-05-22T12:16:38.000Z
|
electrum/plugins/keepkey/keepkey.py
|
fujicoin/electrum-fjc-3.3.9
|
051fe8988058a64ee61a84aa28baf0f029982d73
|
[
"MIT"
] | 7
|
2018-02-08T03:52:12.000Z
|
2021-11-15T17:49:57.000Z
|
electrum/plugins/keepkey/keepkey.py
|
fujicoin/electrum-fjc-3.3.9
|
051fe8988058a64ee61a84aa28baf0f029982d73
|
[
"MIT"
] | 11
|
2017-07-13T02:53:08.000Z
|
2022-02-05T13:48:32.000Z
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Fujicoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
| 42.772358
| 127
| 0.598318
|
699dcbbb67022941a57e874522d19f3274e0f9fe
| 56
|
py
|
Python
|
src/optimizer.py
|
muzammil360/DL-learn
|
16e90d099246e75eb7a9cc4a6e0515c0178423e0
|
[
"MIT"
] | null | null | null |
src/optimizer.py
|
muzammil360/DL-learn
|
16e90d099246e75eb7a9cc4a6e0515c0178423e0
|
[
"MIT"
] | null | null | null |
src/optimizer.py
|
muzammil360/DL-learn
|
16e90d099246e75eb7a9cc4a6e0515c0178423e0
|
[
"MIT"
] | null | null | null |
def getOptimizer():
    """Placeholder optimizer factory; not implemented yet."""
    print("This is getOptimizer")
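# Hedged sketch of what this factory commonly grows into (assumption: PyTorch, which the stub
# above does not mention; the helper name and default values are hypothetical):
def get_optimizer_sketch(name, params, lr=1e-3):
    import torch  # local import keeps the sketch self-contained
    if name == "sgd":
        return torch.optim.SGD(params, lr=lr, momentum=0.9)
    if name == "adam":
        return torch.optim.Adam(params, lr=lr)
    raise ValueError("unknown optimizer: %s" % name)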
| 18.666667
| 30
| 0.75
|
3f08e9f432e88955762abf66ae6c2e3b2acdb5b3
| 1,821
|
py
|
Python
|
tools/importHexo.py
|
OhYee/OBlog
|
a9d7e4fda5651cf9c5afd4c128c4df4442794e97
|
[
"BSD-3-Clause"
] | 23
|
2018-02-23T12:56:43.000Z
|
2021-12-20T13:21:47.000Z
|
tools/importHexo.py
|
OhYee/OBlog
|
a9d7e4fda5651cf9c5afd4c128c4df4442794e97
|
[
"BSD-3-Clause"
] | 17
|
2018-02-23T12:52:39.000Z
|
2018-12-04T05:50:58.000Z
|
tools/importHexo.py
|
OhYee/OBlog
|
a9d7e4fda5651cf9c5afd4c128c4df4442794e97
|
[
"BSD-3-Clause"
] | 2
|
2018-06-16T20:52:23.000Z
|
2021-04-08T15:29:44.000Z
|
import os
import re
import json
Hexo_source_post_dir = r"D:\OneDrive\OneDrive - eclass inc\Workspace\Code\Blog\source\_posts"
def listFiles(root, relative=''):
res = []
List = os.listdir(root + relative)
for file in List:
filename = root + relative + '/' + file
if os.path.isdir(filename):
res += listFiles(root, relative + '/' + file)
elif os.path.isfile(filename):
res.append(relative + '/' + file)
else:
print("error at ", file)
return res
if __name__ == '__main__':
List = listFiles(Hexo_source_post_dir)
posts = []
idx = 0
for path in List:
print(path)
f = open(Hexo_source_post_dir + path, 'r', encoding='utf-8')
text = f.read()
        res = re.match(r'^---\n(.*?)\n---\n(.*)$', text, flags=re.S)
        if res is None:  # no Hexo front matter; skip this file instead of crashing on .group()
            print("skipping (no front matter):", path)
            f.close()
            continue
        raw = res.group(2)
title = re.findall(r'.*title: (.+?)\n', res.group(1))[0]
time = re.findall(r'^date: (.+?)$', res.group(1), re.M)[0]
taglist = re.findall(r'^[ ]*-[ ]+(.+?)[ ]*$', res.group(1), re.M)
taglist += re.findall(r'^categories:[ ]+(.+?)[ ]*$',
res.group(1), re.M)
taglist += re.findall(r'^tags:[ ]+(.+?)[ ]*$', res.group(1), re.M)
tags = ','.join(taglist)
taglist = set(taglist)
post = {
"url": path[1:-3],
"title": title,
"time": time,
"tags": tags,
"raw": raw,
}
posts.append(post)
# print(post)
# break
# idx += 1
# if idx >= 20:
# break
f.close()
f = open('./posts.json', 'w', encoding='utf-8')
# f.write(str(posts))
f.write(json.dumps(posts))
f.close()
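# Quick self-contained illustration of the front-matter parsing above, run on a tiny made-up
# Hexo post (the sample text is hypothetical):
import re
sample = "---\ntitle: Hello\ndate: 2018-01-01 10:00:00\ntags:\n  - demo\n---\nbody text"
m = re.match(r'^---\n(.*?)\n---\n(.*)$', sample, flags=re.S)
print(re.findall(r'.*title: (.+?)\n', m.group(1)))      # ['Hello']
print(re.findall(r'^date: (.+?)$', m.group(1), re.M))   # ['2018-01-01 10:00:00']
print(m.group(2))                                       # body text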
| 28.015385
| 94
| 0.462932
|
858dbaa4da12d06e147f06ed5e02dad82f27473a
| 1,152
|
py
|
Python
|
migrations/versions/15831a43fb71_add_the_user_login_model_attributes.py
|
ThiraTheNerd/the_blog
|
3edd51b2507726b4339f3b59b95133f9e2005700
|
[
"MIT"
] | null | null | null |
migrations/versions/15831a43fb71_add_the_user_login_model_attributes.py
|
ThiraTheNerd/the_blog
|
3edd51b2507726b4339f3b59b95133f9e2005700
|
[
"MIT"
] | null | null | null |
migrations/versions/15831a43fb71_add_the_user_login_model_attributes.py
|
ThiraTheNerd/the_blog
|
3edd51b2507726b4339f3b59b95133f9e2005700
|
[
"MIT"
] | null | null | null |
"""add the user login model attributes
Revision ID: 15831a43fb71
Revises: 7951013acf32
Create Date: 2021-06-23 00:02:56.627253
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '15831a43fb71'
down_revision = '7951013acf32'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('users_role_id_fkey', 'users', type_='foreignkey')
op.drop_column('users', 'role_id')
op.drop_column('users', 'profile_pic_path')
op.drop_column('users', 'bio')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('bio', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('users', sa.Column('profile_pic_path', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('users', sa.Column('role_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('users_role_id_fkey', 'users', 'roles', ['role_id'], ['id'])
# ### end Alembic commands ###
| 32.914286
| 107
| 0.702257
|
4d1f1ccdc13883c1c64f142d8434d485fd196847
| 10,446
|
py
|
Python
|
test/functional/wallet_balance.py
|
go-keiryo/bitcoin
|
385bb453040542b4c428ba120c84e438311ff49b
|
[
"MIT"
] | 2
|
2019-05-15T17:03:40.000Z
|
2021-04-05T20:40:18.000Z
|
test/functional/wallet_balance.py
|
go-keiryo/bitcoin
|
385bb453040542b4c428ba120c84e438311ff49b
|
[
"MIT"
] | 4
|
2019-07-23T08:32:01.000Z
|
2020-07-22T08:05:26.000Z
|
test/functional/wallet_balance.py
|
go-keiryo/bitcoin
|
385bb453040542b4c428ba120c84e438311ff49b
|
[
"MIT"
] | 10
|
2019-05-23T03:15:07.000Z
|
2021-12-04T13:32:05.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
# Create and sign raw transactions from node to address for amt.
# Creates a transaction for each fee and returns an array
# of the raw transactions.
utxos = [u for u in node.listunspent(0) if u['spendable']]
# Create transactions
inputs = []
ins_total = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
ins_total += utxo['amount']
if ins_total + max(fees) > amt:
break
txs = []
for fee in fees:
outputs = {address: amt, node.getrawchangeaddress(): ins_total - amt - fee}
raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
raw_tx = node.signrawtransactionwithwallet(raw_tx)
assert_equal(raw_tx['complete'], True)
txs.append(raw_tx)
return txs
class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
['-limitdescendantcount=3'], # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
[],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].importaddress(ADDRESS_WATCHONLY)
# Check that nodes don't own any UTXOs
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Check that only node 0 is watching an address")
assert 'watchonly' in self.nodes[0].getbalances()
assert 'watchonly' not in self.nodes[1].getbalances()
self.log.info("Mining blocks ...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
assert 'watchonly' not in self.nodes[1].getbalances()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
# Send 40 BTC from 0 to 1 and 60 BTC from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
def test_balances(*, fee_node_1=0):
# getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('30') - fee_node_1) # change from node 1's send
# Same with minconf=0
assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('30') - fee_node_1)
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
# getunconfirmedbalance
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) # Doesn't include output of node 0's send since it was spent
assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('0'))
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('0'))
test_balances(fee_node_1=Decimal('0.01'))
# Node 1 bumps the transaction fee and resends
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.nodes[0].sendrawtransaction(txs[1]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
test_balances(fee_node_1=Decimal('0.02'))
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
# balances are correct after the transactions are confirmed
assert_equal(self.nodes[0].getbalance(), Decimal('69.99')) # node 1's send plus change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('29.98')) # change from node 0's send
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
# check mempool transactions count for wallet unconfirmed balance after
# dynamically loading the wallet.
before = self.nodes[1].getunconfirmedbalance()
dst = self.nodes[1].getnewaddress()
self.nodes[1].unloadwallet('')
self.nodes[0].sendtoaddress(dst, 0.1)
self.sync_all()
self.nodes[1].loadwallet('')
after = self.nodes[1].getunconfirmedbalance()
assert_equal(before + Decimal('0.1'), after)
# Create 3 more wallet txs, where the last is not accepted to the
# mempool because it is the third descendant of the tx above
for _ in range(3):
# Set amount high enough such that all coins are spent by each tx
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
self.log.info('Check that wallet txs not in the mempool are untrusted')
assert txid not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
assert_equal(self.nodes[0].getbalance(minconf=0), 0)
self.log.info("Test replacement and reorg of non-mempool tx")
tx_orig = self.nodes[0].gettransaction(txid)['hex']
# Increase fee by 1 coin
tx_replace = tx_orig.replace(
struct.pack("<q", 99 * 10**8).hex(),
struct.pack("<q", 98 * 10**8).hex(),
)
tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
# Total balance is given by the sum of outputs of the tx
total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
self.sync_all()
self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
# Now confirm tx_replace
block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
self.log.info('Put txs back into mempool of node 1 (not node 0)')
self.nodes[0].invalidateblock(block_reorg)
self.nodes[1].invalidateblock(block_reorg)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0'])
connect_nodes_bi(self.nodes, 0, 1)
sync_blocks(self.nodes)
self.nodes[1].sendrawtransaction(tx_orig)
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
WalletTest().main()
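# Hedged aside (not part of the test): the tx_replace trick above works because a transaction
# output amount is serialized as an 8-byte little-endian integer, so swapping its hex encoding
# lowers the output value by one coin and raises the fee by the same amount.
def _show_amount_swap():  # hypothetical helper, illustration only
    import struct
    old_hex = struct.pack("<q", 99 * 10**8).hex()  # amount bytes of the original output
    new_hex = struct.pack("<q", 98 * 10**8).hex()  # one coin less in the output
    print(old_hex, "->", new_hex)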
| 48.138249
| 139
| 0.664465
|
6c309715469c07a7de64996c388446f36fe89c1c
| 1,953
|
py
|
Python
|
src/engine/SCons/Tool/f03.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1
|
2017-01-28T15:39:07.000Z
|
2017-01-28T15:39:07.000Z
|
src/engine/SCons/Tool/f03.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 4
|
2019-04-11T16:27:45.000Z
|
2019-04-11T23:56:30.000Z
|
src/engine/SCons/Tool/f03.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 2
|
2018-01-16T11:29:16.000Z
|
2020-05-13T16:48:26.000Z
|
"""engine.SCons.Tool.f03
Tool-specific initialization for the generic Posix f03 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
import SCons.Tool
import SCons.Util
from . import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f03_to_env
compilers = ['f03']
def generate(env):
add_all_to_env(env)
add_f03_to_env(env)
fcomp = env.Detect(compilers) or 'f03'
env['F03'] = fcomp
env['SHF03'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
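# Hedged usage sketch (not part of this module): inside an SConstruct one would typically
# request the tool by name and let it detect an f03 compiler; the source file name below
# is hypothetical.
#
#     env = Environment(tools=['default', 'f03'])
#     env.Program('hello', 'hello.f03')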
| 30.515625
| 73
| 0.757296
|
7e2d20529b3f2eca13d19d9df5b3fc00906b93ac
| 914
|
py
|
Python
|
dash_docs/reusable_components/Chapter.py
|
joelostblom/dash-docs
|
7be5aed7795f61ac32375ce33a18046b8f2f5254
|
[
"MIT"
] | 379
|
2017-06-21T14:35:52.000Z
|
2022-03-20T01:47:14.000Z
|
dash_docs/reusable_components/Chapter.py
|
joelostblom/dash-docs
|
7be5aed7795f61ac32375ce33a18046b8f2f5254
|
[
"MIT"
] | 746
|
2017-06-21T19:58:17.000Z
|
2022-03-23T14:51:24.000Z
|
dash_docs/reusable_components/Chapter.py
|
joelostblom/dash-docs
|
7be5aed7795f61ac32375ce33a18046b8f2f5254
|
[
"MIT"
] | 201
|
2017-06-21T21:53:19.000Z
|
2022-03-17T13:23:55.000Z
|
import dash_html_components as html
import dash_core_components as dcc
from dash_docs.tools import relpath
from .Markdown import Markdown
def Chapter(name, href=None, caption=None, className='', chapter='', icon=''):
linkComponent = html.A if href.startswith('http') else dcc.Link
return html.Div(className='toc--chapter', children=[
html.Li([
html.I(className=icon, style={'width': 25}) if icon != '' else None,
linkComponent(
name,
href=relpath(href),
id=href,
className='toc--chapter-link ' + className
),
]),
html.Small(
className='toc--chapter-content',
children=Markdown(caption or ''),
style={
'display': 'block',
'marginTop': '-10px' if caption else ''
}
) if caption else None
])
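# Hedged usage sketch (names, paths and captions are illustrative only): Chapter() returns an
# html.Div, so a table of contents is simply a list of them, e.g.
#
#     toc = html.Div([
#         Chapter('Installation', href='/installation', caption='Getting set up'),
#         Chapter('Layout', href='/layout', caption='Describe your app', icon='fa fa-columns'),
#     ])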
| 33.851852
| 80
| 0.549234
|
5eeb40c1ede297d25c98ad543d0116e9893c363c
| 1,118
|
py
|
Python
|
app.py
|
AyushShaw/todo-flask
|
0a3335c91c83541e2d098b5633354ac6a743e2de
|
[
"MIT"
] | null | null | null |
app.py
|
AyushShaw/todo-flask
|
0a3335c91c83541e2d098b5633354ac6a743e2de
|
[
"MIT"
] | null | null | null |
app.py
|
AyushShaw/todo-flask
|
0a3335c91c83541e2d098b5633354ac6a743e2de
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
id = db.Column(db.Integer,primary_key=True)
content = db.Column(db.String(255),nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
def __repr__(self):
return '<Task {}>'.format(self.id)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method=='POST':
task_content=request.form['content']
new_task = Todo(content=task_content)
try:
print("Trying data Entry.")
db.session.add(new_task)
db.session.commit()
return redirect('/')
        except Exception:  # avoid a bare except that would also swallow SystemExit/KeyboardInterrupt
            return 'Issue in Post CodeBlock'
else:
task = Todo.query.order_by(Todo.date_created).all()
return render_template('index.html', tasks=task)
if __name__ == "__main__":
app.run(debug=True)
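# Hedged note: the Todo table must exist before the first request. With Flask-SQLAlchemy that
# is typically a one-time step from a Python shell (newer versions require an app context):
#
#     from app import app, db
#     with app.app_context():
#         db.create_all()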
| 27.268293
| 67
| 0.620751
|
dd0df19f02b08c93eee4eb48c9dbacd144d6bd47
| 414
|
py
|
Python
|
example/subuser/post_trade_market.py
|
bailzx5522/huobi_Python
|
d87cb11b44304c32da6e57c8ada8d03ee5fdb0e7
|
[
"Apache-2.0"
] | 611
|
2019-07-10T08:17:50.000Z
|
2022-03-21T18:56:39.000Z
|
example/subuser/post_trade_market.py
|
bailzx5522/huobi_Python
|
d87cb11b44304c32da6e57c8ada8d03ee5fdb0e7
|
[
"Apache-2.0"
] | 105
|
2019-07-12T03:43:41.000Z
|
2022-03-30T10:33:06.000Z
|
example/subuser/post_trade_market.py
|
bailzx5522/huobi_Python
|
d87cb11b44304c32da6e57c8ada8d03ee5fdb0e7
|
[
"Apache-2.0"
] | 325
|
2019-07-12T02:46:54.000Z
|
2022-03-21T18:56:41.000Z
|
from huobi.client.subuser import SubuserClient
from huobi.constant import *
from huobi.utils import *
subuser_client = SubuserClient(api_key=g_api_key, secret_key=g_secret_key)
subUids = '159284259'
accountType = SubuserTradePrivilegeType.MARGIN
activation = SubUserTradeStatus.DEACTIVATED
subUserList = subuser_client.post_set_tradable_market(subUids, accountType, activation)
LogInfo.output_list(subUserList)
| 31.846154
| 87
| 0.850242
|
360f92f5356106a4aec5e9aa8eca378c90277e03
| 3,761
|
py
|
Python
|
tests/oauth2/test_id_token.py
|
yhuang/google-auth-library-python
|
ccf2e502e0b15633956c007fae92e2404a6418ad
|
[
"Apache-2.0"
] | 1
|
2020-05-27T15:48:51.000Z
|
2020-05-27T15:48:51.000Z
|
tests/oauth2/test_id_token.py
|
yhuang/google-auth-library-python
|
ccf2e502e0b15633956c007fae92e2404a6418ad
|
[
"Apache-2.0"
] | null | null | null |
tests/oauth2/test_id_token.py
|
yhuang/google-auth-library-python
|
ccf2e502e0b15633956c007fae92e2404a6418ad
|
[
"Apache-2.0"
] | 1
|
2019-11-11T18:39:46.000Z
|
2019-11-11T18:39:46.000Z
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import pytest
from google.auth import exceptions
from google.auth import transport
from google.oauth2 import id_token
def make_request(status, data=None):
response = mock.create_autospec(transport.Response, instance=True)
response.status = status
if data is not None:
response.data = json.dumps(data).encode('utf-8')
request = mock.create_autospec(transport.Request)
request.return_value = response
return request
def test__fetch_certs_success():
certs = {'1': 'cert'}
request = make_request(200, certs)
returned_certs = id_token._fetch_certs(request, mock.sentinel.cert_url)
request.assert_called_once_with(mock.sentinel.cert_url, method='GET')
assert returned_certs == certs
def test__fetch_certs_failure():
request = make_request(404)
with pytest.raises(exceptions.TransportError):
id_token._fetch_certs(request, mock.sentinel.cert_url)
request.assert_called_once_with(mock.sentinel.cert_url, method='GET')
@mock.patch('google.auth.jwt.decode', autospec=True)
@mock.patch('google.oauth2.id_token._fetch_certs', autospec=True)
def test_verify_token(_fetch_certs, decode):
result = id_token.verify_token(mock.sentinel.token, mock.sentinel.request)
assert result == decode.return_value
_fetch_certs.assert_called_once_with(
mock.sentinel.request, id_token._GOOGLE_OAUTH2_CERTS_URL)
decode.assert_called_once_with(
mock.sentinel.token,
certs=_fetch_certs.return_value,
audience=None)
@mock.patch('google.auth.jwt.decode', autospec=True)
@mock.patch('google.oauth2.id_token._fetch_certs', autospec=True)
def test_verify_token_args(_fetch_certs, decode):
result = id_token.verify_token(
mock.sentinel.token,
mock.sentinel.request,
audience=mock.sentinel.audience,
certs_url=mock.sentinel.certs_url)
assert result == decode.return_value
_fetch_certs.assert_called_once_with(
mock.sentinel.request, mock.sentinel.certs_url)
decode.assert_called_once_with(
mock.sentinel.token,
certs=_fetch_certs.return_value,
audience=mock.sentinel.audience)
@mock.patch('google.oauth2.id_token.verify_token', autospec=True)
def test_verify_oauth2_token(verify_token):
result = id_token.verify_oauth2_token(
mock.sentinel.token,
mock.sentinel.request,
audience=mock.sentinel.audience)
assert result == verify_token.return_value
verify_token.assert_called_once_with(
mock.sentinel.token,
mock.sentinel.request,
audience=mock.sentinel.audience,
certs_url=id_token._GOOGLE_OAUTH2_CERTS_URL)
@mock.patch('google.oauth2.id_token.verify_token', autospec=True)
def test_verify_firebase_token(verify_token):
result = id_token.verify_firebase_token(
mock.sentinel.token,
mock.sentinel.request,
audience=mock.sentinel.audience)
assert result == verify_token.return_value
verify_token.assert_called_once_with(
mock.sentinel.token,
mock.sentinel.request,
audience=mock.sentinel.audience,
certs_url=id_token._GOOGLE_APIS_CERTS_URL)
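# Hedged application-side sketch of the API exercised by these tests (the token and client id
# are placeholders; uses the optional requests transport shipped with google-auth):
#
#     from google.auth.transport import requests as google_requests
#     from google.oauth2 import id_token
#     claims = id_token.verify_oauth2_token(raw_token, google_requests.Request(), audience=CLIENT_ID)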
| 32.422414
| 78
| 0.742888
|
df6f247ff1a917d043c3d95694c6cea1c8add65a
| 979
|
py
|
Python
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_rdbms/postgresql/models/virtual_network_rule_paged.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_rdbms/postgresql/models/virtual_network_rule_paged.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_rdbms/postgresql/models/virtual_network_rule_paged.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class VirtualNetworkRulePaged(Paged):
"""
    A paging container for iterating over a list of :class:`VirtualNetworkRule <azure.mgmt.rdbms.postgresql.models.VirtualNetworkRule>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[VirtualNetworkRule]'}
}
def __init__(self, *args, **kwargs):
super(VirtualNetworkRulePaged, self).__init__(*args, **kwargs)
| 34.964286
| 142
| 0.592441
|
a5be4c8a7562c5766de5d64b5112e734682f2410
| 22,381
|
py
|
Python
|
azimuth/load_data.py
|
bowhan/Azimuth
|
d49ea6ee97efa67af4081631c75c333f724cc18a
|
[
"BSD-3-Clause"
] | null | null | null |
azimuth/load_data.py
|
bowhan/Azimuth
|
d49ea6ee97efa67af4081631c75c333f724cc18a
|
[
"BSD-3-Clause"
] | null | null | null |
azimuth/load_data.py
|
bowhan/Azimuth
|
d49ea6ee97efa67af4081631c75c333f724cc18a
|
[
"BSD-3-Clause"
] | 1
|
2021-10-05T14:42:17.000Z
|
2021-10-05T14:42:17.000Z
|
import pandas
from . import util
import matplotlib.pyplot as plt
import scipy as sp
import scipy.stats
import numpy as np
import os
cur_dir = os.path.dirname(os.path.abspath(__file__))
def from_custom_file(data_file, learn_options):
# use semantics of when we load V2 data
print("Loading inputs to predict from %s" % data_file)
data = pandas.read_csv(data_file)
mandatory_columns = ['30mer', 'Target gene', 'Percent Peptide', 'Amino Acid Cut position']
for col in mandatory_columns:
assert col in data.columns, "inputs for prediction must include these columns: %s" % mandatory_columns
Xdf = pandas.DataFrame(data)
Xdf['30mercopy'] = Xdf['30mer']
Xdf = Xdf.set_index(['30mer', 'Target gene'])
Xdf['30mer'] = Xdf['30mercopy']
Xdf.index.names = ['Sequence', 'Target']
Xdf['drug']= ['dummydrug%s' % i for i in range(Xdf.shape[0])]
Xdf = Xdf.set_index('drug', append=True)
Y = None
gene_position = Xdf[['Percent Peptide', 'Amino Acid Cut position']]
target_genes = np.unique(Xdf.index.levels[1])
learn_options = set_V2_target_names(learn_options)
return Xdf, Y, gene_position, target_genes
def from_file(data_file, learn_options, data_file2=None, data_file3=None):
if learn_options["V"] == 1: # from Nature Biotech paper
print("loading V%d data" % learn_options["V"])
assert not learn_options["weighted"] is not None, "not supported for V1 data"
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options)
learn_options['binary target name'] = 'average threshold'
learn_options['rank-transformed target name'] = 'average rank'
learn_options['raw target name'] = 'average activity'
# NF: not sure why the line below was uncommented
# gene_position, selected_ind, target_genes, Xdf, Y = extract_by_organism("mouse", Xdf, Y, gene_position)
elif learn_options["V"] == 2: # from Nov 2014, hot off the machines
Xdf, drugs_to_genes, target_genes, Y, gene_position = read_V2_data(data_file, learn_options)
# check that data is consistent with sgRNA score
xx = Xdf['sgRNA Score'].values
yy = Y['score_drug_gene_rank'].values
rr,pp = sp.stats.pearsonr(xx, yy)
assert rr > 0, "data processing has gone wrong as correlation with previous predictions is negative"
learn_options = set_V2_target_names(learn_options)
elif learn_options["V"] == 3: # merge of V1 and V2--this is what is used for the final model
# these are relative to the V2 data, and V1 will be made to automatically match
learn_options['binary target name'] = 'score_drug_gene_threshold'
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = None
Xdf, Y, gene_position, target_genes = mergeV1_V2(data_file, data_file2, learn_options)
elif learn_options["V"] == 4: # merge of V1 and V2 and the Xu et al data
# these are relative to the V2 data, and V1 and Xu et al. will be made to automatically match
learn_options['binary target name'] = 'score_drug_gene_threshold'
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = None
Xdf, Y, gene_position, target_genes = merge_all(data_file, data_file2, data_file3, learn_options)
elif learn_options['V'] == 5:
learn_options['binary target name'] = 'score_drug_gene_threshold'
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = None
gene_position, target_genes, Xdf, Y = read_xu_et_al(data_file3)
# truncate down to 30--some data sets gave us more.
Xdf["30mer"] = Xdf["30mer"].apply(lambda x: x[0:30])
return Xdf, Y, gene_position, target_genes
def set_V2_target_names(learn_options):
if 'binary target name' not in list(learn_options.keys()):
learn_options['binary target name'] = 'score_drug_gene_threshold'
if 'rank-transformed target name' not in list(learn_options.keys()):
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = 'score'
return learn_options
def combine_organisms(human_data, mouse_data):
# 'Target' is the column name, 'CD13' are some rows in that column
# xs slices through the pandas data frame to return another one
cd13 = human_data.xs('CD13', level='Target', drop_level=False)
# y_names are column names, cd13 is a pandas object
X_CD13, Y_CD13 = util.get_data(cd13, y_names=['NB4 CD13', 'TF1 CD13'])
cd33 = human_data.xs('CD33', level='Target', drop_level=False)
X_CD33, Y_CD33 = util.get_data(cd33, y_names=['MOLM13 CD33', 'TF1 CD33', 'NB4 CD33'])
cd15 = human_data.xs('CD15', level='Target', drop_level=False)
X_CD15, Y_CD15 = util.get_data(cd15, y_names=['MOLM13 CD15'])
mouse_X = pandas.DataFrame()
mouse_Y = pandas.DataFrame()
for k in mouse_data.index.levels[1]:
# is k the gene
X, Y = util.get_data(mouse_data.xs(k, level='Target', drop_level=False), ["On-target Gene"], target_gene=k, organism='mouse')
mouse_X = pandas.concat([mouse_X, X], axis=0)
mouse_Y = pandas.concat([mouse_Y, Y], axis=0)
X = pandas.concat([X_CD13, X_CD15, X_CD33, mouse_X], axis=0)
Y = pandas.concat([Y_CD13, Y_CD15, Y_CD33, mouse_Y], axis=0)
return X, Y
def read_V1_data(data_file, learn_options, AML_file=cur_dir + "/data/V1_suppl_data.txt"):
if data_file is None:
data_file = cur_dir + "/data/V1_data.xlsx"
human_data = pandas.read_excel(data_file, sheet_name=0, index_col=[0, 1])
mouse_data = pandas.read_excel(data_file, sheet_name=1, index_col=[0, 1])
Xdf, Y = combine_organisms(human_data, mouse_data)
# get position within each gene, then join and re-order
    # note that there are 11 missing guides that we were told to ignore
annotations = pandas.read_csv(AML_file, delimiter='\t', index_col=[0, 4])
annotations.index.names = Xdf.index.names
gene_position = pandas.merge(Xdf, annotations, how="inner", left_index=True, right_index=True)
gene_position = util.impute_gene_position(gene_position)
gene_position = gene_position[['Amino Acid Cut position', 'Nucleotide cut position', 'Percent Peptide']]
Y = Y.loc[gene_position.index]
Xdf = Xdf.loc[gene_position.index]
    Y['test'] = 1  # for bookkeeping, to keep consistent with V2 which uses this for "extra pairs"
target_genes = Y['Target gene'].unique()
Y.index.names = ['Sequence', 'Target gene']
assert Xdf.index.equals(Y.index), "The index of Xdf is different from the index of Y (this can cause inconsistencies/random performance later on)"
if learn_options is not None and learn_options["flipV1target"]:
print("************************************************************************")
print("*****************MATCHING DOENCH CODE (DEBUG MODE)**********************")
print("************************************************************************")
# normally it is: Y['average threshold'] = Y['average rank'] > 0.8, where 1s are good guides, 0s are not
Y['average threshold'] = Y['average rank'] < 0.2 # 1s are bad guides
print("press c to continue")
import ipdb
ipdb.set_trace()
return annotations, gene_position, target_genes, Xdf, Y
def rank_transform(x):
return 1.0 - sp.stats.mstats.rankdata(x)/sp.stats.mstats.rankdata(x).max()
def read_xu_et_al(data_file, learn_options=None, verbose=True, subsetting='ours'):
if data_file is None:
data_file = '../data/xu_et_al_data.xlsx'
datasets = ['ribo', 'non_ribo', 'mESC']
aggregated = None
for d in datasets:
data_efficient = pandas.read_excel(data_file, sheet_name='%s_efficient_sgRNA' % d, skiprows=2)
data_inefficient = pandas.read_excel(data_file, sheet_name='%s_inefficient_sgRNA' % d, skiprows=2)
data_efficient['threshold'] = 1.
data_inefficient['threshold'] = 0.
exp_data = pandas.concat((data_efficient, data_inefficient))
exp_data['rank_KBM7'] = exp_data.groupby('Gene Symbol')['log2 fold change, KBM7'].transform(rank_transform)
exp_data['rank_HL60'] = exp_data.groupby('Gene Symbol')['log2 fold change, HL60'].transform(rank_transform)
if aggregated is None:
aggregated = exp_data
else:
aggregated = pandas.concat((aggregated, exp_data))
# go from 40mer to 30mer
if subsetting == 'ours':
aggregated["sequence(target+3'+5')"] = aggregated["sequence(target+3'+5')"].apply(lambda x: x[6:-4])
else:
aggregated["sequence(target+3'+5')"] = aggregated["sequence(target+3'+5')"].apply(lambda x: x[10:])
    # make sure EVERYTHING is uppercase
aggregated["sequence(target+3'+5')"] = aggregated["sequence(target+3'+5')"].apply(lambda x: x.upper())
# rename columns
aggregated.rename(columns={"sequence(target+3'+5')": '30mer', 'Gene Symbol': 'Target gene', 'strand':'Strand'}, inplace=True)
aggregated['Strand'].loc[aggregated['Strand']=='+'] = 'sense'
aggregated['Strand'].loc[aggregated['Strand']=='-'] = 'antisense'
aggregated['average rank'] = aggregated[['rank_HL60', 'rank_KBM7']].mean(axis=1)
df = aggregated
df = df.rename(columns={'30mer': 'Sequence', 'Target gene': 'Target'})
df['drug'] = 'nodrug'
df['test'] = 1
df = df.set_index(['Sequence', 'Target', 'drug'])
df['30mer'] = df.index.get_level_values(0)
df['Target gene'] = df.index.get_level_values(1)
df['Organism'] = 'unknown'
df['score_drug_gene_rank'] = df['average rank']
df['score_drug_gene_threshold'] = df['threshold']
df['Nucleotide cut position'] = df['start of target']
df['Percent Peptide'] = 0
df['Amino Acid Cut position'] = 0
target_genes = np.unique(df['Target gene'].values)
return df[['Nucleotide cut position', 'Percent Peptide', 'Amino Acid Cut position']], target_genes, df[['30mer', 'Strand']], df[['score_drug_gene_rank', 'score_drug_gene_threshold', 'test', 'Target gene']]
def read_V2_data(data_file, learn_options=None, verbose=True):
if data_file is None:
data_file = cur_dir + "/data/V2_data.xlsx"
# to compare
# import predict as pr; a1, g1, t1, X1, Y1 = pr.data_setup()
# a1.index.names
data = pandas.read_excel(data_file, sheet_name="ResultsFiltered", skiprows=list(range(0, 6+1)), index_col=[0, 4])
    # grab data relevant to each of three drugs, which excludes some genes
# note gene MED12 has two drugs, all others have at most one
Xdf = pandas.DataFrame()
# This comes from the "Pairs" tab in their excel sheet,
# note HPRT/HPRT1 are same thing, and also PLX_2uM/PLcX_2uM
known_pairs = {'AZD_200nM': ['CCDC101', 'MED12', 'TADA2B', 'TADA1'],
'6TG_2ug/mL': ['HPRT1'],
'PLX_2uM': ['CUL3', 'NF1', 'NF2', 'MED12']}
drugs_to_genes = {'AZD_200nM': ['CCDC101', 'MED12', 'TADA2B', 'TADA1'],
'6TG_2ug/mL': ['HPRT1'],
'PLX_2uM': ['CUL3', 'NF1', 'NF2', 'MED12']}
if learn_options is not None:
assert not (learn_options['extra pairs'] and learn_options['all pairs']), "extra pairs and all pairs options (in learn_options) can't be active simultaneously."
if learn_options['extra pairs']:
drugs_to_genes['AZD_200nM'].extend(['CUL3', 'NF1', 'NF2'])
elif learn_options['all pairs']:
drugs_to_genes['AZD_200nM'].extend(['HPRT1', 'CUL3', 'NF1', 'NF2'])
drugs_to_genes['PLX_2uM'].extend(['HPRT1', 'CCDC101', 'TADA2B', 'TADA1'])
drugs_to_genes['6TG_2ug/mL'].extend(['CCDC101', 'MED12', 'TADA2B', 'TADA1', 'CUL3', 'NF1', 'NF2'])
count = 0
for drug in list(drugs_to_genes.keys()):
genes = drugs_to_genes[drug]
for g in genes:
Xtmp = data.copy().xs(g, level='Target gene', drop_level=False)
Xtmp['drug'] = drug
Xtmp['score'] = Xtmp[drug].copy() # grab the drug results that are relevant for this gene
if g in known_pairs[drug]:
Xtmp['test'] = 1.
else:
Xtmp['test'] = 0.
count = count + Xtmp.shape[0]
Xdf = pandas.concat([Xdf, Xtmp], axis=0)
if verbose:
print("Loaded %d samples for gene %s \ttotal number of samples: %d" % (Xtmp.shape[0], g, count))
# create new index that includes the drug
Xdf = Xdf.set_index('drug', append=True)
Y = pandas.DataFrame(Xdf.pop("score"))
Y.columns.names = ["score"]
test_gene = pandas.DataFrame(Xdf.pop('test'))
target = pandas.DataFrame(Xdf.index.get_level_values('Target gene').values, index=Y.index, columns=["Target gene"])
Y = pandas.concat((Y, target, test_gene), axis=1)
target_genes = Y['Target gene'].unique()
gene_position = Xdf[["Percent Peptide", "Amino Acid Cut position"]].copy()
# convert to ranks for each (gene, drug combo)
# flip = True
y_rank = pandas.DataFrame()
y_threshold = pandas.DataFrame()
y_quant = pandas.DataFrame()
for drug in list(drugs_to_genes.keys()):
gene_list = drugs_to_genes[drug]
for gene in gene_list:
ytmp = pandas.DataFrame(Y.xs((gene, drug), level=["Target gene", "drug"], drop_level=False)['score'])
y_ranktmp, y_rank_raw, y_thresholdtmp, y_quanttmp = util.get_ranks(ytmp, thresh=0.8, prefix="score_drug_gene", flip=False)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pandas.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pandas.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pandas.concat((y_quant, y_quanttmp), axis=0)
yall = pandas.concat((y_rank, y_threshold, y_quant), axis=1)
Y = pandas.merge(Y, yall, how='inner', left_index=True, right_index=True)
# convert also by drug only, irrespective of gene
y_rank = pandas.DataFrame()
y_threshold = pandas.DataFrame()
y_quant = pandas.DataFrame()
for drug in list(drugs_to_genes.keys()):
ytmp = pandas.DataFrame(Y.xs(drug, level="drug", drop_level=False)['score'])
y_ranktmp, y_rank_raw, y_thresholdtmp, y_quanttmp = util.get_ranks(ytmp, thresh=0.8, prefix="score_drug", flip=False)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pandas.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pandas.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pandas.concat((y_quant, y_quanttmp), axis=0)
yall = pandas.concat((y_rank, y_threshold, y_quant), axis=1)
Y = pandas.merge(Y, yall, how='inner', left_index=True, right_index=True)
PLOT = False
if PLOT:
# to better understand, try plotting something like:
labels = ["score", "score_drug_gene_rank", "score_drug_rank", "score_drug_gene_threshold", "score_drug_threshold"]
for label in labels:
plt.figure()
plt.plot(Xdf['sgRNA Score'].values, Y[label].values, '.')
r, pearp = sp.stats.pearsonr(Xdf['sgRNA Score'].values.flatten(), Y[label].values.flatten())
plt.title(label + ' VS pred. score, $r$=%0.2f (p=%0.2e)' % (r, pearp))
plt.xlabel("sgRNA prediction score")
plt.ylabel(label)
gene_position = util.impute_gene_position(gene_position)
if learn_options is not None and learn_options["weighted"] == "variance":
print("computing weights from replicate variance...")
# compute the variance across replicates so can use it as a weight
data = pandas.read_excel(data_file, sheet_name="Normalized", skiprows=list(range(0, 6+1)), index_col=[0, 4])
data.index.names = ["Sequence", "Target gene"]
experiments = {}
experiments['AZD_200nM'] = ['Deep 25', 'Deep 27', 'Deep 29 ', 'Deep 31']
experiments['6TG_2ug/mL'] = ['Deep 33', 'Deep 35', 'Deep 37', 'Deep 39']
experiments['PLX_2uM'] = ['Deep 49', 'Deep 51', 'Deep 53', 'Deep 55']
variance = None
for drug in list(drugs_to_genes.keys()):
data_tmp = data.iloc[data.index.get_level_values('Target gene').isin(drugs_to_genes[drug])][experiments[drug]]
data_tmp["drug"] = drug
data_tmp = data_tmp.set_index('drug', append=True)
data_tmp["variance"] = np.var(data_tmp.values, axis=1)
if variance is None:
variance = data_tmp["variance"].copy()
else:
variance = pandas.concat((variance, data_tmp["variance"]), axis=0)
orig_index = Y.index.copy()
Y = pandas.merge(Y, pandas.DataFrame(variance), how="inner", left_index=True, right_index=True)
Y = Y.ix[orig_index]
print("done.")
# Make sure to keep this check last in this function
assert Xdf.index.equals(Y.index), "The index of Xdf is different from the index of Y (this can cause inconsistencies/random performance later on)"
return Xdf, drugs_to_genes, target_genes, Y, gene_position
def merge_all(data_file=None, data_file2=None, data_file3=None, learn_options=None):
Xdf, Y, gene_position, target_genes = mergeV1_V2(data_file, data_file2, learn_options)
gene_position_xu, target_genes_xu, Xdf_xu, Y_xu = read_xu_et_al(data_file3, learn_options)
Xdf = pandas.concat((Xdf, Xdf_xu))
Y = pandas.concat((Y, Y_xu))
gene_position = pandas.concat((gene_position, gene_position_xu))
target_genes = np.concatenate((target_genes, target_genes_xu))
return Xdf, Y, gene_position, target_genes
def mergeV1_V2(data_file, data_file2, learn_options):
'''
ground_truth_label, etc. are taken to correspond to the V2 data, and then the V1 is appropriately matched
based on semantics
'''
assert not learn_options['include_strand'], "don't currently have 'Strand' column in V1 data"
annotations, gene_position1, target_genes1, Xdf1, Y1 = read_V1_data(data_file, learn_options)
Xdf2, drugs_to_genes, target_genes2, Y2, gene_position2 = read_V2_data(data_file2)
Y1.rename(columns={'average rank': learn_options["rank-transformed target name"]}, inplace=True)
Y1.rename(columns={'average threshold': learn_options["binary target name"]}, inplace=True)
# rename columns, and add a dummy "drug" to V1 so can join the data sets
Y1["drug"] = ["nodrug" for x in range(Y1.shape[0])]
Y1 = Y1.set_index('drug', append=True)
Y1.index.names = ['Sequence', 'Target gene', 'drug']
Y_cols_to_keep = np.unique(['Target gene', 'test', 'score_drug_gene_rank', 'score_drug_gene_threshold'])
Y1 = Y1[Y_cols_to_keep]
Y2 = Y2[Y_cols_to_keep]
Xdf1["drug"] = ["nodrug" for x in range(Xdf1.shape[0])]
Xdf1 = Xdf1.set_index('drug', append=True)
X_cols_to_keep = ['30mer', 'Strand']
Xdf1 = Xdf1[X_cols_to_keep]
Xdf2 = Xdf2[X_cols_to_keep]
gene_position1["drug"] = ["nodrug" for x in range(gene_position1.shape[0])]
gene_position1 = gene_position1.set_index('drug', append=True)
gene_position1.index.names = ['Sequence', 'Target gene', 'drug']
cols_to_keep = ['Percent Peptide', 'Amino Acid Cut position']
gene_position1 = gene_position1[cols_to_keep]
gene_position2 = gene_position2[cols_to_keep]
Y = pandas.concat((Y1, Y2), axis=0)
Xdf = pandas.concat((Xdf1, Xdf2), axis=0)
gene_position = pandas.concat((gene_position1, gene_position2))
# target_genes = target_genes1 + target_genes2
target_genes = np.concatenate((target_genes1, target_genes2))
save_to_file = False
if save_to_file:
Y.index.names = ['Sequence', 'Target', 'drug']
assert np.all(Xdf.index.values==Y.index.values), "rows don't match up"
onedupind = np.where(Y.index.duplicated())[0][0]
alldupind = np.where(Y.index.get_level_values(0).values==Y.index[onedupind][0])[0]
#arbitrarily set one of these to have "nodrug2" as the third level index
#so that they are not repeated, and the joints therefore do not augment the data set
assert len(alldupind)==2, "expected only duplicates"
newindex = Y.index.tolist()
newindex[onedupind] = (newindex[onedupind][0], newindex[onedupind][1], "nodrug2")
Y.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names)
Xdf.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names)
# there seems to be a duplicate index, and thus this increases the data set size, so doing it the hacky way...
XandY = pandas.merge(Xdf, Y, how="inner", left_index=True, right_index=True)
gene_position_tmp = gene_position.copy()
gene_position_tmp.index.names = ['Sequence', 'Target', 'drug']
gene_position_tmp.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names)
XandY = pandas.merge(XandY, gene_position_tmp, how="inner", left_index=True, right_index=True)
# truncate to 30mers
XandY["30mer"] = XandY["30mer"].apply(lambda x: x[0:30])
XandY.to_csv(r'D:\Source\CRISPR\data\tmp\V3.csv')
return Xdf, Y, gene_position, target_genes
def get_V1_genes(data_file=None):
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options=None)
return target_genes
def get_V2_genes(data_file=None):
Xdf, drugs_to_genes, target_genes, Y, gene_position = read_V2_data(data_file, verbose=False)
return target_genes
def get_V3_genes(data_fileV1=None, data_fileV2=None):
target_genes = np.concatenate((get_V1_genes(data_fileV1), get_V2_genes(data_fileV2)))
return target_genes
def get_xu_genes(data_file=None):
return read_xu_et_al(data_file)[1]
def get_mouse_genes(data_file=None):
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options=None)
return Xdf[Xdf['Organism'] == 'mouse']['Target gene'].unique()
def get_human_genes(data_file=None):
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options=None)
mouse_genes = Xdf[Xdf['Organism'] == 'mouse']['Target gene'].unique()
all_genes = get_V3_genes(None, None) # TODO this needs to support specifying file names (!= 'None')
return np.setdiff1d(all_genes, mouse_genes)
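# Tiny self-contained check of rank_transform() above: ranks are flipped so the largest
# input maps to 0.0 and the smallest maps closest to 1.0.
if __name__ == "__main__":
    example = np.array([0.1, 0.5, 0.3])
    print(rank_transform(example))  # approximately [0.667, 0.0, 0.333]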
| 46.43361
| 209
| 0.663822
|
ea33183c689686f9df52f9bbc4b7c6d967dd9d72
| 839
|
py
|
Python
|
examples/multilayer_neural_network.py
|
TK-21st/Neuroballad
|
6d4800e969c35b0f2d64897db24b734a9daaa160
|
[
"BSD-3-Clause"
] | null | null | null |
examples/multilayer_neural_network.py
|
TK-21st/Neuroballad
|
6d4800e969c35b0f2d64897db24b734a9daaa160
|
[
"BSD-3-Clause"
] | null | null | null |
examples/multilayer_neural_network.py
|
TK-21st/Neuroballad
|
6d4800e969c35b0f2d64897db24b734a9daaa160
|
[
"BSD-3-Clause"
] | null | null | null |
from neuroballad import * #Import Neuroballad
# Create a circuit
C = Circuit()
# Create 784 LeakyIAF neurons and get their ID's
in_neurons = C.add_cluster(784, LeakyIAF())
# Create 32 Hodgkin-Huxley neurons and get their ID's
middle_neurons = C.add_cluster(32, HodgkinHuxley())
# Join nodes together via alpha synapses
C.dense_connect_via(in_neurons, middle_neurons, AlphaSynapse())
# Create 10 more Hodgkin-Huxley neurons and get their ID's
out_neurons = C.add_cluster(10, HodgkinHuxley())
# Join nodes together via alpha synapses
C.dense_connect_via(middle_neurons, out_neurons, AlphaSynapse())
# Create inputs for the first set of neurons
input_list = []
for i in in_neurons:
input_list.append(InIStep(i, 40., 0.25, 0.50))
# Simulate the circuit
C.sim(1., 1e-4, input_list)
sim_results = C.collect_results() #Get simulation results
| 39.952381
| 64
| 0.77354
|
4d786ce53bf4f41b8ff352eec66640dd8755af58
| 2,955
|
py
|
Python
|
datcore-sdk/python/datcore_sdk/models/create_consortium.py
|
mguidon/aiohttp-dsm
|
612e4c7f6f73df7d6752269965c428fda0276191
|
[
"MIT"
] | null | null | null |
datcore-sdk/python/datcore_sdk/models/create_consortium.py
|
mguidon/aiohttp-dsm
|
612e4c7f6f73df7d6752269965c428fda0276191
|
[
"MIT"
] | null | null | null |
datcore-sdk/python/datcore_sdk/models/create_consortium.py
|
mguidon/aiohttp-dsm
|
612e4c7f6f73df7d6752269965c428fda0276191
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Blackfynn Swagger
Swagger documentation for the Blackfynn api # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CreateConsortium(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None): # noqa: E501
"""CreateConsortium - a model defined in OpenAPI""" # noqa: E501
self._name = None
self.discriminator = None
self.name = name
@property
def name(self):
"""Gets the name of this CreateConsortium. # noqa: E501
:return: The name of this CreateConsortium. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreateConsortium.
:param name: The name of this CreateConsortium. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateConsortium):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
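# Hedged usage sketch (the consortium name is illustrative): the generated model is a thin
# value holder with validation in the `name` setter.
if __name__ == "__main__":
    body = CreateConsortium(name="genomics-consortium")
    print(body.to_dict())  # {'name': 'genomics-consortium'}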
| 25.921053
| 90
| 0.548223
|
8a5f8e4c15f6fe7ae0c84b81a60209edf51c3c8b
| 11,977
|
py
|
Python
|
rslgym/wrapper/script/base_vec_env.py
|
mcx/RSLGym
|
9211c8c23042c7a56802751f8d7cfd4e7248d7a2
|
[
"MIT"
] | 13
|
2021-04-16T07:14:48.000Z
|
2022-03-14T04:20:03.000Z
|
rslgym/wrapper/script/base_vec_env.py
|
mcx/RSLGym
|
9211c8c23042c7a56802751f8d7cfd4e7248d7a2
|
[
"MIT"
] | null | null | null |
rslgym/wrapper/script/base_vec_env.py
|
mcx/RSLGym
|
9211c8c23042c7a56802751f8d7cfd4e7248d7a2
|
[
"MIT"
] | 2
|
2021-11-02T06:22:27.000Z
|
2021-12-21T06:16:17.000Z
|
# The MIT License
#
# Copyright (c) 2017 OpenAI (http://openai.com)
# Copyright (c) 2018-2019 Stable-Baselines Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from abc import ABC, abstractmethod
import inspect
import pickle
from typing import Sequence, Optional, List, Union
import cloudpickle
import numpy as np
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
:param num_envs: (int) the number of environments
:param observation_space: (Gym Space) the observation space
:param action_space: (Gym Space) the action space
"""
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
:return: ([int] or [float]) observation
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
:return: ([int] or [float], [float], [bool], dict) observation, reward, done, information
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environment's resources.
"""
pass
@abstractmethod
def get_attr(self, attr_name, indices=None):
"""
Return attribute from vectorized environment.
:param attr_name: (str) The name of the attribute whose value to return
:param indices: (list,int) Indices of envs to get attribute from
:return: (list) List of values of 'attr_name' in all environments
"""
pass
@abstractmethod
def set_attr(self, attr_name, value, indices=None):
"""
Set attribute inside vectorized environments.
:param attr_name: (str) The name of attribute to assign new value
:param value: (obj) Value to assign to `attr_name`
:param indices: (list,int) Indices of envs to assign value
:return: (NoneType)
"""
pass
@abstractmethod
def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
"""
Call instance methods of vectorized environments.
:param method_name: (str) The name of the environment method to invoke.
:param indices: (list,int) Indices of envs whose method to call
:param method_args: (tuple) Any positional arguments to provide in the call
:param method_kwargs: (dict) Any keyword arguments to provide in the call
:return: (list) List of items returned by the environment's method call
"""
pass
@abstractmethod
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
"""
Sets the random seeds for all environments, based on a given seed.
Each individual environment will still get its own seed, by incrementing the given seed.
:param seed: (Optional[int]) The random seed. May be None for completely random seeding.
:return: (List[Union[None, int]]) Returns a list containing the seeds for each individual env.
Note that all list elements may be None, if the env does not return anything when being seeded.
"""
pass
def step(self, actions):
"""
Step the environments with the given action
:param actions: ([int] or [float]) the action
:return: ([int] or [float], [float], [bool], dict) observation, reward, done, information
"""
self.step_async(actions)
return self.step_wait()
def get_images(self) -> Sequence[np.ndarray]:
"""
Return RGB images from each environment
"""
raise NotImplementedError
def render(self, mode: str = 'human'):
"""
Gym environment rendering
:param mode: the rendering type
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def getattr_depth_check(self, name, already_found):
"""Check if an attribute reference is being hidden in a recursive call to __getattr__
:param name: (str) name of attribute to check for
:param already_found: (bool) whether this attribute has already been found in a wrapper
:return: (str or None) name of module whose attribute is being shadowed, if any.
"""
if hasattr(self, name) and already_found:
return "{0}.{1}".format(type(self).__module__, type(self).__name__)
else:
return None
def _get_indices(self, indices):
"""
Convert a flexibly-typed reference to environment indices to an implied list of indices.
:param indices: (None,int,Iterable) refers to indices of envs.
:return: (list) the implied list of indices.
"""
if indices is None:
indices = range(self.num_envs)
elif isinstance(indices, int):
indices = [indices]
return indices
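# --- Editor's sketch (not part of the original module): a minimal concrete
# VecEnv showing the step_async()/step_wait() contract documented above.
# The class name, the integer "counter" sub-environments and the use of None
# for both spaces are invented purely for illustration.
class _ToyVecEnv(VecEnv):
    def __init__(self, num_envs=2):
        super(_ToyVecEnv, self).__init__(num_envs=num_envs,
                                         observation_space=None,
                                         action_space=None)
        self.counters = [0] * num_envs      # stands in for real sub-environments
        self._pending_actions = None
    def reset(self):
        self.counters = [0] * self.num_envs
        return list(self.counters)
    def step_async(self, actions):
        # A real implementation would hand the actions to worker processes here.
        self._pending_actions = actions
    def step_wait(self):
        obs = [c + a for c, a in zip(self.counters, self._pending_actions)]
        self.counters = obs
        rewards = [float(o) for o in obs]
        dones = [False] * self.num_envs
        infos = [{} for _ in range(self.num_envs)]
        return obs, rewards, dones, infos
    def close(self):
        pass
    def get_attr(self, attr_name, indices=None):
        # A real VecEnv would read the attribute from each wrapped env.
        return [getattr(self, attr_name) for _ in self._get_indices(indices)]
    def set_attr(self, attr_name, value, indices=None):
        for _ in self._get_indices(indices):
            setattr(self, attr_name, value)
    def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
        return [getattr(self, method_name)(*method_args, **method_kwargs)
                for _ in self._get_indices(indices)]
    def seed(self, seed=None):
        return [None] * self.num_envs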
class VecEnvWrapper(VecEnv):
"""
Vectorized environment base class
:param venv: (VecEnv) the vectorized environment to wrap
:param observation_space: (Gym Space) the observation space (can be None to load from venv)
:param action_space: (Gym Space) the action space (can be None to load from venv)
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self, num_envs=venv.num_envs, observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
self.class_attributes = dict(inspect.getmembers(self.__class__))
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def seed(self, seed=None):
return self.venv.seed(seed)
def close(self):
return self.venv.close()
def render(self, mode: str = 'human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def get_attr(self, attr_name, indices=None):
return self.venv.get_attr(attr_name, indices)
def set_attr(self, attr_name, value, indices=None):
return self.venv.set_attr(attr_name, value, indices)
def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
return self.venv.env_method(method_name, *method_args, indices=indices, **method_kwargs)
def __getattr__(self, name):
"""Find attribute from wrapped venv(s) if this wrapper does not have it.
Useful for accessing attributes from venvs which are wrapped with multiple wrappers
which have unique attributes of interest.
"""
blocked_class = self.getattr_depth_check(name, already_found=False)
if blocked_class is not None:
own_class = "{0}.{1}".format(type(self).__module__, type(self).__name__)
format_str = ("Error: Recursive attribute lookup for {0} from {1} is "
"ambiguous and hides attribute from {2}")
raise AttributeError(format_str.format(name, own_class, blocked_class))
return self.getattr_recursive(name)
def _get_all_attributes(self):
"""Get all (inherited) instance and class attributes
:return: (dict<str, object>) all_attributes
"""
all_attributes = self.__dict__.copy()
all_attributes.update(self.class_attributes)
return all_attributes
def getattr_recursive(self, name):
"""Recursively check wrappers to find attribute.
:param name (str) name of attribute to look for
:return: (object) attribute
"""
all_attributes = self._get_all_attributes()
if name in all_attributes: # attribute is present in this wrapper
attr = getattr(self, name)
elif hasattr(self.venv, 'getattr_recursive'):
# Attribute not present, child is wrapper. Call getattr_recursive rather than getattr
# to avoid a duplicate call to getattr_depth_check.
attr = self.venv.getattr_recursive(name)
else: # attribute not present, child is an unwrapped VecEnv
attr = getattr(self.venv, name)
return attr
def getattr_depth_check(self, name, already_found):
"""See base class.
:return: (str or None) name of module whose attribute is being shadowed, if any.
"""
all_attributes = self._get_all_attributes()
if name in all_attributes and already_found:
# this venv's attribute is being hidden because of a higher venv.
shadowed_wrapper_class = "{0}.{1}".format(type(self).__module__, type(self).__name__)
elif name in all_attributes and not already_found:
# we have found the first reference to the attribute. Now check for duplicates.
shadowed_wrapper_class = self.venv.getattr_depth_check(name, True)
else:
# this wrapper does not have the attribute. Keep searching.
shadowed_wrapper_class = self.venv.getattr_depth_check(name, already_found)
return shadowed_wrapper_class
class CloudpickleWrapper(object):
def __init__(self, var):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
:param var: (Any) the variable you wish to wrap for pickling with cloudpickle
"""
self.var = var
def __getstate__(self):
return cloudpickle.dumps(self.var)
def __setstate__(self, obs):
self.var = cloudpickle.loads(obs)
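# --- Editor's sketch (not part of the original module): why CloudpickleWrapper
# exists. Standard pickle cannot serialize lambdas or closures, which is what
# multiprocessing would otherwise use on an env factory; routing the payload
# through cloudpickle in __getstate__/__setstate__ works around that. The check
# below only runs when this file is executed directly.
if __name__ == "__main__":
    import pickle
    make_env = lambda: "pretend this builds an env"   # not picklable with plain pickle
    wrapped = CloudpickleWrapper(make_env)
    restored = pickle.loads(pickle.dumps(wrapped))    # pickling the wrapper works
    assert restored.var() == make_env()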
| 37.080495
| 116
| 0.659097
|
93ceb6fd70900f77e9a54919d165033e6d323ebf
| 353
|
py
|
Python
|
MySQL_Databses.py
|
windloid/PythonExamples
|
6a9d1d79cb9e58dd46b2b0e1a708f7cda94ff6a5
|
[
"MIT"
] | 3
|
2020-05-22T09:16:02.000Z
|
2022-02-08T20:20:51.000Z
|
MySQL_Databses.py
|
windloid/PythonExamples
|
6a9d1d79cb9e58dd46b2b0e1a708f7cda94ff6a5
|
[
"MIT"
] | 5
|
2021-03-19T08:04:40.000Z
|
2022-03-12T00:04:25.000Z
|
MySQL_Databses.py
|
windloid/PythonExamples
|
6a9d1d79cb9e58dd46b2b0e1a708f7cda94ff6a5
|
[
"MIT"
] | 4
|
2020-05-22T09:16:04.000Z
|
2021-08-20T13:42:41.000Z
|
import mysql.connector
# MySQL database connection details
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="db_name"
)
mycursor = mydb.cursor()
# Execute an SQL query: mycursor.execute("SQL query")
mycursor.execute("SELECT column FROM table")
myresult = mycursor.fetchall()
for x in myresult:
print(x)
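# --- Editor's sketch (assumption: a table named "users" with a "name" column
# exists in db_name; adjust to your schema). Using %s placeholders lets the
# connector escape values, which avoids SQL injection from string formatting.
sql = "SELECT * FROM users WHERE name = %s"
mycursor.execute(sql, ("alice",))
print(mycursor.fetchone())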
| 17.65
| 55
| 0.688385
|
7c08eb1d39014f6f159ba4755eff24f437084934
| 4,558
|
py
|
Python
|
DRFdemo/BookTest/serializers.py
|
Nicholas-violet/Django_Rest_Framework
|
5f9fef4836980cb3de04cc47fa0f5ed7e065cd89
|
[
"MIT"
] | null | null | null |
DRFdemo/BookTest/serializers.py
|
Nicholas-violet/Django_Rest_Framework
|
5f9fef4836980cb3de04cc47fa0f5ed7e065cd89
|
[
"MIT"
] | null | null | null |
DRFdemo/BookTest/serializers.py
|
Nicholas-violet/Django_Rest_Framework
|
5f9fef4836980cb3de04cc47fa0f5ed7e065cd89
|
[
"MIT"
] | null | null | null |
# For the BookInfo model data, define a BookInfoSerializer serializer
# to handle serialization of BookInfo instances.
'''
from rest_framework import serializers
# # We just need to keep in mind that this serializer targets BookInfo.
# class BookInfoSerializer(serializers.Serializer):
#     # Define the keys of the resulting dict by declaring class attributes
#     # with the same names as the model fields.
#
# btitle = serializers.CharField()
# bpub_date = serializers.DateField()
# bread = serializers.IntegerField()
# bcomment = serializers.IntegerField()
# is_delete = serializers.BooleanField()
# image = serializers.ImageField()
class HeroInfoSerializer2(serializers.Serializer):
GENDER_CHOICES = (
(0, 'male'),
(1, 'female')
)
id = serializers.IntegerField(label='ID', read_only=True)
hname = serializers.CharField(label='名字', max_length=20)
hgender = serializers.ChoiceField(choices=GENDER_CHOICES, label='性别', required=False)
# Define a validation function for btitle.
# def check_btitle(value):
#     # value: the btitle data after the preceding validation steps
#     # Raise ValidationError to signal a failed check; there is no return value.
#
#     # If the string "django" is not in value, the title does not match the
#     # expected format, e.g. {"btitle": "围城"}.
#     if "django" not in value:
#         # Not a book about django
#         raise serializers.ValidationError("This is not a book about django!")
class BookInfoSerializer(serializers.Serializer):
"""图书数据序列化器"""
# read_only设置为True表示该字段只作用于序列化,反序列化的时候直接忽略
id = serializers.IntegerField(label='ID', read_only=True)
    # write_only=True means the field is only used for deserialization and is
    # ignored during serialization.
btitle = serializers.CharField(label='名称',
max_length=20,
min_length=2,
                                   # validators can list extra validation functions for this field, e.g.:
# validators=[check_btitle]
)
bpub_date = serializers.DateField(label='发布日期', required=True)
bread = serializers.IntegerField(label='阅读量', required=False, min_value=0)
bcomment = serializers.IntegerField(label='评论量', required=False, min_value=0)
image = serializers.ImageField(label='图片', required=False, allow_null=True)
is_delete =serializers.BooleanField(required=False)
    # "heros" is the hidden reverse relation: the set of related HeroInfo objects.
# heros = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
# heros = serializers.StringRelatedField(many=True)
# heros = HeroInfoSerializer2(many=True)
class HeroInfoSerializer(serializers.Serializer):
"""英雄数据序列化器"""
GENDER_CHOICES = (
(0, 'male'),
(1, 'female')
)
    # Native fields/attributes of HeroInfo
id = serializers.IntegerField(label='ID', read_only=True)
hname = serializers.CharField(label='名字', max_length=20)
hgender = serializers.ChoiceField(choices=GENDER_CHOICES, label='性别', required=False)
hcomment = serializers.CharField(label='描述信息', max_length=200, required=False, allow_null=True)
is_delete = serializers.BooleanField()
    # Foreign-key related attribute.
    # hbook is the single "parent" BookInfo object the current hero belongs to.
    # (1) Serialize the related field as the related object's primary key;
    #     read_only=True means the field is only used for serialization.
    # hbook = serializers.PrimaryKeyRelatedField(read_only=True)
    # (2) Serialize the related field as the result of its __str__ method:
# {
# "hname": xxx,
# "hgener": xxx,
# ...
# "hbook": "射雕英雄传"
# }
    hbook = serializers.StringRelatedField()  # no extra constraints; read_only=True by default
    # (3) Custom serialization of the related field:
# {
# "hname": xxx,
# "hgener": xxx,
# ...
# "hbook": {"btitle": xxx, "bpub_date": xxx}
# }
# hbook = BookInfoSerializer()
'''
"""
使用模型类序列化器
"""
from rest_framework import serializers
from .models import *
# serializers.Serializer ---- base class for hand-written serializers
# serializers.ModelSerializer ---- serializer built specifically around a model class
# Define a model serializer for BookInfo.
class BookInfoModelSerializer(serializers.ModelSerializer):
    # Fields can also be declared by hand inside the serializer,
    # e.g. for the non-primary-key reverse relation:
    # heros = serializers.StringRelatedField(many=True)
    # A manually declared field overrides the automatically mapped one:
# btitle = serializers.CharField(min_length=2, max_length=20, required=True)
class Meta:
        model = BookInfo  # the target model class this serializer operates on
        fields = "__all__"  # map every field of the model into the serializer
        # fields = ['id', 'btitle', 'bpub_date', 'bread']  # map only the listed fields
        # exclude = ['image']  # map every field except image
        # Adjust the constraints that the model serializer builds automatically.
        extra_kwargs = {
            "bread": {"min_value": 0},  # set the min_value constraint of bread to 0
            # "required": True,
        }
        # Mark several fields as read_only=True in one go.
        # read_only_fields = ['id', 'bread']
class HeroInfoModelSerializer(serializers.ModelSerializer):
    # A foreign-key field is automatically mapped to PrimaryKeyRelatedField,
    # but the hidden "<fk>_id" primary-key attribute is not mapped automatically.
hbook_id = serializers.IntegerField()
class Meta:
model = HeroInfo
fields = "__all__"
| 28.4875
| 99
| 0.659061
|
973fb64d8c966361515128612fec065da2e3b0c2
| 144,236
|
py
|
Python
|
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
10088/nova
|
972c06c608f0b00e9066d7f581fd81197065cf49
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
10088/nova
|
972c06c608f0b00e9066d7f581fd81197065cf49
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
10088/nova
|
972c06c608f0b00e9066d7f581fd81197065cf49
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil
from nova.compute import power_state
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova import version
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
class DsPathMatcher(object):
def __init__(self, expected_ds_path_str):
self.expected_ds_path_str = expected_ds_path_str
def __eq__(self, ds_path_param):
return str(ds_path_param) == self.expected_ds_path_str
class VMwareVMOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMOpsTestCase, self).setUp()
ds_util.dc_cache_reset()
vmwareapi_fake.reset()
stubs.set_stubs(self)
self.flags(enabled=True, group='vnc')
self.flags(subdirectory_name='vmware_base', group='image_cache')
self.flags(my_ip='',
flat_injected=True)
self._context = context.RequestContext('fake_user', 'fake_project')
self._session = session.VMwareAPISession()
self._virtapi = mock.Mock()
self._image_id = uuids.image
fake_ds_ref = vmwareapi_fake.ManagedObjectReference(
name='Datastore', value='fake-ds')
self._ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=10 * units.Gi,
freespace=10 * units.Gi)
self._dc_info = ds_util.DcInfo(
ref='fake_dc_ref', name='fake_dc',
vmFolder=vmwareapi_fake.ManagedObjectReference(
name='Folder', value='fake_vm_folder'))
cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
self._uuid = uuids.foo
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': self._uuid,
'network_info': '[]',
}
self._instance_values = {
'name': 'fake_name',
'display_name': 'fake_display_name',
'uuid': self._uuid,
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': '%s(%s)' % (cluster.mo_id, cluster.name),
'info_cache': fake_info_cache,
'expected_attrs': ['system_metadata', 'info_cache'],
}
self._instance = fake_instance.fake_instance_obj(
self._context, **self._instance_values)
self._flavor = objects.Flavor(name='m1.small', memory_mb=512, vcpus=1,
root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={})
self._instance.flavor = self._flavor
self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None,
cluster=cluster.obj)
self._cluster = cluster
self._image_meta = objects.ImageMeta.from_dict({'id': self._image_id})
subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
dns=[network_model.IP('192.168.0.1')],
gateway=
network_model.IP('192.168.0.1'),
ips=[
network_model.IP('192.168.0.100')],
routes=None)
subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
dns=None,
gateway=
network_model.IP('dead:beef::1'),
ips=[network_model.IP(
'dead:beef::dcad:beff:feef:0')],
routes=None)
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_4, subnet_6],
vlan=None,
bridge_interface=None,
injected=True)
self._network_values = {
'id': None,
'address': 'DE:AD:BE:EF:00:00',
'network': network,
'type': network_model.VIF_TYPE_OVS,
'devname': None,
'ovs_interfaceid': None,
'rxtx_cap': 3
}
self.network_info = network_model.NetworkInfo([
network_model.VIF(**self._network_values)
])
pure_IPv6_network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_6],
vlan=None,
bridge_interface=None,
injected=True)
self.pure_IPv6_network_info = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=pure_IPv6_network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])
self._metadata = (
"name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.micro\n"
"flavor:memory_mb:8\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
"flavor:swap:33550336\n"
"imageid:%s\n"
"package:%s\n" % (
uuids.image,
version.version_string_with_package()))
def test_get_machine_id_str(self):
result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
'192.168.0.1;192.168.0.255;192.168.0.1#', result)
result = vmops.VMwareVMOps._get_machine_id_str(
self.pure_IPv6_network_info)
self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
def _setup_create_folder_mocks(self):
ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
base_name = 'folder'
ds_name = "datastore"
ds_ref = vmwareapi_fake.ManagedObjectReference(value=1)
dc_ref = mock.Mock()
ds_util._DS_DC_MAPPING[ds_ref.value] = ds_util.DcInfo(
ref=dc_ref,
name='fake-name',
vmFolder='fake-folder')
path = ds_obj.DatastorePath(ds_name, base_name)
return ds_name, ds_ref, ops, path, dc_ref
@mock.patch.object(ds_util, 'mkdir')
def test_create_folder_if_missing(self, mock_mkdir):
ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
mock_mkdir.assert_called_with(ops._session, path, dc)
@mock.patch.object(ds_util, 'mkdir')
def test_create_folder_if_missing_exception(self, mock_mkdir):
ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
mock_mkdir.assert_called_with(ops._session, path, dc)
def test_get_valid_vms_from_retrieve_result(self):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
for x in range(0, 3):
vm = vmwareapi_fake.VirtualMachine()
vm.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
fake_objects.add_object(vm)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(3, len(vms))
def test_get_valid_vms_from_retrieve_result_with_invalid(self):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
valid_vm = vmwareapi_fake.VirtualMachine()
valid_vm.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
fake_objects.add_object(valid_vm)
invalid_vm1 = vmwareapi_fake.VirtualMachine()
invalid_vm1.set('runtime.connectionState', 'orphaned')
invalid_vm1.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
invalid_vm2 = vmwareapi_fake.VirtualMachine()
invalid_vm2.set('runtime.connectionState', 'inaccessible')
invalid_vm2.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
fake_objects.add_object(invalid_vm1)
fake_objects.add_object(invalid_vm2)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(1, len(vms))
def test_delete_vm_snapshot(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('RemoveSnapshot_Task', method)
self.assertEqual('fake_vm_snapshot', args[0])
self.assertFalse(kwargs['removeChildren'])
self.assertTrue(kwargs['consolidate'])
return 'fake_remove_snapshot_task'
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method', fake_call_method)
) as (_wait_for_task, _call_method):
self._vmops._delete_vm_snapshot(self._instance,
"fake_vm_ref", "fake_vm_snapshot")
_wait_for_task.assert_has_calls([
mock.call('fake_remove_snapshot_task')])
def test_create_vm_snapshot(self):
method_list = ['CreateSnapshot_Task', 'get_object_property']
def fake_call_method(module, method, *args, **kwargs):
expected_method = method_list.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'CreateSnapshot_Task'):
self.assertEqual('fake_vm_ref', args[0])
self.assertFalse(kwargs['memory'])
self.assertTrue(kwargs['quiesce'])
return 'fake_snapshot_task'
elif (expected_method == 'get_object_property'):
task_info = mock.Mock()
task_info.result = "fake_snapshot_ref"
self.assertEqual(('fake_snapshot_task', 'info'), args)
return task_info
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method', fake_call_method)
) as (_wait_for_task, _call_method):
snap = self._vmops._create_vm_snapshot(self._instance,
"fake_vm_ref")
self.assertEqual("fake_snapshot_ref", snap)
_wait_for_task.assert_has_calls([
mock.call('fake_snapshot_task')])
def test_update_instance_progress(self):
with mock.patch.object(self._instance, 'save') as mock_save:
self._vmops._update_instance_progress(self._instance._context,
self._instance, 5, 10)
mock_save.assert_called_once_with()
self.assertEqual(50, self._instance.progress)
@mock.patch.object(vm_util, 'get_vm_ref',
return_value=vmwareapi_fake.ManagedObjectReference())
def test_get_info(self, mock_get_vm_ref):
result = {
'summary.config.numCpu': 4,
'summary.config.memorySizeMB': 128,
'runtime.powerState': 'poweredOn'
}
with mock.patch.object(self._session, '_call_method',
return_value=result):
info = self._vmops.get_info(self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
expected = hardware.InstanceInfo(state=power_state.RUNNING)
self.assertEqual(expected, info)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info_when_ds_unavailable(self, mock_get_vm_ref):
result = {
'runtime.powerState': 'poweredOff'
}
with mock.patch.object(self._session, '_call_method',
return_value=result):
info = self._vmops.get_info(self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
self.assertEqual(hardware.InstanceInfo(state=power_state.SHUTDOWN),
info)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info_instance_deleted(self, mock_get_vm_ref):
props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
'runtime.powerState']
prop_cpu = vmwareapi_fake.Prop(props[0], 4)
prop_mem = vmwareapi_fake.Prop(props[1], 128)
prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
prop_list = [prop_state, prop_mem, prop_cpu]
obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
result = vmwareapi_fake.FakeRetrieveResult()
result.add_object(obj_content)
def mock_call_method(module, method, *args, **kwargs):
raise vexc.ManagedObjectNotFoundException()
with mock.patch.object(self._session, '_call_method',
mock_call_method):
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info,
self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
instance_ds_ref = vmwareapi_fake.ManagedObjectReference(value='ds-1')
_vcvmops = vmops.VMwareVMOps(self._session, None, None)
result = vmwareapi_fake.FakeRetrieveResult()
if ds_ref_exists:
ds_ref = vmwareapi_fake.ManagedObjectReference(value='ds-1')
result.add_object(vmwareapi_fake.Datacenter(ds_ref=ds_ref))
else:
result.add_object(vmwareapi_fake.Datacenter(ds_ref=None))
result.add_object(vmwareapi_fake.Datacenter())
with mock.patch.object(self._session, '_call_method',
return_value=result) as fake_call:
dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
fake_call.assert_called_once_with(
vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"])
if ds_ref_exists:
self.assertEqual(1, len(ds_util._DS_DC_MAPPING))
self.assertEqual("ha-datacenter", dc_info.name)
else:
self.assertIsNone(dc_info)
def test_get_datacenter_ref_and_name(self):
self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
def test_get_datacenter_ref_and_name_with_no_datastore(self):
self._test_get_datacenter_ref_and_name()
@mock.patch('nova.image.glance.API.get')
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(ds_util, 'disk_copy')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
@mock.patch.object(vm_util, 'find_rescue_device')
@mock.patch.object(vm_util, 'get_vm_boot_spec')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'power_on_instance')
@mock.patch.object(ds_obj, 'get_datastore_by_ref')
def test_rescue(self, mock_get_ds_by_ref, mock_power_on, mock_reconfigure,
mock_get_boot_spec, mock_find_rescue,
mock_get_vm_ref, mock_disk_copy,
mock_power_off, mock_glance):
_volumeops = mock.Mock()
self._vmops._volumeops = _volumeops
ds_ref = vmwareapi_fake.ManagedObjectReference(value='fake-ref')
ds = ds_obj.Datastore(ds_ref, 'ds1')
mock_get_ds_by_ref.return_value = ds
mock_find_rescue.return_value = 'fake-rescue-device'
mock_get_boot_spec.return_value = 'fake-boot-spec'
vm_ref = vmwareapi_fake.ManagedObjectReference()
mock_get_vm_ref.return_value = vm_ref
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = ds.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] test (uuid)/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
with test.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (_get_dc_ref_and_name, fake_vmdk_info):
dc_info = mock.Mock()
_get_dc_ref_and_name.return_value = dc_info
self._vmops.rescue(
self._context, self._instance, None, self._image_meta)
mock_power_off.assert_called_once_with(self._session,
self._instance,
vm_ref)
uuid = self._instance.image_ref
cache_path = ds.build_path('vmware_base', uuid, uuid + '.vmdk')
vm_folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
rescue_path = ds.build_path(vm_folder, uuid + '-rescue.vmdk')
mock_disk_copy.assert_called_once_with(self._session, dc_info.ref,
cache_path, rescue_path)
_volumeops.attach_disk_to_vm.assert_called_once_with(vm_ref,
self._instance, mock.ANY, mock.ANY, rescue_path)
mock_get_boot_spec.assert_called_once_with(mock.ANY,
'fake-rescue-device')
mock_reconfigure.assert_called_once_with(self._session,
vm_ref,
'fake-boot-spec')
mock_power_on.assert_called_once_with(self._session,
self._instance,
vm_ref=vm_ref)
def test_unrescue_power_on(self):
self._test_unrescue(True)
def test_unrescue_power_off(self):
self._test_unrescue(False)
def _test_unrescue(self, power_on):
_volumeops = mock.Mock()
self._vmops._volumeops = _volumeops
vm_ref = mock.Mock()
def fake_call_method(module, method, *args, **kwargs):
expected_args = (vm_ref, 'config.hardware.device')
self.assertEqual('get_object_property', method)
self.assertEqual(expected_args, args)
with test.nested(
mock.patch.object(vm_util, 'power_on_instance'),
mock.patch.object(vm_util, 'find_rescue_device'),
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._session, '_call_method',
fake_call_method),
mock.patch.object(vm_util, 'power_off_instance')
) as (_power_on_instance, _find_rescue, _get_vm_ref,
_call_method, _power_off):
self._vmops.unrescue(self._instance, power_on=power_on)
if power_on:
_power_on_instance.assert_called_once_with(self._session,
self._instance, vm_ref=vm_ref)
else:
self.assertFalse(_power_on_instance.called)
_get_vm_ref.assert_called_once_with(self._session,
self._instance)
_power_off.assert_called_once_with(self._session, self._instance,
vm_ref)
_volumeops.detach_disk_from_vm.assert_called_once_with(
vm_ref, self._instance, mock.ANY, destroy_disk=True)
@mock.patch.object(time, 'sleep')
def _test_clean_shutdown(self, mock_sleep,
timeout, retry_interval,
returns_on, returns_off,
vmware_tools_status,
succeeds):
"""Test the _clean_shutdown method
:param timeout: timeout before soft shutdown is considered a fail
:param retry_interval: time between rechecking instance power state
:param returns_on: how often the instance is reported as poweredOn
:param returns_off: how often the instance is reported as poweredOff
:param vmware_tools_status: Status of vmware tools
:param succeeds: the expected result
"""
instance = self._instance
vm_ref = mock.Mock()
return_props = []
expected_methods = ['get_object_properties_dict']
props_on = {'runtime.powerState': 'poweredOn',
'summary.guest.toolsStatus': vmware_tools_status,
'summary.guest.toolsRunningStatus': 'guestToolsRunning'}
props_off = {'runtime.powerState': 'poweredOff',
'summary.guest.toolsStatus': vmware_tools_status,
'summary.guest.toolsRunningStatus': 'guestToolsRunning'}
# initialize expected instance methods and returned properties
if vmware_tools_status == "toolsOk":
if returns_on > 0:
expected_methods.append('ShutdownGuest')
for x in range(returns_on + 1):
return_props.append(props_on)
for x in range(returns_on):
expected_methods.append('get_object_properties_dict')
for x in range(returns_off):
return_props.append(props_off)
if returns_on > 0:
expected_methods.append('get_object_properties_dict')
else:
return_props.append(props_off)
def fake_call_method(module, method, *args, **kwargs):
expected_method = expected_methods.pop(0)
self.assertEqual(expected_method, method)
if expected_method == 'get_object_properties_dict':
props = return_props.pop(0)
return props
elif expected_method == 'ShutdownGuest':
return
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._session, '_call_method',
side_effect=fake_call_method)
) as (mock_get_vm_ref, mock_call_method):
result = self._vmops._clean_shutdown(instance, timeout,
retry_interval)
self.assertEqual(succeeds, result)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
def test_clean_shutdown_first_time(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=1,
returns_off=1,
vmware_tools_status="toolsOk",
succeeds=True)
def test_clean_shutdown_second_time(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=2,
returns_off=1,
vmware_tools_status="toolsOk",
succeeds=True)
def test_clean_shutdown_timeout(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=4,
returns_off=0,
vmware_tools_status="toolsOk",
succeeds=False)
def test_clean_shutdown_already_off(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=0,
returns_off=1,
vmware_tools_status="toolsOk",
succeeds=False)
def test_clean_shutdown_no_vwaretools(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=1,
returns_off=0,
vmware_tools_status="toolsNotOk",
succeeds=False)
def _test_finish_migration(self, power_on=True, resize_instance=False):
with test.nested(
mock.patch.object(self._vmops,
'_resize_create_ephemerals_and_swap'),
mock.patch.object(self._vmops, "_update_instance_progress"),
mock.patch.object(vm_util, "power_on_instance"),
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-ref')
) as (fake_resize_create_ephemerals_and_swap,
fake_update_instance_progress, fake_power_on, fake_get_vm_ref):
self._vmops.finish_migration(context=self._context,
migration=None,
instance=self._instance,
disk_info=None,
network_info=None,
block_device_info=None,
resize_instance=resize_instance,
image_meta=None,
power_on=power_on)
fake_resize_create_ephemerals_and_swap.assert_called_once_with(
'fake-ref', self._instance, None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
self._instance,
vm_ref='fake-ref')
else:
self.assertFalse(fake_power_on.called)
calls = [
mock.call(self._context, self._instance, step=5,
total_steps=vmops.RESIZE_TOTAL_STEPS),
mock.call(self._context, self._instance, step=6,
total_steps=vmops.RESIZE_TOTAL_STEPS)]
fake_update_instance_progress.assert_has_calls(calls)
def test_finish_migration_power_on(self):
self._test_finish_migration(power_on=True, resize_instance=False)
def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migration_power_on_resize(self):
self._test_finish_migration(power_on=True, resize_instance=True)
@mock.patch.object(vmops.VMwareVMOps, '_create_swap')
@mock.patch.object(vmops.VMwareVMOps, '_create_ephemeral')
@mock.patch.object(ds_obj, 'get_datastore_by_ref',
return_value='fake-ds-ref')
@mock.patch.object(vm_util, 'get_vmdk_info')
def _test_resize_create_ephemerals(self, vmdk, datastore,
mock_get_vmdk_info,
mock_get_datastore_by_ref,
mock_create_ephemeral,
mock_create_swap):
mock_get_vmdk_info.return_value = vmdk
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info) as mock_get_dc_ref_and_name:
self._vmops._resize_create_ephemerals_and_swap(
'vm-ref', self._instance, 'block-devices')
mock_get_vmdk_info.assert_called_once_with(
self._session, 'vm-ref', uuid=self._instance.uuid)
if vmdk.device:
mock_get_datastore_by_ref.assert_called_once_with(
self._session, datastore.ref)
mock_get_dc_ref_and_name.assert_called_once_with(datastore.ref)
mock_create_ephemeral.assert_called_once_with(
'block-devices', self._instance, 'vm-ref',
dc_info, 'fake-ds-ref', 'uuid', 'fake-adapter')
mock_create_swap.assert_called_once_with(
'block-devices', self._instance, 'vm-ref',
dc_info, 'fake-ds-ref', 'uuid', 'fake-adapter')
else:
self.assertFalse(mock_create_ephemeral.called)
self.assertFalse(mock_get_dc_ref_and_name.called)
self.assertFalse(mock_get_datastore_by_ref.called)
def test_resize_create_ephemerals(self):
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
self._test_resize_create_ephemerals(vmdk, datastore)
def test_resize_create_ephemerals_no_root(self):
vmdk = vm_util.VmdkInfo(None, None, None, 0, None)
self._test_resize_create_ephemerals(vmdk, None)
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vmops.VMwareVMOps, '_resize_create_ephemerals_and_swap')
@mock.patch.object(vmops.VMwareVMOps, '_remove_ephemerals_and_swap')
@mock.patch.object(ds_util, 'disk_delete')
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'file_exists',
return_value=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
return_value='fake-browser')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_vm_resize_spec',
return_value='fake-spec')
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
@mock.patch.object(vm_util, 'power_on_instance')
def _test_finish_revert_migration(self, fake_power_on,
fake_get_vm_ref, fake_power_off,
fake_resize_spec, fake_reconfigure_vm,
fake_get_browser,
fake_original_exists, fake_disk_move,
fake_disk_delete,
fake_remove_ephemerals_and_swap,
fake_resize_create_ephemerals_and_swap,
fake_get_extra_specs,
power_on):
"""Tests the finish_revert_migration method on vmops."""
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
with test.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
self._vmops._volumeops = mock.Mock()
mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm
self._vmops.finish_revert_migration(self._context,
instance=self._instance,
network_info=None,
block_device_info=None,
power_on=power_on)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
# Validate VM reconfiguration
metadata = ('name:fake_display_name\n'
'userid:fake_user\n'
'username:None\n'
'projectid:fake_project\n'
'projectname:None\n'
'flavor:name:m1.small\n'
'flavor:memory_mb:512\n'
'flavor:vcpus:1\n'
'flavor:ephemeral_gb:0\n'
'flavor:root_gb:10\n'
'flavor:swap:0\n'
'imageid:%s\n'
'package:%s\n' % (
uuids.image,
version.version_string_with_package()))
fake_resize_spec.assert_called_once_with(
self._session.vim.client.factory,
int(self._instance.vcpus),
int(self._instance.memory_mb),
extra_specs,
metadata=metadata)
fake_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-spec')
# Validate disk configuration
fake_get_vmdk_info.assert_called_once_with(
self._session, 'fake-ref', uuid=self._instance.uuid)
fake_get_browser.assert_called_once_with('fake-ref')
fake_original_exists.assert_called_once_with(
self._session, 'fake-browser',
ds_obj.DatastorePath(datastore.name, 'uuid'),
'original.vmdk')
mock_detach_disk.assert_called_once_with('fake-ref',
self._instance,
device)
fake_disk_delete.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/root.vmdk')
fake_disk_move.assert_called_once_with(
self._session, dc_info.ref,
'[fake] uuid/original.vmdk',
'[fake] uuid/root.vmdk')
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk',
disk_io_limits=extra_specs.disk_io_limits)
fake_remove_ephemerals_and_swap.assert_called_once_with('fake-ref')
fake_resize_create_ephemerals_and_swap.assert_called_once_with(
'fake-ref', self._instance, None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
self._instance)
else:
self.assertFalse(fake_power_on.called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(power_on=True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
def _test_find_esx_host(self, cluster_hosts, ds_hosts):
def mock_call_method(module, method, *args, **kwargs):
if args[0] == 'fake_cluster':
ret = mock.MagicMock()
ret.ManagedObjectReference = cluster_hosts
return ret
elif args[0] == 'fake_ds':
ret = mock.MagicMock()
ret.DatastoreHostMount = ds_hosts
return ret
with mock.patch.object(self._session, '_call_method',
mock_call_method):
return self._vmops._find_esx_host('fake_cluster', 'fake_ds')
def test_find_esx_host(self):
ch1 = vmwareapi_fake.ManagedObjectReference(value='host-10')
ch2 = vmwareapi_fake.ManagedObjectReference(value='host-12')
ch3 = vmwareapi_fake.ManagedObjectReference(value='host-15')
dh1 = vmwareapi_fake.DatastoreHostMount('host-8')
dh2 = vmwareapi_fake.DatastoreHostMount('host-12')
dh3 = vmwareapi_fake.DatastoreHostMount('host-17')
ret = self._test_find_esx_host([ch1, ch2, ch3], [dh1, dh2, dh3])
self.assertEqual('host-12', ret.value)
def test_find_esx_host_none(self):
ch1 = vmwareapi_fake.ManagedObjectReference(value='host-10')
ch2 = vmwareapi_fake.ManagedObjectReference(value='host-12')
ch3 = vmwareapi_fake.ManagedObjectReference(value='host-15')
dh1 = vmwareapi_fake.DatastoreHostMount('host-8')
dh2 = vmwareapi_fake.DatastoreHostMount('host-13')
dh3 = vmwareapi_fake.DatastoreHostMount('host-17')
ret = self._test_find_esx_host([ch1, ch2, ch3], [dh1, dh2, dh3])
self.assertIsNone(ret)
@mock.patch.object(vm_util, 'get_vmdk_info')
@mock.patch.object(ds_obj, 'get_datastore_by_ref')
def test_find_datastore_for_migration(self, mock_get_ds, mock_get_vmdk):
def mock_call_method(module, method, *args, **kwargs):
ds1 = vmwareapi_fake.ManagedObjectReference(value='datastore-10')
ds2 = vmwareapi_fake.ManagedObjectReference(value='datastore-12')
ds3 = vmwareapi_fake.ManagedObjectReference(value='datastore-15')
ret = mock.MagicMock()
ret.ManagedObjectReference = [ds1, ds2, ds3]
return ret
ds_ref = vmwareapi_fake.ManagedObjectReference(value='datastore-12')
vmdk_dev = mock.MagicMock()
vmdk_dev.device.backing.datastore = ds_ref
mock_get_vmdk.return_value = vmdk_dev
ds = ds_obj.Datastore(ds_ref, 'datastore1')
mock_get_ds.return_value = ds
with mock.patch.object(self._session, '_call_method',
mock_call_method):
ret = self._vmops._find_datastore_for_migration(self._instance,
'fake_vm', 'cluster_ref',
None)
self.assertIs(ds, ret)
mock_get_vmdk.assert_called_once_with(self._session, 'fake_vm',
uuid=self._instance.uuid)
mock_get_ds.assert_called_once_with(self._session, ds_ref)
@mock.patch.object(vm_util, 'get_vmdk_info')
@mock.patch.object(ds_util, 'get_datastore')
def test_find_datastore_for_migration_other(self, mock_get_ds,
mock_get_vmdk):
def mock_call_method(module, method, *args, **kwargs):
ds1 = vmwareapi_fake.ManagedObjectReference(value='datastore-10')
ds2 = vmwareapi_fake.ManagedObjectReference(value='datastore-12')
ds3 = vmwareapi_fake.ManagedObjectReference(value='datastore-15')
ret = mock.MagicMock()
ret.ManagedObjectReference = [ds1, ds2, ds3]
return ret
ds_ref = vmwareapi_fake.ManagedObjectReference(value='datastore-18')
vmdk_dev = mock.MagicMock()
vmdk_dev.device.backing.datastore = ds_ref
mock_get_vmdk.return_value = vmdk_dev
ds = ds_obj.Datastore(ds_ref, 'datastore1')
mock_get_ds.return_value = ds
with mock.patch.object(self._session, '_call_method',
mock_call_method):
ret = self._vmops._find_datastore_for_migration(self._instance,
'fake_vm', 'cluster_ref',
None)
self.assertIs(ds, ret)
mock_get_vmdk.assert_called_once_with(self._session, 'fake_vm',
uuid=self._instance.uuid)
mock_get_ds.assert_called_once_with(self._session, 'cluster_ref',
None)
@mock.patch.object(vm_util, 'relocate_vm')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_vm')
@mock.patch.object(vm_util, 'get_cluster_ref_by_name',
return_value='fake_cluster')
@mock.patch.object(vm_util, 'get_res_pool_ref', return_value='fake_pool')
@mock.patch.object(vmops.VMwareVMOps, '_find_datastore_for_migration')
@mock.patch.object(vmops.VMwareVMOps, '_find_esx_host',
return_value='fake_host')
def test_live_migration(self, mock_find_host, mock_find_datastore,
mock_get_respool, mock_get_cluster, mock_get_vm,
mock_relocate):
post_method = mock.MagicMock()
migrate_data = objects.VMwareLiveMigrateData()
migrate_data.cluster_name = 'fake-cluster'
migrate_data.datastore_regex = 'ds1|ds2'
mock_find_datastore.return_value = ds_obj.Datastore('ds_ref', 'ds')
with mock.patch.object(self._session, '_call_method',
return_value='hardware-devices'):
self._vmops.live_migration(
self._context, self._instance, 'fake-host',
post_method, None, False, migrate_data)
mock_get_vm.assert_called_once_with(self._session, self._instance)
mock_get_cluster.assert_called_once_with(self._session, 'fake-cluster')
mock_find_datastore.assert_called_once_with(self._instance, 'fake_vm',
'fake_cluster', mock.ANY)
mock_find_host.assert_called_once_with('fake_cluster', 'ds_ref')
mock_relocate.assert_called_once_with(self._session, 'fake_vm',
'fake_pool', 'ds_ref', 'fake_host',
devices=[])
post_method.assert_called_once_with(self._context, self._instance,
'fake-host', False, migrate_data)
@mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_vm_resize_spec',
return_value='fake-spec')
def test_resize_vm(self, fake_resize_spec, fake_reconfigure,
fake_get_extra_specs, fake_get_metadata):
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
fake_get_metadata.return_value = self._metadata
flavor = objects.Flavor(name='m1.small',
memory_mb=1024,
vcpus=2,
extra_specs={})
self._vmops._resize_vm(self._context, self._instance, 'vm-ref', flavor,
None)
fake_get_metadata.assert_called_once_with(self._context,
self._instance,
flavor=flavor)
fake_resize_spec.assert_called_once_with(
self._session.vim.client.factory, 2, 1024, extra_specs,
metadata=self._metadata)
fake_reconfigure.assert_called_once_with(self._session,
'vm-ref', 'fake-spec')
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'disk_copy')
def test_resize_disk(self, fake_disk_copy, fake_disk_move,
fake_get_extra_specs, fake_extend):
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
self._instance.flavor.root_gb * units.Gi,
device)
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info) as fake_get_dc_ref_and_name:
self._vmops._volumeops = mock.Mock()
mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
flavor = fake_flavor.fake_flavor_obj(self._context,
root_gb=self._instance.flavor.root_gb + 1)
self._vmops._resize_disk(self._instance, 'fake-ref', vmdk, flavor)
fake_get_dc_ref_and_name.assert_called_once_with(datastore.ref)
fake_disk_copy.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/root.vmdk',
'[fake] uuid/resized.vmdk')
mock_detach_disk.assert_called_once_with('fake-ref',
self._instance,
device)
fake_extend.assert_called_once_with(
self._instance, flavor['root_gb'] * units.Mi,
'[fake] uuid/resized.vmdk', dc_info.ref)
calls = [
mock.call(self._session, dc_info.ref,
'[fake] uuid/root.vmdk',
'[fake] uuid/original.vmdk'),
mock.call(self._session, dc_info.ref,
'[fake] uuid/resized.vmdk',
'[fake] uuid/root.vmdk')]
fake_disk_move.assert_has_calls(calls)
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk',
disk_io_limits=extra_specs.disk_io_limits)
@mock.patch.object(vm_util, 'detach_devices_from_vm')
@mock.patch.object(vm_util, 'get_swap')
@mock.patch.object(vm_util, 'get_ephemerals')
def test_remove_ephemerals_and_swap(self, get_ephemerals, get_swap,
detach_devices):
get_ephemerals.return_value = [mock.sentinel.ephemeral0,
mock.sentinel.ephemeral1]
get_swap.return_value = mock.sentinel.swap
devices = [mock.sentinel.ephemeral0, mock.sentinel.ephemeral1,
mock.sentinel.swap]
self._vmops._remove_ephemerals_and_swap(mock.sentinel.vm_ref)
detach_devices.assert_called_once_with(self._vmops._session,
mock.sentinel.vm_ref, devices)
@mock.patch.object(ds_util, 'disk_delete')
@mock.patch.object(ds_util, 'file_exists',
return_value=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
return_value='fake-browser')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_confirm_migration(self, fake_get_vm_ref, fake_get_browser,
fake_original_exists,
fake_disk_delete):
"""Tests the confirm_migration method on vmops."""
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with test.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
self._vmops.confirm_migration(None,
self._instance,
None)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_get_vmdk_info.assert_called_once_with(
self._session, 'fake-ref', uuid=self._instance.uuid)
fake_get_browser.assert_called_once_with('fake-ref')
fake_original_exists.assert_called_once_with(
self._session, 'fake-browser',
ds_obj.DatastorePath(datastore.name, 'uuid'),
'original.vmdk')
fake_disk_delete.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/original.vmdk')
def test_migrate_disk_and_power_off(self):
self._test_migrate_disk_and_power_off(
flavor_root_gb=self._instance.flavor.root_gb + 1)
def test_migrate_disk_and_power_off_zero_disk_flavor(self):
self._instance.flavor.root_gb = 0
self._test_migrate_disk_and_power_off(flavor_root_gb=0)
def test_migrate_disk_and_power_off_disk_shrink(self):
self.assertRaises(exception.InstanceFaultRollback,
self._test_migrate_disk_and_power_off,
flavor_root_gb=self._instance.flavor.root_gb - 1)
@mock.patch.object(vmops.VMwareVMOps, "_remove_ephemerals_and_swap")
@mock.patch.object(vm_util, 'get_vmdk_info')
@mock.patch.object(vmops.VMwareVMOps, "_resize_disk")
@mock.patch.object(vmops.VMwareVMOps, "_resize_vm")
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(vmops.VMwareVMOps, "_update_instance_progress")
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def _test_migrate_disk_and_power_off(self, fake_get_vm_ref, fake_progress,
fake_power_off, fake_resize_vm,
fake_resize_disk, fake_get_vmdk_info,
fake_remove_ephemerals_and_swap,
flavor_root_gb):
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
self._instance.flavor.root_gb * units.Gi,
'fake-device')
fake_get_vmdk_info.return_value = vmdk
flavor = fake_flavor.fake_flavor_obj(self._context,
root_gb=flavor_root_gb)
self._vmops.migrate_disk_and_power_off(self._context,
self._instance,
None,
flavor)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
fake_resize_vm.assert_called_once_with(self._context, self._instance,
'fake-ref', flavor, mock.ANY)
fake_resize_disk.assert_called_once_with(self._instance, 'fake-ref',
vmdk, flavor)
calls = [mock.call(self._context, self._instance, step=i,
total_steps=vmops.RESIZE_TOTAL_STEPS)
for i in range(4)]
fake_progress.assert_has_calls(calls)
@mock.patch.object(vutil, 'get_inventory_path', return_value='fake_path')
@mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
def test_configure_config_drive(self,
mock_create_config_drive,
mock_attach_cdrom_to_vm,
mock_get_inventory_path):
injected_files = mock.Mock()
admin_password = mock.Mock()
network_info = mock.Mock()
vm_ref = mock.Mock()
mock_create_config_drive.return_value = "fake_iso_path"
self._vmops._configure_config_drive(
self._context, self._instance, vm_ref, self._dc_info, self._ds,
injected_files, admin_password, network_info)
upload_iso_path = self._ds.build_path("fake_iso_path")
mock_get_inventory_path.assert_called_once_with(self._session.vim,
self._dc_info.ref)
mock_create_config_drive.assert_called_once_with(
self._context, self._instance, injected_files, admin_password,
network_info, self._ds.name, 'fake_path', self._instance.uuid,
"Fake-CookieJar")
mock_attach_cdrom_to_vm.assert_called_once_with(
vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
def test_prepare_for_spawn_invalid_ram(self):
instance = self._instance.obj_clone()
flavor = objects.Flavor(vcpus=1, memory_mb=6, ephemeral_gb=1,
swap=1024, extra_specs={})
instance.flavor = flavor
self.assertRaises(exception.InstanceUnacceptable,
self._vmops.prepare_for_spawn, instance)
@mock.patch('nova.image.glance.API.get')
@mock.patch.object(vmops.LOG, 'debug')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.lockutils, 'lock')
def test_spawn_mask_block_device_info_password(self, mock_lock,
mock_build_virtual_machine, mock_get_vm_config_info,
mock_fetch_image_if_missing, mock_debug, mock_glance):
# Very simple test that just ensures block_device_info auth_password
# is masked when logged; the rest of the test just fails out early.
data = {'auth_password': 'scrubme'}
bdm = [{'boot_index': 0, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
'connection_info': {'data': data}}]
bdi = {'block_device_mapping': bdm}
self.password_logged = False
# Tests that the parameters to the to_xml method are sanitized for
# passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
self.password_logged = True
self.assertNotIn('scrubme', args[0])
mock_debug.side_effect = fake_debug
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
mock_vi = mock.Mock()
mock_vi.root_gb = 1
mock_vi.ii.file_size = 2 * units.Gi
mock_vi.instance.flavor.root_gb = 1
mock_get_vm_config_info.return_value = mock_vi
# Call spawn(). We don't care what it does as long as it generates
# the log message, which we check below.
with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
mock_vo.attach_root_volume.side_effect = test.TestingException
try:
self._vmops.spawn(
self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi
)
except test.TestingException:
pass
# Check that the relevant log message was generated, and therefore
# that we checked it was scrubbed
self.assertTrue(self.password_logged)
def _get_metadata(self, is_image_used=True):
return ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:512\n"
"flavor:vcpus:1\n"
"flavor:ephemeral_gb:0\n"
"flavor:root_gb:10\n"
"flavor:swap:0\n"
"imageid:%(image_id)s\n"
"package:%(version)s\n" % {
'image_id': uuids.image if is_image_used else None,
'version': version.version_string_with_package()})
@mock.patch.object(vm_util, 'rename_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(images.VMwareImage, 'from_image')
def test_spawn_non_root_block_device(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
enlist_image, fetch_image,
use_disk_image,
power_on_instance,
create_folders,
rename_vm):
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
bdm = [{'connection_info': connection_info1,
'disk_bus': constants.ADAPTER_TYPE_IDE,
'mount_device': '/dev/sdb'},
{'connection_info': connection_info2,
'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
'mount_device': '/dev/sdc'}]
bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
with mock.patch.object(self._vmops, '_volumeops') as volumeops:
self._vmops.spawn(self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(self._context,
self._instance.image_ref,
self._image_meta)
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata())
enlist_image.assert_called_once_with(image_info.image_id,
vi.datastore, vi.dc_info.ref)
fetch_image.assert_called_once_with(self._context, vi)
use_disk_image.assert_called_once_with('fake-vm-ref', vi)
volumeops.attach_volume.assert_any_call(
connection_info1, self._instance, constants.ADAPTER_TYPE_IDE)
volumeops.attach_volume.assert_any_call(
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
@mock.patch.object(vm_util, 'rename_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(images.VMwareImage, 'from_image')
def test_spawn_with_no_image_and_block_devices(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
power_on_instance,
create_folders,
rename_vm):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
connection_info3 = {'data': 'fake-data3', 'serial': 'volume-fake-id3'}
bdm = [{'boot_index': 0,
'connection_info': connection_info1,
'disk_bus': constants.ADAPTER_TYPE_IDE},
{'boot_index': 1,
'connection_info': connection_info2,
'disk_bus': constants.DEFAULT_ADAPTER_TYPE},
{'boot_index': 2,
'connection_info': connection_info3,
'disk_bus': constants.ADAPTER_TYPE_LSILOGICSAS}]
bdi = {'block_device_mapping': bdm}
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
image_info = mock.sentinel.image_info
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
with mock.patch.object(self._vmops, '_volumeops') as volumeops:
self._vmops.spawn(self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(self._context,
self._instance.image_ref,
self._image_meta)
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata(is_image_used=False))
volumeops.attach_root_volume.assert_called_once_with(
connection_info1, self._instance, vi.datastore.ref,
constants.ADAPTER_TYPE_IDE)
volumeops.attach_volume.assert_any_call(
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
volumeops.attach_volume.assert_any_call(
connection_info3, self._instance,
constants.ADAPTER_TYPE_LSILOGICSAS)
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(images.VMwareImage, 'from_image')
def test_spawn_unsupported_hardware(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
power_on_instance,
create_folders):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
bdm = [{'boot_index': 0,
'connection_info': connection_info,
'disk_bus': 'invalid_adapter_type'}]
bdi = {'block_device_mapping': bdm}
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
image_info = mock.sentinel.image_info
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
self.assertRaises(exception.UnsupportedHardware, self._vmops.spawn,
self._context, self._instance, self._image_meta,
injected_files=None,
admin_password=None, network_info=[],
block_device_info=bdi)
from_image.assert_called_once_with(self._context,
self._instance.image_ref,
self._image_meta)
get_vm_config_info.assert_called_once_with(
self._instance, image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata(is_image_used=False))
def test_get_ds_browser(self):
cache = self._vmops._datastore_browser_mapping
ds_browser = mock.Mock()
moref = vmwareapi_fake.ManagedObjectReference(value='datastore-100')
self.assertIsNone(cache.get(moref.value))
mock_call_method = mock.Mock(return_value=ds_browser)
with mock.patch.object(self._session, '_call_method',
mock_call_method):
ret = self._vmops._get_ds_browser(moref)
mock_call_method.assert_called_once_with(vutil,
'get_object_property', moref, 'browser')
self.assertIs(ds_browser, ret)
self.assertIs(ds_browser, cache.get(moref.value))
@mock.patch.object(
vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vm_util, 'copy_virtual_disk')
def _test_use_disk_image_as_linked_clone(self,
mock_copy_virtual_disk,
mock_extend_virtual_disk,
mock_sized_image_exists,
flavor_fits_image=False):
extra_specs = vm_util.ExtraSpecs()
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache, extra_specs)
sized_cached_image_ds_loc = cache_root_folder.join(
"%s.%s.vmdk" % (self._image_id, vi.root_gb))
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
str(vi.cache_image_path),
str(sized_cached_image_ds_loc))
if not flavor_fits_image:
mock_extend_virtual_disk.assert_called_once_with(
self._instance, vi.root_gb * units.Mi,
str(sized_cached_image_ds_loc),
self._dc_info.ref)
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance, vi.ii.adapter_type,
vi.ii.disk_type,
str(sized_cached_image_ds_loc),
vi.root_gb * units.Mi, False,
disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_disk_image_as_linked_clone(self):
self._test_use_disk_image_as_linked_clone()
def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vm_util, 'copy_virtual_disk')
def _test_use_disk_image_as_full_clone(self,
mock_copy_virtual_disk,
mock_extend_virtual_disk,
flavor_fits_image=False):
extra_specs = vm_util.ExtraSpecs()
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache,
extra_specs)
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
fake_path = '[fake_ds] %(uuid)s/%(uuid)s.vmdk' % {'uuid': self._uuid}
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
str(vi.cache_image_path),
fake_path)
if not flavor_fits_image:
mock_extend_virtual_disk.assert_called_once_with(
self._instance, vi.root_gb * units.Mi,
fake_path, self._dc_info.ref)
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance, vi.ii.adapter_type,
vi.ii.disk_type, fake_path,
vi.root_gb * units.Mi, False,
disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_disk_image_as_full_clone(self):
self._test_use_disk_image_as_full_clone()
def test_use_disk_image_as_full_clone_image_too_big(self):
self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
@mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
@mock.patch.object(vm_util, 'create_virtual_disk')
def _test_use_iso_image(self,
mock_create_virtual_disk,
mock_attach_cdrom,
with_root_disk):
extra_specs = vm_util.ExtraSpecs()
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
linked_clone=True)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache, extra_specs)
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_iso_image("fake_vm_ref", vi)
mock_attach_cdrom.assert_called_once_with(
"fake_vm_ref", self._instance, self._ds.ref,
str(vi.cache_image_path))
fake_path = '[fake_ds] %(uuid)s/%(uuid)s.vmdk' % {'uuid': self._uuid}
if with_root_disk:
mock_create_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
vi.ii.adapter_type, vi.ii.disk_type,
fake_path,
vi.root_gb * units.Mi)
linked_clone = False
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance,
vi.ii.adapter_type, vi.ii.disk_type,
fake_path,
vi.root_gb * units.Mi, linked_clone,
disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_iso_image_with_root_disk(self):
self._test_use_iso_image(with_root_disk=True)
def test_use_iso_image_without_root_disk(self):
self._test_use_iso_image(with_root_disk=False)
def _verify_spawn_method_calls(self, mock_call_method, extras=None):
# TODO(vui): More explicit assertions of spawn() behavior
# are waiting on additional refactoring pertaining to image
# handling/manipulation. Till then, we continue to assert on the
# sequence of VIM operations invoked.
expected_methods = ['get_object_property',
'SearchDatastore_Task',
'CreateVirtualDisk_Task',
'DeleteDatastoreFile_Task',
'MoveDatastoreFile_Task',
'DeleteDatastoreFile_Task',
'SearchDatastore_Task',
'ExtendVirtualDisk_Task',
]
if extras:
expected_methods.extend(extras)
# Last call should be renaming the instance
expected_methods.append('Rename_Task')
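        # Each mock_calls entry is a (name, args, kwargs) triple; for
        # _call_method(module, method, ...) the second positional argument
        # is the VIM method name, which is what we compare below.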
recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
self.assertEqual(expected_methods, recorded_methods)
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps._update_vnic_index')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
@mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
@mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
return_value=[])
@mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
return_value='fake_create_spec')
@mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
return_value='fake_vm_ref')
@mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
@mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
# TODO(dims): Need to add tests for create_virtual_disk after the
# disk/image code in spawn gets refactored
def _test_spawn(self,
mock_copy_virtual_disk,
mock_power_on_instance,
mock_get_and_set_vnc_config,
mock_enlist_image,
mock_set_machine_id,
mock_mkdir,
mock_create_vm,
mock_get_create_spec,
mock_get_vif_info,
mock_get_datacenter_ref_and_name,
mock_get_datastore,
mock_configure_config_drive,
mock_update_vnic_index,
mock_create_folders,
block_device_info=None,
extra_specs=None,
config_drive=False):
if extra_specs is None:
extra_specs = vm_util.ExtraSpecs()
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
image = {
'id': self._image_id,
'disk_format': 'vmdk',
'size': image_size,
}
image = objects.ImageMeta.from_dict(image)
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = self._vmops._get_vm_config_info(
self._instance, image_info, extra_specs)
self._vmops._volumeops = mock.Mock()
network_info = mock.Mock()
mock_get_datastore.return_value = self._ds
mock_get_datacenter_ref_and_name.return_value = self._dc_info
mock_call_method = mock.Mock(return_value='fake_task')
if extra_specs is None:
extra_specs = vm_util.ExtraSpecs()
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
mock_call_method),
mock.patch.object(uuidutils, 'generate_uuid',
return_value='tmp-uuid'),
mock.patch.object(images, 'fetch_image'),
mock.patch('nova.image.glance.API.get'),
mock.patch.object(vutil, 'get_inventory_path',
return_value=self._dc_info.name),
mock.patch.object(self._vmops, '_get_extra_specs',
return_value=extra_specs),
mock.patch.object(self._vmops, '_get_instance_metadata',
return_value='fake-metadata')
) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image,
_get_img_svc, _get_inventory_path, _get_extra_specs,
_get_instance_metadata):
self._vmops.spawn(self._context, self._instance, image,
injected_files='fake_files',
admin_password='password',
network_info=network_info,
block_device_info=block_device_info)
self.assertEqual(2, mock_mkdir.call_count)
mock_get_vif_info.assert_called_once_with(
self._session, self._cluster.obj,
constants.DEFAULT_VIF_MODEL, network_info)
mock_get_create_spec.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
'fake_ds',
[],
extra_specs,
constants.DEFAULT_OS_TYPE,
profile_spec=None,
metadata='fake-metadata')
mock_create_vm.assert_called_once_with(
self._session,
self._instance,
'fake_vm_folder',
'fake_create_spec',
self._cluster.resourcePool)
mock_get_and_set_vnc_config.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
'fake_vm_ref')
mock_set_machine_id.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
network_info,
vm_ref='fake_vm_ref')
mock_power_on_instance.assert_called_once_with(
self._session, self._instance, vm_ref='fake_vm_ref')
if (block_device_info and
'block_device_mapping' in block_device_info):
bdms = block_device_info['block_device_mapping']
for bdm in bdms:
mock_attach_root = (
self._vmops._volumeops.attach_root_volume)
mock_attach = self._vmops._volumeops.attach_volume
adapter_type = bdm.get('disk_bus') or vi.ii.adapter_type
if bdm.get('boot_index') == 0:
mock_attach_root.assert_any_call(
bdm['connection_info'], self._instance,
self._ds.ref, adapter_type)
else:
mock_attach.assert_any_call(
bdm['connection_info'], self._instance,
self._ds.ref, adapter_type)
mock_enlist_image.assert_called_once_with(
self._image_id, self._ds, self._dc_info.ref)
upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
self._image_id, self._image_id)
_fetch_image.assert_called_once_with(
self._context,
self._instance,
self._session._host,
self._session._port,
self._dc_info.name,
self._ds.name,
upload_file_name,
cookies='Fake-CookieJar')
self.assertGreater(len(_wait_for_task.mock_calls), 0)
            self.assertEqual(1, _get_inventory_path.call_count)
extras = None
if block_device_info and ('ephemerals' in block_device_info or
'swap' in block_device_info):
extras = ['CreateVirtualDisk_Task']
self._verify_spawn_method_calls(_call_method, extras)
dc_ref = 'fake_dc_ref'
source_file = ('[fake_ds] vmware_base/%s/%s.vmdk' %
(self._image_id, self._image_id))
dest_file = ('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
(self._image_id, self._image_id,
self._instance['root_gb']))
# TODO(dims): add more tests for copy_virtual_disk after
# the disk/image code in spawn gets refactored
mock_copy_virtual_disk.assert_called_with(self._session,
dc_ref,
source_file,
dest_file)
if config_drive:
mock_configure_config_drive.assert_called_once_with(
self._context, self._instance, 'fake_vm_ref',
self._dc_info, self._ds, 'fake_files', 'password',
network_info)
mock_update_vnic_index.assert_called_once_with(
self._context, self._instance, network_info)
@mock.patch.object(ds_util, 'get_datastore')
@mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
def _test_get_spawn_vm_config_info(self,
mock_get_datacenter_ref_and_name,
mock_get_datastore,
image_size_bytes=0):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size_bytes,
linked_clone=True)
mock_get_datastore.return_value = self._ds
mock_get_datacenter_ref_and_name.return_value = self._dc_info
extra_specs = vm_util.ExtraSpecs()
vi = self._vmops._get_vm_config_info(self._instance, image_info,
extra_specs)
self.assertEqual(image_info, vi.ii)
self.assertEqual(self._ds, vi.datastore)
self.assertEqual(self._instance.flavor.root_gb, vi.root_gb)
self.assertEqual(self._instance, vi.instance)
self.assertEqual(self._instance.uuid, vi.instance.uuid)
self.assertEqual(extra_specs, vi._extra_specs)
cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
self._ds.name, self._image_id, self._image_id)
self.assertEqual(cache_image_path, str(vi.cache_image_path))
cache_image_folder = '[%s] vmware_base/%s' % (
self._ds.name, self._image_id)
self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
def test_get_spawn_vm_config_info(self):
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
def test_get_spawn_vm_config_info_image_too_big(self):
image_size = (self._instance.flavor.root_gb + 1) * units.Gi
self.assertRaises(exception.InstanceUnacceptable,
self._test_get_spawn_vm_config_info,
image_size_bytes=image_size)
def test_spawn(self):
self._test_spawn()
def test_spawn_config_drive_enabled(self):
self.flags(force_config_drive=True)
self._test_spawn(config_drive=True)
def test_spawn_with_block_device_info(self):
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_with_config_drive(self):
self.flags(force_config_drive=True)
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info,
config_drive=True)
def _spawn_with_block_device_info_ephemerals(self, ephemerals):
block_device_info = {'ephemerals': ephemerals}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_ephemerals(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_ephemerals_no_disk_bus(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_swap(self):
block_device_info = {'swap': {'disk_bus': None,
'swap_size': 512,
'device_name': '/dev/sdb'}}
self._test_spawn(block_device_info=block_device_info)
@mock.patch.object(vm_util, 'rename_vm')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, '_create_and_attach_thin_disk')
@mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(images.VMwareImage, 'from_image')
def test_spawn_with_ephemerals_and_swap(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
enlist_image,
fetch_image,
use_disk_image,
create_and_attach_thin_disk,
power_on_instance,
rename_vm):
self._instance.flavor = objects.Flavor(vcpus=1, memory_mb=512,
name="m1.tiny", root_gb=1,
ephemeral_gb=1, swap=512,
extra_specs={})
extra_specs = self._vmops._get_extra_specs(self._instance.flavor)
ephemerals = [{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdb',
'size': 1},
{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdc',
'size': 1}]
swap = {'disk_bus': None, 'swap_size': 512, 'device_name': '/dev/vdd'}
bdi = {'block_device_mapping': [], 'root_device_name': '/dev/sda',
'ephemerals': ephemerals, 'swap': swap}
metadata = self._vmops._get_instance_metadata(self._context,
self._instance)
self.flags(enabled=False, group='vnc')
self.flags(flat_injected=False)
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
self._vmops.spawn(self._context, self._instance, {},
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(
self._context, self._instance.image_ref, {})
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [], extra_specs, metadata)
enlist_image.assert_called_once_with(image_info.image_id,
vi.datastore, vi.dc_info.ref)
fetch_image.assert_called_once_with(self._context, vi)
use_disk_image.assert_called_once_with('fake-vm-ref', vi)
# _create_and_attach_thin_disk should be called for each ephemeral
# and swap disk
eph0_path = str(ds_obj.DatastorePath(vi.datastore.name,
self._uuid,
'ephemeral_0.vmdk'))
eph1_path = str(ds_obj.DatastorePath(vi.datastore.name,
self._uuid,
'ephemeral_1.vmdk'))
swap_path = str(ds_obj.DatastorePath(vi.datastore.name,
self._uuid,
'swap.vmdk'))
create_and_attach_thin_disk.assert_has_calls([
mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
ephemerals[0]['size'] * units.Mi, vi.ii.adapter_type,
eph0_path),
mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
ephemerals[1]['size'] * units.Mi, vi.ii.adapter_type,
eph1_path),
mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
swap['swap_size'] * units.Ki, vi.ii.adapter_type,
swap_path)
])
power_on_instance.assert_called_once_with(self._session,
self._instance,
vm_ref='fake-vm-ref')
def _get_fake_vi(self):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=7,
linked_clone=False)
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock.Mock())
return vi
@mock.patch.object(vm_util, 'create_virtual_disk')
def test_create_and_attach_thin_disk(self, mock_create):
vi = self._get_fake_vi()
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid,
'fake-filename'))
self._vmops._create_and_attach_thin_disk(self._instance,
'fake-vm-ref',
vi.dc_info, 1,
'fake-adapter-type',
path)
mock_create.assert_called_once_with(
self._session, self._dc_info.ref, 'fake-adapter-type',
'thin', path, 1)
mock_attach_disk_to_vm.assert_called_once_with(
'fake-vm-ref', self._instance, 'fake-adapter-type',
'thin', path, 1, False)
def test_create_ephemeral_with_bdi(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {'ephemerals': ephemerals}
vi = self._get_fake_vi()
with mock.patch.object(
self._vmops, '_create_and_attach_thin_disk') as mock_caa:
self._vmops._create_ephemeral(block_device_info,
self._instance,
'fake-vm-ref',
vi.dc_info, vi.datastore,
self._uuid,
vi.ii.adapter_type)
mock_caa.assert_called_once_with(
self._instance, 'fake-vm-ref',
vi.dc_info, 1 * units.Mi, 'virtio',
'[fake_ds] %s/ephemeral_0.vmdk' % self._uuid)
def _test_create_ephemeral_from_instance(self, bdi):
vi = self._get_fake_vi()
with mock.patch.object(
self._vmops, '_create_and_attach_thin_disk') as mock_caa:
self._vmops._create_ephemeral(bdi,
self._instance,
'fake-vm-ref',
vi.dc_info, vi.datastore,
self._uuid,
vi.ii.adapter_type)
mock_caa.assert_called_once_with(
self._instance, 'fake-vm-ref',
vi.dc_info, 1 * units.Mi, constants.DEFAULT_ADAPTER_TYPE,
'[fake_ds] %s/ephemeral_0.vmdk' % self._uuid)
def test_create_ephemeral_with_bdi_but_no_ephemerals(self):
block_device_info = {'ephemerals': []}
self._instance.flavor.ephemeral_gb = 1
self._test_create_ephemeral_from_instance(block_device_info)
def test_create_ephemeral_with_no_bdi(self):
self._instance.flavor.ephemeral_gb = 1
self._test_create_ephemeral_from_instance(None)
def _test_create_swap_from_instance(self, bdi):
vi = self._get_fake_vi()
flavor = objects.Flavor(vcpus=1, memory_mb=1024, ephemeral_gb=1,
swap=1024, extra_specs={})
self._instance.flavor = flavor
with mock.patch.object(
self._vmops, '_create_and_attach_thin_disk'
) as create_and_attach:
self._vmops._create_swap(bdi, self._instance, 'fake-vm-ref',
vi.dc_info, vi.datastore, self._uuid,
'lsiLogic')
size = flavor.swap * units.Ki
if bdi is not None:
swap = bdi.get('swap', {})
size = swap.get('swap_size', 0) * units.Ki
path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid,
'swap.vmdk'))
create_and_attach.assert_called_once_with(self._instance,
'fake-vm-ref', vi.dc_info, size, 'lsiLogic', path)
def test_create_swap_with_bdi(self):
block_device_info = {'swap': {'disk_bus': None,
'swap_size': 512,
'device_name': '/dev/sdb'}}
self._test_create_swap_from_instance(block_device_info)
def test_create_swap_with_no_bdi(self):
self._test_create_swap_from_instance(None)
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
def test_build_virtual_machine(self, mock_create_folder):
image = images.VMwareImage(image_id=self._image_id)
extra_specs = vm_util.ExtraSpecs()
vm_ref = self._vmops.build_virtual_machine(self._instance,
image, self._dc_info,
self._ds,
self.network_info,
extra_specs,
self._metadata)
vm = vmwareapi_fake.get_object(vm_ref)
# Test basic VM parameters
self.assertEqual(self._instance.uuid, vm.name)
self.assertEqual(self._instance.uuid,
vm.get('summary.config.instanceUuid'))
self.assertEqual(self._instance_values['vcpus'],
vm.get('summary.config.numCpu'))
self.assertEqual(self._instance_values['memory_mb'],
vm.get('summary.config.memorySizeMB'))
# Test NSX config
for optval in vm.get('config.extraConfig').OptionValue:
if optval.key == 'nvp.vm-uuid':
self.assertEqual(self._instance_values['uuid'], optval.value)
break
else:
self.fail('nvp.vm-uuid not found in extraConfig')
# Test that the VM is associated with the specified datastore
datastores = vm.datastore.ManagedObjectReference
self.assertEqual(1, len(datastores))
datastore = vmwareapi_fake.get_object(datastores[0])
self.assertEqual(self._ds.name, datastore.get('summary.name'))
# Test that the VM's network is configured as specified
devices = vm.get('config.hardware.device').VirtualDevice
for device in devices:
if device.obj_name != 'ns0:VirtualE1000':
continue
self.assertEqual(self._network_values['address'],
device.macAddress)
break
else:
self.fail('NIC not configured')
def test_spawn_cpu_limit(self):
cpu_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_reservation(self):
cpu_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_allocations(self):
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_shares_level(self):
cpu_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_shares_custom(self):
cpu_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_limit(self):
memory_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_reservation(self):
memory_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_allocations(self):
memory_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_shares_level(self):
memory_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_shares_custom(self):
memory_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_limit(self):
vif_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_reservation(self):
vif_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_shares_level(self):
vif_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_shares_custom(self):
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def _validate_extra_specs(self, expected, actual):
self.assertEqual(expected.cpu_limits.limit,
actual.cpu_limits.limit)
self.assertEqual(expected.cpu_limits.reservation,
actual.cpu_limits.reservation)
self.assertEqual(expected.cpu_limits.shares_level,
actual.cpu_limits.shares_level)
self.assertEqual(expected.cpu_limits.shares_share,
actual.cpu_limits.shares_share)
def _validate_flavor_extra_specs(self, flavor_extra_specs, expected):
# Validate that the extra specs are parsed correctly
flavor = objects.Flavor(name='my-flavor',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=flavor_extra_specs)
flavor_extra_specs = self._vmops._get_extra_specs(flavor, None)
self._validate_extra_specs(expected, flavor_extra_specs)
"""
The test covers the negative failure scenario, where `hw_video_ram`,
coming from the image is bigger than the maximum allowed video ram from
the flavor.
"""
def test_video_ram(self):
meta_dict = {'id': self._image_id, 'properties': {'hw_video_ram': 120}}
image_meta, flavor = self._get_image_and_flavor_for_test_video(
meta_dict)
self.assertRaises(exception.RequestedVRamTooHigh,
self._vmops._get_extra_specs,
flavor,
image_meta)
"""
Testing VM provisioning result in the case where `hw_video_ram`,
coming from the image is not specified. This is a success scenario,
in the case where `hw_video_ram` property is not set.
"""
def test_video_ram_if_none(self):
meta_dict = {'id': self._image_id, 'properties': {}}
image_meta, flavor = self._get_image_and_flavor_for_test_video(
meta_dict)
extra_specs = self._vmops._get_extra_specs(flavor, image_meta)
self.assertIsNone(extra_specs.hw_video_ram)
"""
Testing VM provisioning result in the case where `hw_video:ram_max_mb`,
coming from the flavor is not specified. This is a success scenario,
in the case where `hw_video_ram` property is not set.
"""
def test_max_video_ram_none(self):
meta_dict = {'id': self._image_id, 'properties': {'hw_video_ram': 120}}
image_meta = objects.ImageMeta.from_dict(meta_dict)
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6}
flavor = objects.Flavor(name='my-flavor',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=flavor_extra_specs)
self.assertRaises(exception.RequestedVRamTooHigh,
self._vmops._get_extra_specs,
flavor,
image_meta)
"""
Testing VM provisioning result in the case where `hw_video_ram`,
coming from the image is less than the maximum allowed video ram from
the flavor. This is a success scenario, in the case where `hw_video_ram`
property is set in the extra spec.
"""
def test_success_video_ram(self):
expected_video_ram = 90
meta_dict = {'id': self._image_id, 'properties': {
'hw_video_ram': expected_video_ram}}
image_meta, flavor = self._get_image_and_flavor_for_test_video(
meta_dict)
extra_specs = self._vmops._get_extra_specs(flavor, image_meta)
self.assertEqual(self._calculate_expected_fake_video_ram(
expected_video_ram), extra_specs.hw_video_ram)
"""
Testing VM provisioning result in the case where `hw_video_ram`,
coming from the image is equal to 0. This is a success scenario, in the
case where `hw_video_ram` property is not set in the extra spec.
"""
def test_zero_video_ram(self):
meta_dict = {'id': self._image_id, 'properties': {'hw_video_ram': 0}}
image_meta, flavor = self._get_image_and_flavor_for_test_video(
meta_dict)
extra_specs = self._vmops._get_extra_specs(flavor, image_meta)
self.assertIsNone(extra_specs.hw_video_ram)
def _calculate_expected_fake_video_ram(self, amount):
return amount * units.Mi / units.Ki
def _get_image_and_flavor_for_test_video(self, meta_dict):
image_meta = objects.ImageMeta.from_dict(meta_dict)
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6,
'hw_video:ram_max_mb': 100}
flavor = objects.Flavor(name='my-flavor',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=flavor_extra_specs)
return image_meta, flavor
def test_extra_specs_cpu_limit(self):
flavor_extra_specs = {'quota:cpu_limit': 7}
cpu_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_reservations(self):
flavor_extra_specs = {'quota:cpu_reservation': 7}
cpu_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_allocations(self):
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6}
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_shares_level(self):
flavor_extra_specs = {'quota:cpu_shares_level': 'high'}
cpu_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_shares_custom(self):
flavor_extra_specs = {'quota:cpu_shares_level': 'custom',
'quota:cpu_shares_share': 1948}
cpu_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_vif_shares_custom_pos01(self):
flavor_extra_specs = {'quota:vif_shares_level': 'custom',
'quota:vif_shares_share': 40}
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=40)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_vif_shares_with_invalid_level(self):
flavor_extra_specs = {'quota:vif_shares_level': 'high',
'quota:vif_shares_share': 40}
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=40)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self.assertRaises(exception.InvalidInput,
self._validate_flavor_extra_specs, flavor_extra_specs, extra_specs)
def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False,
vsphere_location=None):
disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
else constants.DEFAULT_DISK_TYPE)
file_type = (constants.DISK_FORMAT_ISO if is_iso
else constants.DEFAULT_DISK_FORMAT)
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
file_type=file_type,
disk_type=disk_type,
linked_clone=True,
vsphere_location=vsphere_location)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache)
return vi
@mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(vmops.VMwareVMOps, '_update_image_size')
def _test_fetch_image_if_missing(self,
mock_update_image_size,
mock_delete_datastore_file,
mock_cache_flat_image,
mock_cache_sparse_image,
mock_cache_iso_image,
mock_prepare_flat_image,
mock_prepare_sparse_image,
mock_prepare_iso_image,
mock_fetch_image_as_file,
mock_check_cache_folder,
is_iso=False,
is_sparse_disk=False):
tmp_dir_path = mock.Mock()
tmp_image_path = mock.Mock()
if is_iso:
mock_prepare = mock_prepare_iso_image
mock_cache = mock_cache_iso_image
elif is_sparse_disk:
mock_prepare = mock_prepare_sparse_image
mock_cache = mock_cache_sparse_image
else:
mock_prepare = mock_prepare_flat_image
mock_cache = mock_cache_flat_image
mock_prepare.return_value = tmp_dir_path, tmp_image_path
vi = self._make_vm_config_info(is_iso, is_sparse_disk)
self._vmops._fetch_image_if_missing(self._context, vi)
mock_check_cache_folder.assert_called_once_with(
self._ds.name, self._ds.ref)
mock_prepare.assert_called_once_with(vi)
mock_fetch_image_as_file.assert_called_once_with(
self._context, vi, tmp_image_path)
mock_cache.assert_called_once_with(vi, tmp_image_path)
mock_delete_datastore_file.assert_called_once_with(
str(tmp_dir_path), self._dc_info.ref)
if is_sparse_disk:
mock_update_image_size.assert_called_once_with(vi)
def test_fetch_image_if_missing(self):
self._test_fetch_image_if_missing()
def test_fetch_image_if_missing_with_sparse(self):
self._test_fetch_image_if_missing(
is_sparse_disk=True)
def test_fetch_image_if_missing_with_iso(self):
self._test_fetch_image_if_missing(
is_iso=True)
def test_get_esx_host_and_cookies(self):
datastore = mock.Mock()
datastore.get_connected_hosts.return_value = ['fira-host']
file_path = mock.Mock()
def fake_invoke(module, method, *args, **kwargs):
if method == 'AcquireGenericServiceTicket':
ticket = mock.Mock()
ticket.id = 'fira-ticket'
return ticket
elif method == 'get_object_property':
return 'fira-host'
with mock.patch.object(self._session, 'invoke_api', fake_invoke):
result = self._vmops._get_esx_host_and_cookies(datastore,
'ha-datacenter',
file_path)
self.assertEqual('fira-host', result[0])
cookies = result[1]
self.assertEqual(1, len(cookies))
self.assertEqual('vmware_cgi_ticket', cookies[0].name)
self.assertEqual('"fira-ticket"', cookies[0].value)
def test_fetch_vsphere_image(self):
vsphere_location = 'vsphere://my?dcPath=mycenter&dsName=mystore'
vi = self._make_vm_config_info(vsphere_location=vsphere_location)
image_ds_loc = mock.Mock()
datacenter_moref = mock.Mock()
fake_copy_task = mock.Mock()
with test.nested(
mock.patch.object(
self._session, 'invoke_api',
side_effect=[datacenter_moref, fake_copy_task]),
mock.patch.object(self._session, '_wait_for_task')) as (
invoke_api, wait_for_task):
self._vmops._fetch_vsphere_image(self._context, vi, image_ds_loc)
expected_calls = [
mock.call(
self._session.vim, 'FindByInventoryPath',
self._session.vim.service_content.searchIndex,
inventoryPath='mycenter'),
mock.call(self._session.vim, 'CopyDatastoreFile_Task',
self._session.vim.service_content.fileManager,
destinationDatacenter=self._dc_info.ref,
destinationName=str(image_ds_loc),
sourceDatacenter=datacenter_moref,
sourceName='[mystore]')]
invoke_api.assert_has_calls(expected_calls)
wait_for_task.assert_called_once_with(fake_copy_task)
@mock.patch.object(images, 'fetch_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies')
def test_fetch_image_as_file(self,
mock_get_esx_host_and_cookies,
mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
host = mock.Mock()
dc_name = 'ha-datacenter'
cookies = mock.Mock()
mock_get_esx_host_and_cookies.return_value = host, cookies
self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
mock_get_esx_host_and_cookies.assert_called_once_with(
vi.datastore,
dc_name,
image_ds_loc.rel_path)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
host,
self._session._port,
dc_name,
self._ds.name,
image_ds_loc.rel_path,
cookies=cookies)
@mock.patch.object(vutil, 'get_inventory_path')
@mock.patch.object(images, 'fetch_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies')
def test_fetch_image_as_file_exception(self,
mock_get_esx_host_and_cookies,
mock_fetch_image,
mock_get_inventory_path):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
dc_name = 'ha-datacenter'
mock_get_esx_host_and_cookies.side_effect = \
exception.HostNotFound(host='')
mock_get_inventory_path.return_value = self._dc_info.name
self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
mock_get_esx_host_and_cookies.assert_called_once_with(
vi.datastore,
dc_name,
image_ds_loc.rel_path)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session._host,
self._session._port,
self._dc_info.name,
self._ds.name,
image_ds_loc.rel_path,
cookies='Fake-CookieJar')
@mock.patch.object(images, 'fetch_image_stream_optimized',
return_value=123)
def test_fetch_image_as_vapp(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
image_ds_loc.parent.basename = 'fake-name'
self._vmops._fetch_image_as_vapp(self._context, vi, image_ds_loc)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session,
'fake-name',
self._ds.name,
vi.dc_info.vmFolder,
self._vmops._root_resource_pool)
self.assertEqual(vi.ii.file_size, 123)
@mock.patch.object(images, 'fetch_image_ova', return_value=123)
def test_fetch_image_as_ova(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
image_ds_loc.parent.basename = 'fake-name'
self._vmops._fetch_image_as_ova(self._context, vi, image_ds_loc)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session,
'fake-name',
self._ds.name,
vi.dc_info.vmFolder,
self._vmops._root_resource_pool)
self.assertEqual(vi.ii.file_size, 123)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_iso_image(self, mock_generate_uuid):
vi = self._make_vm_config_info(is_iso=True)
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
self._ds.name, self._image_id, self._image_id)
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
@mock.patch.object(ds_util, 'mkdir')
def test_prepare_sparse_image(self, mock_mkdir, mock_generate_uuid):
vi = self._make_vm_config_info(is_sparse_disk=True)
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
self._ds.name, self._image_id, "tmp-sparse.vmdk")
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
mock_mkdir.assert_called_once_with(self._session,
tmp_image_ds_loc.parent,
vi.dc_info.ref)
@mock.patch.object(ds_util, 'mkdir')
@mock.patch.object(vm_util, 'create_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_flat_image(self,
mock_generate_uuid,
mock_delete_datastore_file,
mock_create_virtual_disk,
mock_mkdir):
vi = self._make_vm_config_info()
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
self._ds.name, self._image_id, self._image_id)
expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
self._ds.name, self._image_id)
expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
self._ds.name, self._image_id, self._image_id)
mock_mkdir.assert_called_once_with(
self._session, DsPathMatcher(expected_image_path_parent),
self._dc_info.ref)
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
image_info = vi.ii
mock_create_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
image_info.adapter_type,
image_info.disk_type,
DsPathMatcher(expected_path_to_create),
image_info.file_size_in_kb)
mock_delete_datastore_file.assert_called_once_with(
DsPathMatcher(expected_image_path),
self._dc_info.ref)
@mock.patch.object(ds_util, 'file_move')
def test_cache_iso_image(self, mock_file_move):
vi = self._make_vm_config_info(is_iso=True)
tmp_image_ds_loc = mock.Mock()
self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
mock_file_move.assert_called_once_with(
self._session, self._dc_info.ref,
tmp_image_ds_loc.parent,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
@mock.patch.object(ds_util, 'file_move')
def test_cache_flat_image(self, mock_file_move):
vi = self._make_vm_config_info()
tmp_image_ds_loc = mock.Mock()
self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
mock_file_move.assert_called_once_with(
self._session, self._dc_info.ref,
tmp_image_ds_loc.parent,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'mkdir')
def test_cache_stream_optimized_image(self, mock_mkdir, mock_disk_move):
vi = self._make_vm_config_info()
self._vmops._cache_stream_optimized_image(vi, mock.sentinel.tmp_image)
mock_mkdir.assert_called_once_with(
self._session,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id),
self._dc_info.ref)
mock_disk_move.assert_called_once_with(
self._session, self._dc_info.ref,
mock.sentinel.tmp_image,
DsPathMatcher('[fake_ds] vmware_base/%s/%s.vmdk' %
(self._image_id, self._image_id)))
@mock.patch.object(ds_util, 'file_move')
@mock.patch.object(vm_util, 'copy_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
def test_cache_sparse_image(self,
mock_delete_datastore_file,
mock_copy_virtual_disk,
mock_file_move):
vi = self._make_vm_config_info(is_sparse_disk=True)
sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
self._ds.name, self._image_id)
tmp_image_ds_loc = ds_obj.DatastorePath.parse(sparse_disk_path)
self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
self._ds.name,
self._image_id, self._image_id)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
sparse_disk_path,
DsPathMatcher(target_disk_path))
def test_get_storage_policy_none(self):
flavor = objects.Flavor(name='m1.small',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
self.flags(pbm_enabled=True,
pbm_default_policy='fake-policy', group='vmware')
extra_specs = self._vmops._get_extra_specs(flavor, None)
self.assertEqual('fake-policy', extra_specs.storage_policy)
def test_get_storage_policy_extra_specs(self):
extra_specs = {'vmware:storage_policy': 'flavor-policy'}
flavor = objects.Flavor(name='m1.small',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
self.flags(pbm_enabled=True,
pbm_default_policy='default-policy', group='vmware')
extra_specs = self._vmops._get_extra_specs(flavor, None)
self.assertEqual('flavor-policy', extra_specs.storage_policy)
def test_get_base_folder_not_set(self):
self.flags(subdirectory_name='vmware_base', group='image_cache')
base_folder = self._vmops._get_base_folder()
self.assertEqual('vmware_base', base_folder)
def test_get_base_folder_host_ip(self):
self.flags(my_ip='7.7.7.7')
self.flags(subdirectory_name='_base', group='image_cache')
base_folder = self._vmops._get_base_folder()
self.assertEqual('7.7.7.7_base', base_folder)
def test_get_base_folder_cache_prefix(self):
self.flags(cache_prefix='my_prefix', group='vmware')
self.flags(subdirectory_name='_base', group='image_cache')
base_folder = self._vmops._get_base_folder()
self.assertEqual('my_prefix_base', base_folder)
def _test_reboot_vm(self, reboot_type="SOFT", tool_status=True):
expected_methods = ['get_object_properties_dict']
if reboot_type == "SOFT":
expected_methods.append('RebootGuest')
else:
expected_methods.append('ResetVM_Task')
def fake_call_method(module, method, *args, **kwargs):
expected_method = expected_methods.pop(0)
self.assertEqual(expected_method, method)
if expected_method == 'get_object_properties_dict' and tool_status:
return {
"runtime.powerState": "poweredOn",
"summary.guest.toolsStatus": "toolsOk",
"summary.guest.toolsRunningStatus": "guestToolsRunning"}
elif expected_method == 'get_object_properties_dict':
return {"runtime.powerState": "poweredOn"}
elif expected_method == 'ResetVM_Task':
return 'fake-task'
with test.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(self._session, "_call_method",
fake_call_method),
mock.patch.object(self._session, "_wait_for_task")
) as (_get_vm_ref, fake_call_method, _wait_for_task):
self._vmops.reboot(self._instance, self.network_info, reboot_type)
_get_vm_ref.assert_called_once_with(self._session,
self._instance)
if reboot_type == "HARD":
_wait_for_task.assert_has_calls([
mock.call('fake-task')])
def test_reboot_vm_soft(self):
self._test_reboot_vm()
def test_reboot_vm_hard_toolstatus(self):
self._test_reboot_vm(reboot_type="HARD", tool_status=False)
def test_reboot_vm_hard(self):
self._test_reboot_vm(reboot_type="HARD")
def test_get_instance_metadata(self):
flavor = objects.Flavor(id=7,
name='m1.small',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
self._instance.flavor = flavor
metadata = self._vmops._get_instance_metadata(
self._context, self._instance)
expected = ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:8\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
"flavor:swap:33550336\n"
"imageid:%s\n"
"package:%s\n" % (
uuids.image,
version.version_string_with_package()))
self.assertEqual(expected, metadata)
def test_get_instance_metadata_flavor(self):
# Construct a flavor different from instance.flavor
flavor_int_meta_fields = ['memory_mb',
'vcpus',
'root_gb',
'ephemeral_gb',
'swap']
flavor = self._instance.flavor.obj_clone()
for field in flavor_int_meta_fields:
# Set int fields of flavor to instance.flavor value + 1
setattr(flavor, field, getattr(self._instance.flavor, field) + 1)
flavor.name = self._instance.flavor.name + '1'
metadata = self._vmops._get_instance_metadata(
self._context, self._instance, flavor)
# Verify metadata contains the values from flavor parameter
meta_lines = metadata.split('\n')
flavor_meta_fields = flavor_int_meta_fields[:]
flavor_meta_fields.append('name')
for field in flavor_meta_fields:
meta_repr = 'flavor:%s:%s' % (field, getattr(flavor, field))
self.assertIn(meta_repr, meta_lines)
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_attach_config_spec',
return_value='fake-attach-spec')
@mock.patch.object(vm_util, 'get_attach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_attach_interface(self, mock_get_vm_ref,
mock_get_attach_port_index,
mock_get_network_attach_config_spec,
mock_reconfigure_vm,
mock_extra_specs):
_network_api = mock.Mock()
self._vmops._network_api = _network_api
vif_info = vif.get_vif_dict(self._session, self._cluster,
'VirtualE1000', self._network_values)
extra_specs = vm_util.ExtraSpecs()
mock_extra_specs.return_value = extra_specs
self._vmops.attach_interface(self._context, self._instance,
self._image_meta, self._network_values)
mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
mock_get_attach_port_index.assert_called_once_with(self._session,
'fake-ref')
mock_get_network_attach_config_spec.assert_called_once_with(
self._session.vim.client.factory, vif_info, 1,
extra_specs.vif_limits)
mock_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-attach-spec')
_network_api.update_instance_vnic_index.assert_called_once_with(
mock.ANY, self._instance, self._network_values, 1)
@mock.patch.object(vif, 'get_network_device', return_value='device')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_detach_config_spec',
return_value='fake-detach-spec')
@mock.patch.object(vm_util, 'get_vm_detach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_detach_interface(self, mock_get_vm_ref,
mock_get_detach_port_index,
mock_get_network_detach_config_spec,
mock_reconfigure_vm,
mock_get_network_device):
_network_api = mock.Mock()
self._vmops._network_api = _network_api
with mock.patch.object(self._session, '_call_method',
return_value='hardware-devices'):
self._vmops.detach_interface(self._context, self._instance,
self._network_values)
mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
mock_get_detach_port_index.assert_called_once_with(self._session,
'fake-ref', None)
mock_get_network_detach_config_spec.assert_called_once_with(
self._session.vim.client.factory, 'device', 1)
mock_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-detach-spec')
_network_api.update_instance_vnic_index.assert_called_once_with(
mock.ANY, self._instance, self._network_values, None)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_get_mks_console(self, mock_get_vm_ref):
ticket = mock.MagicMock()
ticket.host = 'esx1'
ticket.port = 902
ticket.ticket = 'fira'
ticket.sslThumbprint = 'aa:bb:cc:dd:ee:ff'
ticket.cfgFile = '[ds1] fira/foo.vmx'
with mock.patch.object(self._session, '_call_method',
return_value=ticket):
console = self._vmops.get_mks_console(self._instance)
self.assertEqual('esx1', console.host)
self.assertEqual(902, console.port)
path = jsonutils.loads(console.internal_access_path)
self.assertEqual('fira', path['ticket'])
self.assertEqual('aabbccddeeff', path['thumbprint'])
self.assertEqual('[ds1] fira/foo.vmx', path['cfgFile'])
def test_get_cores_per_socket(self):
extra_specs = {'hw:cpu_sockets': 7}
flavor = objects.Flavor(name='m1.small',
memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
extra_specs = self._vmops._get_extra_specs(flavor, None)
self.assertEqual(4, int(extra_specs.cores_per_socket))
def test_get_folder_name(self):
uuid = uuidutils.generate_uuid()
name = 'fira'
expected = 'fira (%s)' % uuid
folder_name = self._vmops._get_folder_name(name, uuid)
self.assertEqual(expected, folder_name)
name = 'X' * 255
expected = '%s (%s)' % ('X' * 40, uuid)
folder_name = self._vmops._get_folder_name(name, uuid)
self.assertEqual(expected, folder_name)
self.assertEqual(79, len(folder_name))
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_attach_config_spec',
return_value='fake-attach-spec')
@mock.patch.object(vm_util, 'get_attach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_attach_interface_with_limits(self, mock_get_vm_ref,
mock_get_attach_port_index,
mock_get_network_attach_config_spec,
mock_reconfigure_vm,
mock_extra_specs):
_network_api = mock.Mock()
self._vmops._network_api = _network_api
vif_info = vif.get_vif_dict(self._session, self._cluster,
'VirtualE1000', self._network_values)
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=40)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
mock_extra_specs.return_value = extra_specs
self._vmops.attach_interface(self._context, self._instance,
self._image_meta,
self._network_values)
mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
mock_get_attach_port_index.assert_called_once_with(self._session,
'fake-ref')
mock_get_network_attach_config_spec.assert_called_once_with(
self._session.vim.client.factory, vif_info, 1,
extra_specs.vif_limits)
mock_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-attach-spec')
_network_api.update_instance_vnic_index.assert_called_once_with(
mock.ANY, self._instance, self._network_values, 1)
| 48.352665
| 79
| 0.582192
|
adce03f2f3fc91dd267ac242fdcee4f18ea7a73e
| 1,924
|
py
|
Python
|
CountFingers_v2.py
|
VDHARV/hand-tracking
|
03653f5b0a0a6f0f362047d86c94b0624e8e6a43
|
[
"MIT"
] | null | null | null |
CountFingers_v2.py
|
VDHARV/hand-tracking
|
03653f5b0a0a6f0f362047d86c94b0624e8e6a43
|
[
"MIT"
] | null | null | null |
CountFingers_v2.py
|
VDHARV/hand-tracking
|
03653f5b0a0a6f0f362047d86c94b0624e8e6a43
|
[
"MIT"
] | null | null | null |
import cv2
from HandDetector import HandDetector
def CountFingersV2(detector=None, vid=None):
    # Fall back to a default hand detector and the first webcam when the
    # caller does not supply them.
    if detector is None:
        detector = HandDetector(detectionCon=0.75, trackCon=0.75)
    if vid is None:
        vid = cv2.VideoCapture(0)
while True:
success, img = vid.read()
img = detector.find_hands(img)
landmark_list = detector.find_position(img)
if landmark_list:
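            # A finger counts as raised when its tip landmark sits above the
            # joint below it (MediaPipe-style hand indices, which HandDetector
            # presumably exposes); the thumb is compared on the x-axis instead.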
fingers = [(landmark_list[8][2], landmark_list[7][2], 0),
(landmark_list[12][2], landmark_list[11][2], 1),
(landmark_list[16][2], landmark_list[15][2], 2),
(landmark_list[20][2], landmark_list[19][2], 3),
(landmark_list[5][1], landmark_list[4][1], 4)]
finger_up = [0, 0, 0, 0, 0]
for finger in fingers:
if finger[0] < finger[1]:
finger_up[finger[2]] = 1
if finger_up[0]:
if finger_up[1]:
if finger_up[2]:
if finger_up[3]:
if finger_up[4]:
cv2.putText(img, '5', (500, 500), cv2.FONT_HERSHEY_TRIPLEX, 5, (250, 0, 250), 3)
else:
cv2.putText(img, '4', (500, 500), cv2.FONT_HERSHEY_TRIPLEX, 5, (250, 0, 250), 3)
else:
cv2.putText(img, '3', (500, 500), cv2.FONT_HERSHEY_TRIPLEX, 5, (250, 0, 250), 3)
else:
cv2.putText(img, '2', (500, 500), cv2.FONT_HERSHEY_TRIPLEX, 5, (250, 0, 250), 3)
else:
cv2.putText(img, '1', (500, 500), cv2.FONT_HERSHEY_TRIPLEX, 5, (250, 0, 250), 3)
else:
cv2.putText(img, '0', (500, 500), cv2.FONT_HERSHEY_TRIPLEX, 5, (250, 0, 250), 3)
cv2.imshow('Finger Count', img)
        # bitwise '&' is required here; 'and 0xFF == ord('q')' was always False,
        # so the loop could never be stopped with the 'q' key
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.release()
    cv2.destroyAllWindows()
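# Minimal usage sketch (added for illustration; assumes HandDetector.py from the
# same repo is importable and a webcam is available). Passing None for both
# arguments makes the function fall back to its own detector and capture device.
if __name__ == '__main__':
    CountFingersV2(None, None)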
| 45.809524
| 112
| 0.476611
|
de21d33245a062cdc438482f3d2e51c3d1e0a7cc
| 510
|
py
|
Python
|
thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | 3,266
|
2017-08-06T16:51:46.000Z
|
2022-03-30T07:34:24.000Z
|
thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | 150
|
2017-08-28T14:59:36.000Z
|
2022-03-11T23:21:35.000Z
|
thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | 1,449
|
2017-08-06T17:40:59.000Z
|
2022-03-31T12:03:24.000Z
|
import os
import antlr3
import testbase
import unittest
class t019lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def testValid(self):
inputPath = os.path.splitext(__file__)[0] + '.input'
stream = antlr3.StringStream(open(inputPath).read())
lexer = self.getLexer(stream)
while True:
token = lexer.nextToken()
if token.type == antlr3.EOF:
break
if __name__ == '__main__':
unittest.main()
| 22.173913
| 60
| 0.605882
|
8f6a26173e6f16cb64da1a758c8a68859a440133
| 22,250
|
py
|
Python
|
python/backup/_app.py
|
katie0809/2021AiHub-ODQA
|
a6377efd336217afab5de6797e0449ebce5837a2
|
[
"MIT"
] | null | null | null |
python/backup/_app.py
|
katie0809/2021AiHub-ODQA
|
a6377efd336217afab5de6797e0449ebce5837a2
|
[
"MIT"
] | null | null | null |
python/backup/_app.py
|
katie0809/2021AiHub-ODQA
|
a6377efd336217afab5de6797e0449ebce5837a2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_addons as tfa
import os
import re
import numpy as np
import pandas as pd
import pickle
import random
import collections
import json
from datetime import datetime
import sentencepiece as spm
from flask import Flask, request
from flask_api import status
random_seed = 1234
random.seed(random_seed)
np.random.seed(random_seed)
tf.random.set_seed(random_seed)
class Config(dict):
"""
    Class for using a json dict as a Config
:param dict: config dictionary
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
args = Config({
'max_seq_length': 384,
'max_query_length': 64,
})
def get_vocab():
vocab = spm.SentencePieceProcessor()
vocab.load(
f"../models/ko_32000.model")
return vocab
vocab = get_vocab()
# Utility functions
def get_pad_mask(tokens, i_pad=0):
"""
    Computes the pad mask
:param tokens: tokens (bs, n_seq)
:param i_pad: id of pad
:return mask: pad mask (pad: 1, other: 0)
"""
mask = tf.cast(tf.math.equal(tokens, i_pad), tf.float32)
mask = tf.expand_dims(mask, axis=1)
return mask
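# Illustration (added comment, not in the original): pad positions become 1.0,
# real tokens 0.0, and the extra axis lets the mask broadcast over attention heads.
# >>> get_pad_mask(tf.constant([[5, 7, 0, 0]]))
# -> shape (1, 1, 4), values [[[0., 0., 1., 1.]]]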
def get_ahead_mask(tokens, i_pad=0):
"""
    Computes the ahead (look-ahead) mask
:param tokens: tokens (bs, n_seq)
:param i_pad: id of pad
:return mask: ahead and pad mask (ahead or pad: 1, other: 0)
"""
n_seq = tf.shape(tokens)[1]
ahead_mask = 1 - tf.linalg.band_part(tf.ones((n_seq, n_seq)), -1, 0)
ahead_mask = tf.expand_dims(ahead_mask, axis=0)
pad_mask = get_pad_mask(tokens, i_pad)
mask = tf.maximum(ahead_mask, pad_mask)
return mask
@tf.function(experimental_relax_shapes=True)
def gelu(x):
"""
    gelu activation function
    :param x: input value
:return: gelu activation result
"""
return 0.5 * x * (1 + K.tanh(x * 0.7978845608 * (1 + 0.044715 * x * x)))
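# Sanity-check sketch (illustrative; assumes TF >= 2.4 where tf.nn.gelu exists):
# the expression above is the tanh approximation
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# so it should closely match tf.nn.gelu(x, approximate=True).
# >>> x = tf.constant([-1.0, 0.0, 1.0])
# >>> tf.reduce_max(tf.abs(gelu(x) - tf.nn.gelu(x, approximate=True)))  # ~0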
def kernel_initializer(stddev=0.02):
"""
    Creates a parameter initializer
    :param stddev: standard deviation of the generated random values
"""
return tf.keras.initializers.TruncatedNormal(stddev=stddev)
def bias_initializer():
"""
    Creates a bias initializer
"""
return tf.zeros_initializer
class Config(dict):
"""
    Class for using a json dict as a Config
:param dict: config dictionary
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
@classmethod
def load(cls, file):
"""
        Creates a Config from a file
:param file: filename
"""
with open(file, 'r') as f:
config = json.loads(f.read())
return Config(config)
# mode == "embedding" 일 경우 Token Embedding Layer 로 사용되는 layer 클래스입니다.
class SharedEmbedding(tf.keras.layers.Layer):
"""
Weighed Shared Embedding Class
"""
def __init__(self, config, name="weight_shared_embedding"):
"""
        Constructor
        :param config: Config object
:param name: layer name
"""
super().__init__(name=name)
self.n_vocab = config.n_vocab
self.d_model = config.d_model
def build(self, input_shape):
"""
        Creates the shared weights
:param input_shape: Tensor Shape (not used)
"""
with tf.name_scope("shared_embedding_weight"):
self.shared_weights = self.add_weight(
"weights",
shape=[self.n_vocab, self.d_model],
initializer=kernel_initializer()
)
def call(self, inputs, mode="embedding"):
"""
        Runs the layer
        :param inputs: inputs
        :param mode: execution mode ("embedding" or "linear")
        :return: embedding or linear result
"""
        # embedding lookup when mode is "embedding"
if mode == "embedding":
return self._embedding(inputs)
        # linear projection when mode is "linear"
elif mode == "linear":
return self._linear(inputs)
        # any other mode is an error
else:
raise ValueError(f"mode {mode} is not valid.")
def _embedding(self, inputs):
"""
embedding lookup
        :param inputs: inputs
"""
embed = tf.gather(self.shared_weights, tf.cast(inputs, tf.int32))
return embed
def _linear(self, inputs): # (bs, n_seq, d_model)
"""
        Runs the linear projection
        :param inputs: inputs
"""
n_batch = tf.shape(inputs)[0]
n_seq = tf.shape(inputs)[1]
# (bs * n_seq, d_model)
inputs = tf.reshape(inputs, [-1, self.d_model])
outputs = tf.matmul(inputs, self.shared_weights, transpose_b=True)
# (bs, n_seq, n_vocab)
outputs = tf.reshape(outputs, [n_batch, n_seq, self.n_vocab])
return outputs
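# Usage note (added for illustration): the same weight matrix is shared between
# the lookup and the output projection, e.g.
#   emb = SharedEmbedding(config)
#   vec = emb(tokens)                 # (bs, n_seq, d_model) embedding lookup
#   logits = emb(vec, mode="linear")  # (bs, n_seq, n_vocab) tied projection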
class PositionalEmbedding(tf.keras.layers.Layer):
"""
Positional Embedding Class
"""
def __init__(self, config, name="position_embedding"):
"""
        Constructor
        :param config: Config object
:param name: layer name
"""
super().__init__(name=name)
self.embedding = tf.keras.layers.Embedding(
config.n_seq, config.d_model, embeddings_initializer=kernel_initializer())
def call(self, inputs):
"""
        Runs the layer
        :param inputs: inputs
        :return embed: positional embedding lookup result
"""
position = tf.cast(tf.math.cumsum(tf.ones_like(
inputs), axis=1, exclusive=True), tf.int32)
embed = self.embedding(position)
return embed
class ScaleDotProductAttention(tf.keras.layers.Layer):
"""
Scale Dot Product Attention Class
"""
def __init__(self, name="scale_dot_product_attention"):
"""
        Constructor
:param name: layer name
"""
super().__init__(name=name)
def call(self, Q, K, V, attn_mask):
"""
        Runs the layer
        :param Q: Q value
        :param K: K value
        :param V: V value
        :param attn_mask: attention mask
        :return attn_out: attention result
"""
attn_score = tf.matmul(Q, K, transpose_b=True)
scale = tf.math.sqrt(tf.cast(tf.shape(K)[-1], tf.float32))
attn_scale = tf.math.divide(attn_score, scale)
attn_scale -= 1.e9 * attn_mask
attn_prob = tf.nn.softmax(attn_scale, axis=-1)
attn_out = tf.matmul(attn_prob, V)
return attn_out
class MultiHeadAttention(tf.keras.layers.Layer):
"""
Multi Head Attention Class
"""
def __init__(self, config, name="multi_head_attention"):
"""
        Constructor
        :param config: Config object
:param name: layer name
"""
super().__init__(name=name)
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
# Q, K, V input dense layer
self.W_Q = tf.keras.layers.Dense(
config.n_head * config.d_head, kernel_initializer=kernel_initializer(), bias_initializer=bias_initializer())
self.W_K = tf.keras.layers.Dense(
config.n_head * config.d_head, kernel_initializer=kernel_initializer(), bias_initializer=bias_initializer())
self.W_V = tf.keras.layers.Dense(
config.n_head * config.d_head, kernel_initializer=kernel_initializer(), bias_initializer=bias_initializer())
# Scale Dot Product Attention class
self.attention = ScaleDotProductAttention(name="self_attention")
# output dense layer
self.W_O = tf.keras.layers.Dense(
config.d_model, kernel_initializer=kernel_initializer(), bias_initializer=bias_initializer())
def call(self, Q, K, V, attn_mask):
"""
        Runs the layer
        :param Q: Q value
        :param K: K value
        :param V: V value
        :param attn_mask: attention mask
        :return attn_out: attention result
"""
# reshape Q, K, V, attn_mask
batch_size = tf.shape(Q)[0]
Q_m = tf.transpose(tf.reshape(self.W_Q(Q), [
batch_size, -1, self.n_head, self.d_head]), [0, 2, 1, 3]) # (bs, n_head, Q_len, d_head)
K_m = tf.transpose(tf.reshape(self.W_K(K), [
batch_size, -1, self.n_head, self.d_head]), [0, 2, 1, 3]) # (bs, n_head, K_len, d_head)
V_m = tf.transpose(tf.reshape(self.W_V(V), [
batch_size, -1, self.n_head, self.d_head]), [0, 2, 1, 3]) # (bs, n_head, K_len, d_head)
attn_mask_m = tf.expand_dims(attn_mask, axis=1)
# Scale Dot Product Attention with multi head Q, K, V, attn_mask
# (bs, n_head, Q_len, d_head)
attn_out = self.attention(Q_m, K_m, V_m, attn_mask_m)
        # transpose and linear
# (bs, Q_len, n_head, d_head)
attn_out_m = tf.transpose(attn_out, perm=[0, 2, 1, 3])
# (bs, Q_len, d_model)
        # use the layer's own head configuration rather than the module-level global
        attn_out = tf.reshape(
            attn_out_m, [batch_size, -1, self.n_head * self.d_head])
attn_out = self.W_O(attn_out) # (bs, Q_len, d_model)
return attn_out
class PositionWiseFeedForward(tf.keras.layers.Layer):
"""
Position Wise Feed Forward Class
"""
def __init__(self, config, name="feed_forward"):
"""
        Constructor
        :param config: Config object
:param name: layer name
"""
super().__init__(name=name)
self.W_1 = tf.keras.layers.Dense(
config.d_ff, activation=gelu, kernel_initializer=kernel_initializer(), bias_initializer=bias_initializer())
self.W_2 = tf.keras.layers.Dense(
config.d_model, kernel_initializer=kernel_initializer(), bias_initializer=bias_initializer())
def call(self, inputs):
"""
        Runs the layer
        :param inputs: inputs
        :return ff_val: feed forward result
"""
ff_val = self.W_2(self.W_1(inputs))
return ff_val
class EncoderLayer(tf.keras.layers.Layer):
"""
Encoder Layer Class
"""
def __init__(self, config, name="encoder_layer"):
"""
        Constructor
        :param config: Config object
:param name: layer name
"""
super().__init__(name=name)
self.self_attention = MultiHeadAttention(config)
self.norm1 = tf.keras.layers.LayerNormalization(
epsilon=config.layernorm_epsilon)
self.ffn = PositionWiseFeedForward(config)
self.norm2 = tf.keras.layers.LayerNormalization(
epsilon=config.layernorm_epsilon)
self.dropout = tf.keras.layers.Dropout(config.dropout)
def call(self, enc_embed, self_mask):
"""
        Runs the layer
        :param enc_embed: enc_embed or the output of the previous EncoderLayer
        :param self_mask: pad mask for enc_tokens
        :return enc_out: EncoderLayer output
"""
self_attn_val = self.self_attention(
enc_embed, enc_embed, enc_embed, self_mask)
norm1_val = self.norm1(enc_embed + self.dropout(self_attn_val))
ffn_val = self.ffn(norm1_val)
enc_out = self.norm2(norm1_val + self.dropout(ffn_val))
return enc_out
class BERT(tf.keras.layers.Layer):
"""
BERT Class
"""
def __init__(self, config, name="bert"):
"""
        Constructor
        :param config: Config object
:param name: layer name
"""
super().__init__(name=name)
self.i_pad = config.i_pad
self.embedding = SharedEmbedding(config)
self.position = PositionalEmbedding(config)
self.segment = tf.keras.layers.Embedding(
2, config.d_model, embeddings_initializer=kernel_initializer())
self.norm = tf.keras.layers.LayerNormalization(
epsilon=config.layernorm_epsilon)
self.encoder_layers = [EncoderLayer(
config, name=f"encoder_layer_{i}") for i in range(config.n_layer)]
self.dropout = tf.keras.layers.Dropout(config.dropout)
def call(self, enc_tokens, segments):
"""
        Runs the layer
        :param enc_tokens: encoder tokens
        :param segments: token segments
        :return logits_cls: logits for the CLS position
        :return logits_lm: logits for the LM positions
"""
enc_self_mask = get_pad_mask(enc_tokens, self.i_pad)
enc_embed = self.get_embedding(enc_tokens, segments)
enc_out = self.dropout(enc_embed)
for encoder_layer in self.encoder_layers:
enc_out = encoder_layer(enc_out, enc_self_mask)
logits_cls = enc_out[:, 0]
logits_lm = enc_out
return logits_cls, logits_lm
def get_embedding(self, tokens, segments):
"""
token embedding, position embedding lookup
        :param tokens: input tokens
        :param segments: input segments
        :return embed: embedding result
"""
embed = self.embedding(tokens) + \
self.position(tokens) + self.segment(segments)
embed = self.norm(embed)
return embed
class BERT4KorQuAD(tf.keras.Model):
def __init__(self, config):
super().__init__(name='BERT4KorQuAD')
self.bert = BERT(config)
self.dense = tf.keras.layers.Dense(2)
def call(self, enc_tokens, segments):
logits_cls, logits_lm = self.bert(enc_tokens, segments)
hidden = self.dense(logits_lm) # (bs, n_seq, 2)
start_logits, end_logits = tf.split(
hidden, 2, axis=-1) # (bs, n_seq, 1), (bs, n_seq, 1)
start_logits = tf.squeeze(start_logits, axis=-1)
start_outputs = tf.keras.layers.Softmax(name="start")(start_logits)
end_logits = tf.squeeze(end_logits, axis=-1)
end_outputs = tf.keras.layers.Softmax(name="end")(end_logits)
return start_outputs, end_outputs
config = Config({"d_model": 256, "n_head": 4, "d_head": 64, "dropout": 0.1, "d_ff": 1024,
"layernorm_epsilon": 0.001, "n_layer": 3, "n_seq": 384, "n_vocab": 0, "i_pad": 0})
config.n_vocab = len(vocab)
config.i_pad = vocab.pad_id()
config
bert_batch_size = 32
model = BERT4KorQuAD(config)
def train_epoch(model, dataset, loss_fn, acc_fn, optimizer):
metric_start_loss = tf.keras.metrics.Mean(name='start_loss')
metric_end_loss = tf.keras.metrics.Mean(name='end_loss')
metric_start_acc = tf.keras.metrics.Mean(name='start_acc')
metric_end_acc = tf.keras.metrics.Mean(name='end_acc')
p_bar = dataset
for batch, ((enc_tokens, segments), (start_labels, end_labels)) in enumerate(p_bar):
with tf.GradientTape() as tape:
start_outputs, end_outputs = model(enc_tokens, segments)
start_loss = loss_fn(start_labels, start_outputs)
end_loss = loss_fn(end_labels, end_outputs)
loss = start_loss + end_loss
start_acc = acc_fn(start_labels, start_outputs)
end_acc = acc_fn(end_labels, end_outputs)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
metric_start_loss(start_loss)
metric_end_loss(end_loss)
metric_start_acc(start_acc)
metric_end_acc(end_acc)
return metric_start_loss.result(), metric_end_loss.result(), metric_start_acc.result(), metric_end_acc.result()
def eval_epoch(model, dataset, loss_fn, acc_fn):
metric_start_loss = tf.keras.metrics.Mean(name='start_loss')
metric_end_loss = tf.keras.metrics.Mean(name='end_loss')
metric_start_acc = tf.keras.metrics.Mean(name='start_acc')
metric_end_acc = tf.keras.metrics.Mean(name='end_acc')
for batch, ((enc_tokens, segments), (start_labels, end_labels)) in enumerate(dataset):
start_outputs, end_outputs = model(enc_tokens, segments)
start_loss = loss_fn(start_labels, start_outputs)
end_loss = loss_fn(end_labels, end_outputs)
start_acc = acc_fn(start_labels, start_outputs)
end_acc = acc_fn(end_labels, end_outputs)
metric_start_loss(start_loss)
metric_end_loss(end_loss)
metric_start_acc(start_acc)
metric_end_acc(end_acc)
return metric_start_loss.result(), metric_end_loss.result(), metric_start_acc.result(), metric_end_acc.result()
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def _improve_span(vocab, context_tokens, token_start, token_end, char_answer):
token_answer = " ".join(vocab.encode_as_pieces(char_answer))
for new_start in range(token_start, token_end + 1):
for new_end in range(token_end, new_start - 1, -1):
text_span = " ".join(context_tokens[new_start : (new_end + 1)])
if text_span == token_answer:
return (new_start, new_end)
return (token_start, token_end)
def _tokenize_vocab(vocab, context_words):
word_to_token = []
context_tokens = []
for (i, word) in enumerate(context_words):
word_to_token.append(len(context_tokens))
tokens = vocab.encode_as_pieces(word)
for token in tokens:
context_tokens.append(token)
return context_tokens, word_to_token
def _tokenize_whitespace(string):
word_tokens = []
char_to_word = []
prev_is_whitespace = True
for c in string:
if _is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
word_tokens.append(c)
else:
word_tokens[-1] += c
prev_is_whitespace = False
char_to_word.append(len(word_tokens) - 1)
return word_tokens, char_to_word
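# Behaviour sketch (added for illustration): words are split on whitespace and
# every character, including the whitespace itself, maps back to a word index.
# >>> _tokenize_whitespace("ab cd")
# (['ab', 'cd'], [0, 0, 0, 1, 1])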
def get_context_tokens(vocab, context):
context_words, char_to_word = _tokenize_whitespace(context)
context_tokens, word_to_token = _tokenize_vocab(vocab, context_words)
return context_tokens
def get_encoded_question_(vocab, question):
return vocab.encode_as_pieces(question)
def load_data_and_check(count=10):
rootdir = "/data/qa"
onlyfiles = [f for f in os.listdir(
rootdir)[:count] if os.path.isfile(os.path.join(rootdir, f))]
for file in onlyfiles:
filePath = rootdir + '/' + file
with open(filePath, "r") as f:
print(f'read {filePath}')
for i, line in enumerate(f):
data = json.loads(line)
#question = vocab.decode_pieces(data['qas'][0]['question'])
#context = vocab.decode_pieces(data['context'])
#answer = data['answer']
context = data['context']
for qa in data['qas']:
question = qa['question']
answer = qa['answer']['answer_text']
answer_predict = do_predict(model, question, context)
# if answer in answer_predict:
print(i)
print("질문 : ", question)
# print("지문 : ", context)
print("정답 : ", answer)
print("예측 : ", answer_predict, "\n")
def preprocess(text):
text = text.strip()
text = re.sub(r'\n', ' ', text)
text = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', text)
# text = re.sub(r"\\n", " ", text)
# text = re.sub(r"\s+", " ", text)
# text = re.sub(r'#', ' ', text)
text = re.sub(r"[^a-zA-Z0-9가-힣ㄱ-ㅎㅏ-ㅣぁ-ゔァ-ヴー々〆〤一-龥<>()\s\.\?!》《≪≫\'<>〈〉:‘’%,『』「」<>・\"-“”∧]", "", text)
text = re.sub(r' ', '', text)
return text
def do_predict(model, question, context):
"""
    Generates an answer for the given input
    :param model: model
    :param question: input string
    :param context: input string
"""
# args.max_seq_length = 500
# context = get_context_tokens(vocab, context)
    # preprocessing
context_words, char_to_word = _tokenize_whitespace(context)
context_tokens, word_to_token = _tokenize_vocab(vocab, context_words)
question = vocab.encode_as_pieces(question)
question = vocab.decode_pieces(question)
context = vocab.decode_pieces(context_tokens)
q_tokens = vocab.encode_as_pieces(question)
c_tokens = vocab.encode_as_pieces(context)[:args.max_seq_length - len(q_tokens) - 3]
tokens = ['[CLS]'] + q_tokens + ['[SEP]'] + c_tokens + ['[SEP]']
token_ids = [vocab.piece_to_id(token) for token in tokens]
segments = [0] * (len(q_tokens) + 2) + [1] * (len(c_tokens) + 1)
y_start, y_end = model(np.array([token_ids]), np.array([segments]))
# print(y_start, y_end)
y_start_idx = K.argmax(y_start, axis=-1)[0].numpy()
y_end_idx = K.argmax(y_end, axis=-1)[0].numpy()
answer_tokens = tokens[y_start_idx:y_end_idx + 1]
return vocab.decode_pieces(answer_tokens)
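# Example call (illustrative only; question_text/context_text stand in for a
# Korean question and the passage that should contain its answer):
# answer = do_predict(model, question_text, context_text)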
model_dir = "../models"
checkpoint_file = os.path.join(model_dir, 'korquad_bert_jihoon_pretrain2.hdf5')
model = BERT4KorQuAD(config)
if os.path.exists(checkpoint_file):
    # The model must be built (called once) before its weights can be loaded.
enc_tokens = np.random.randint(0, len(vocab), (4, 10))
segments = np.random.randint(0, 2, (4, 10))
model(enc_tokens, segments)
    # Load the required layers from the checkpoint file.
model.load_weights(checkpoint_file, by_name=True)
model.summary()
else:
print('NO Pretrained Model')
loss_fn = tf.keras.losses.sparse_categorical_crossentropy
acc_fn = tf.keras.metrics.sparse_categorical_accuracy
optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4)
after_pretrained_loss_raw = []
after_pretrained_acc_raw = []
best_acc = .0
patience = 0
app = Flask(__name__)
@app.route('/')
def greeting():
return "This is Tensorflow Python API ! "
@app.route('/predict', methods=['POST'])
def get_predict():
if request.method == 'POST':
print("request", request)
question = request.json["question"]
print("question", question)
context = request.json["context"]
print("context", context)
answer = do_predict(model,question,context)
print("answer", answer)
result = json.dumps(answer)
return result, status.HTTP_200_OK, {"Content-Type": "application/json; charset=utf-8", "Access-Control-Allow-Origin": "*"}
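# Example request (illustrative; assumes the Flask app is being served, e.g. on
# localhost:5000, and that question/context are JSON strings):
# curl -X POST http://localhost:5000/predict \
#      -H "Content-Type: application/json" \
#      -d '{"question": "...", "context": "..."}'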
# def main():
# # def main(args):
# # ------------- 모델사용 --------------------------------
# # do_predict(model, question, context):
# if __name__ == '__main__':
# # parser = argparse.ArgumentParser()
# # parser.add_argument('--question', type=str)
# # parser.add_argument('--context', type=str)
# # args = parser.parse_args()
# # main(args)
# main()
| 31.293952
| 126
| 0.617843
|
5112280a8e6c20b8239624c5df00423f183d2cec
| 11,169
|
py
|
Python
|
cellpy/utils/batch_tools/batch_helpers.py
|
jepegit/cellpy
|
b9ddb7afa3f7453bfb5f2f24a3268279bccf24c6
|
[
"MIT"
] | 38
|
2016-08-16T10:54:56.000Z
|
2022-03-03T04:43:20.000Z
|
cellpy/utils/batch_tools/batch_helpers.py
|
jepegit/cellpy
|
b9ddb7afa3f7453bfb5f2f24a3268279bccf24c6
|
[
"MIT"
] | 88
|
2016-08-16T13:10:27.000Z
|
2022-03-29T10:36:39.000Z
|
cellpy/utils/batch_tools/batch_helpers.py
|
jepegit/cellpy
|
b9ddb7afa3f7453bfb5f2f24a3268279bccf24c6
|
[
"MIT"
] | 13
|
2019-01-02T03:57:52.000Z
|
2022-01-19T08:06:49.000Z
|
import logging
import os
import warnings
import pandas as pd
import csv
import itertools
from cellpy import filefinder, prms
from cellpy.exceptions import ExportFailed, NullData, WrongFileVersion
import cellpy.parameters.internal_settings
# logger = logging.getLogger(__name__)
hdr_summary = cellpy.parameters.internal_settings.get_headers_summary()
hdr_journal = cellpy.parameters.internal_settings.get_headers_journal()
def look_up_and_get(cellpy_file_name, table_name, root=None):
"""Extracts table from cellpy hdf5-file."""
# infoname = '/CellpyData/info'
# dataname = '/CellpyData/dfdata'
# summaryname = '/CellpyData/dfsummary'
# fidname = '/CellpyData/fidtable'
# stepname = '/CellpyData/step_table'
if root is None:
root = "/CellpyData"
table_path = "/".join([root, table_name])
logging.debug(f"look_up_and_get({cellpy_file_name}, {table_name}")
store = pd.HDFStore(cellpy_file_name)
try:
table = store.select(table_path)
store.close()
except KeyError as e:
logging.warning("Could not read the table")
store.close()
raise WrongFileVersion(e)
return table
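# Illustrative usage (the file name is hypothetical): pull the summary table out
# of an existing cellpy hdf5 file, using the default "/CellpyData" root.
# summary = look_up_and_get("20160805_test001_45_cc.h5", "dfsummary")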
def create_folder_structure(project_name, batch_name):
"""This function creates a folder structure for the batch project.
    The folder structure consists of a main working folder ``project_name``
located in the ``outdatadir`` (as defined in the cellpy configuration file)
with a sub-folder named ``batch_name``. It also creates a folder
inside the ``batch_name`` folder for storing the raw data.
    If the folders do not exist, they will be made. The function also returns
the name of the info-df.
Args:
project_name: name of the project
batch_name: name of the batch
Returns: (info_file, (project_dir, batch_dir, raw_dir))
"""
out_data_dir = prms.Paths["outdatadir"]
project_dir = os.path.join(out_data_dir, project_name)
batch_dir = os.path.join(project_dir, batch_name)
raw_dir = os.path.join(batch_dir, "raw_data")
# create folders
if not os.path.isdir(project_dir):
os.mkdir(project_dir)
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
if not os.path.isdir(raw_dir):
os.mkdir(raw_dir)
# create file-name for the info_df (json)
info_file = "cellpy_batch_%s.json" % batch_name
info_file = os.path.join(project_dir, info_file)
return info_file, (project_dir, batch_dir, raw_dir)
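# Illustrative call (paths depend on the configured ``outdatadir``; names are hypothetical):
# info_file, (project_dir, batch_dir, raw_dir) = create_folder_structure(
#     "MyProject", "experiment_001")
# info_file -> <outdatadir>/MyProject/cellpy_batch_experiment_001.json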
def find_files(info_dict, file_list=None, pre_path=None, **kwargs):
"""Find files using cellpy.filefinder.
Args:
info_dict: journal pages.
file_list: list of files names to search through.
pre_path: path to prepend found files from file_list (if file_list is given).
**kwargs: sent to filefinder.search_for_files.
Returns:
info_dict
"""
# searches for the raw data files and the cellpyfile-name
# TODO: implement faster file searching
# TODO: implement option for not searching for raw-file names if force_cellpy is True
for run_name in info_dict[hdr_journal["filename"]]:
logging.debug(f"checking for {run_name}")
raw_files, cellpyfile = filefinder.search_for_files(
run_name, file_list=file_list, pre_path=pre_path, **kwargs
)
if not raw_files:
raw_files = None
info_dict[hdr_journal["raw_file_names"]].append(raw_files)
info_dict[hdr_journal["cellpy_file_name"]].append(cellpyfile)
return info_dict
def fix_groups(groups):
"""Takes care of strange group numbers."""
_groups = []
for g in groups:
try:
if not float(g) > 0:
_groups.append(1000)
else:
_groups.append(int(g))
except TypeError as e:
logging.info("Error in reading group number (check your db)")
logging.debug(g)
logging.debug(e)
_groups.append(1000)
return _groups
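# Behaviour sketch (added for illustration): unparsable or non-positive group
# labels fall back to the placeholder group 1000.
# >>> fix_groups([1, "2", None, -1])
# [1, 2, 1000, 1000]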
def save_multi(data, file_name, sep=";"):
"""Convenience function for storing data column-wise in a csv-file."""
logging.debug("saving multi")
with open(file_name, "w", newline="") as f:
logging.debug(f"{file_name} opened")
writer = csv.writer(f, delimiter=sep)
try:
writer.writerows(itertools.zip_longest(*data))
logging.info(f"{file_name} OK")
except Exception as e:
logging.info(f"Exception encountered in batch._save_multi: {e}")
raise ExportFailed
logging.debug("wrote rows using itertools in _save_multi")
def make_unique_groups(info_df):
"""This function cleans up the group numbers a bit."""
# fixes group numbering
unique_g = info_df[hdr_journal.group].unique()
unique_g = sorted(unique_g)
new_unique_g = list(range(len(unique_g)))
info_df[hdr_journal.sub_group] = info_df[hdr_journal.group] * 0
for i, j in zip(unique_g, new_unique_g):
counter = 1
for indx, row in info_df.loc[info_df[hdr_journal.group] == i].iterrows():
info_df.at[indx, hdr_journal.sub_group] = counter
counter += 1
info_df.loc[info_df[hdr_journal.group] == i, hdr_journal.group] = j + 1
return info_df
def _remove_date_and_celltype(label,):
parts = label.split("_")
parts.pop(0)
if parts[-1] in ["cc", "ec", "eth"]:
parts.pop(-1)
return "_".join(parts)
def create_labels(label, *args):
"""Returns a re-formatted label (currently it only removes the dates
from the run-name)"""
return _remove_date_and_celltype(label)
def create_selected_summaries_dict(summaries_list):
"""Creates a dictionary with summary column headers.
Examples:
>>> summaries_to_output = ["discharge_capacity", "charge_capacity"]
>>> summaries_to_output_dict = create_selected_summaries_dict(
>>> summaries_to_output
>>> )
>>> print(summaries_to_output_dict)
{'discharge_capacity': "Discharge_Capacity(mAh/g)",
         'charge_capacity': "Charge_Capacity(mAh/g)"}
Args:
summaries_list: list containing cellpy summary column id names
Returns: dictionary of the form {cellpy id name: cellpy summary
header name,}
"""
selected_summaries = dict()
for h in summaries_list:
selected_summaries[h] = hdr_summary[h]
return selected_summaries
def pick_summary_data(key, summary_df, selected_summaries):
"""picks the selected pandas.DataFrame"""
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
value = selected_summaries_dict[key]
return summary_df.iloc[:, summary_df.columns.get_level_values(1) == value]
def join_summaries(summary_frames, selected_summaries, keep_old_header=False):
"""parse the summaries and combine based on column (selected_summaries)"""
if not summary_frames:
raise NullData("No summaries available to join")
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
frames = []
keys = [] # test-name
for key in summary_frames:
keys.append(key)
if summary_frames[key].empty:
logging.debug("Empty summary_frame encountered")
frames.append(summary_frames[key])
out = []
summary_df = pd.concat(frames, keys=keys, axis=1)
for key, value in selected_summaries_dict.items():
_summary_df = summary_df.iloc[
:, summary_df.columns.get_level_values(1) == value
]
_summary_df.name = key
if not keep_old_header:
try:
_summary_df.columns = _summary_df.columns.droplevel(-1)
except AttributeError as e:
logging.debug("could not drop level from frame")
logging.debug(e)
out.append(_summary_df)
logging.debug("finished joining summaries")
return out
def generate_folder_names(name, project):
"""Creates sensible folder names."""
out_data_dir = prms.Paths.outdatadir
project_dir = os.path.join(out_data_dir, project)
batch_dir = os.path.join(project_dir, name)
raw_dir = os.path.join(batch_dir, "raw_data")
return out_data_dir, project_dir, batch_dir, raw_dir
def _extract_dqdv(cell_data, extract_func, last_cycle):
"""Simple wrapper around the cellpy.utils.ica.dqdv function."""
from cellpy.utils.ica import dqdv
list_of_cycles = cell_data.get_cycle_numbers()
if last_cycle is not None:
list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]
logging.debug(f"only processing up to cycle {last_cycle}")
logging.debug(f"you have {len(list_of_cycles)} cycles to process")
out_data = []
for cycle in list_of_cycles:
try:
c, v = extract_func(cycle)
v, dq = dqdv(v, c)
v = v.tolist()
dq = dq.tolist()
except NullData as e:
v = list()
dq = list()
logging.info(" Ups! Could not process this (cycle %i)" % cycle)
logging.info(" %s" % e)
header_x = "dQ cycle_no %i" % cycle
header_y = "voltage cycle_no %i" % cycle
dq.insert(0, header_x)
v.insert(0, header_y)
out_data.append(v)
out_data.append(dq)
return out_data
def export_dqdv(cell_data, savedir, sep, last_cycle=None):
"""Exports dQ/dV data from a CellpyData instance.
Args:
cell_data: CellpyData instance
savedir: path to the folder where the files should be saved
sep: separator for the .csv-files.
last_cycle: only export up to this cycle (if not None)
"""
logging.debug("exporting dqdv")
filename = cell_data.cell.loaded_from
no_merged_sets = ""
firstname, extension = os.path.splitext(filename)
firstname += no_merged_sets
if savedir:
firstname = os.path.join(savedir, os.path.basename(firstname))
logging.debug(f"savedir is true: {firstname}")
outname_charge = firstname + "_dqdv_charge.csv"
outname_discharge = firstname + "_dqdv_discharge.csv"
list_of_cycles = cell_data.get_cycle_numbers()
number_of_cycles = len(list_of_cycles)
logging.debug("%s: you have %i cycles" % (filename, number_of_cycles))
# extracting charge
out_data = _extract_dqdv(cell_data, cell_data.get_ccap, last_cycle)
logging.debug("extracted ica for charge")
try:
save_multi(data=out_data, file_name=outname_charge, sep=sep)
except ExportFailed as e:
logging.info("could not export ica for charge")
warnings.warn(f"ExportFailed exception raised: {e}")
else:
logging.debug("saved ica for charge")
# extracting discharge
out_data = _extract_dqdv(cell_data, cell_data.get_dcap, last_cycle)
logging.debug("extracted ica for discharge")
try:
save_multi(data=out_data, file_name=outname_discharge, sep=sep)
except ExportFailed as e:
logging.info("could not export ica for discharge")
warnings.warn(f"ExportFailed exception raised: {e}")
else:
logging.debug("saved ica for discharge")
| 34.155963
| 89
| 0.669532
|
70f3c6a650e0bb3477dbf33fa3c09be95644645b
| 3,383
|
py
|
Python
|
mavlinkAPI/unloadDrone.py
|
CopterExpress/DronePoint-home
|
2800c77fd0aaaab8080e157ffc9bee875171db48
|
[
"Apache-2.0"
] | null | null | null |
mavlinkAPI/unloadDrone.py
|
CopterExpress/DronePoint-home
|
2800c77fd0aaaab8080e157ffc9bee875171db48
|
[
"Apache-2.0"
] | null | null | null |
mavlinkAPI/unloadDrone.py
|
CopterExpress/DronePoint-home
|
2800c77fd0aaaab8080e157ffc9bee875171db48
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, os, sys
from pymavlink import mavutil
import threading
import json
#CUSTOM_MODE
CUSTOM_MODE_UNKNOWN=0
CUSTOM_MODE_COVER_INSTALLATION=2
CUSTOM_MODE_COVER_REMOVAL=4
CUSTOM_MODE_LOADING_DRONE = 5
CUSTOM_MODE_UNLOADING_DRONE = 6
CUSTOM_MODE_GETTING_FROM_USER = 7
CUSTOM_MODE_UNLOADING_TO_USER = 8
CUSTOM_MODE_CONTAINER_UNLOADING = 9
SERVICE = 10
RESET = 11
STANDBY = 12
ERROR = 13
#CUSTOM_SUBMODE
LOCK_RELEASE = 0
LOCK_LOCK = 1
LOCK_STOP = 2
OPEN_TOP_HATCH = 3
CLOSE_TOP_HATCH = 4
GOTO_CELL = 5
LOAD_CHARGING_CELL = 6
UNLOAD_CHARGING_CELL = 7
LOAD_PAYLOAD_CELL = 8
UNLOAD_PAYLOAD_CELL = 9
GET_FROM_USER = 10
UNLOAD_TO_USER = 11
STOP = 12
GET_PAYLOAD_FROM_DRONE = 13
INSERT_PAYLOAD_INTO_DRONE = 14
LOCK_PAYLOAD = 15
RELEASE_PAYLOAD = 16
LOCK_CHARGING_CELL_LOCK = 17
RELEASE_CHARGING_CELL_LOCK = 18
LOCK_TOP_HATCH_LOCK = 19
RELEASE_TOP_HATCH_LOCK = 20
OPEN_BOTTOM_HATCH = 21
CLOSE_BOTTOM_HATCH = 22
LOCK_USER_CELL_LOCK = 23
RELEASE_USER_CELL_LOCK = 24
GOTO_CHARGING_CELL = 25
CCSM=12
NUMO=0
master = mavutil.mavlink_connection('udpout:127.0.0.1:14590')
def telemet():
#show incoming mavlink messages
global CCSM
while True:
msg = master.recv_match(type = 'HEARTBEAT', blocking = False)
if not msg:
continue
else:
#print(msg)
try:
state = msg.to_dict()
#print(state)
if state.get("type")==31:
#print(state)
CCSM = state.get("custom_mode")
print("CUSTOM_MODE =", CCSM)
except ValueError as e: # Incorrect message
print(e)
def sendheard():
    '''send heartbeats quickly'''
while True:
try:
master.mav.heartbeat_send(0,0,0,0,0)
time.sleep(1)
        except ValueError as e:  # cannot send message
print(e)
task1 = threading.Thread(target=telemet)
task2 = threading.Thread(target=sendheard)
task1.start()
task2.start()
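# Hypothetical convenience wrapper (not part of the original script): both blocks
# in the loop below issue the same MAV_CMD_DO_SET_MODE command and differ only in
# the custom mode and cell coordinates, so they could be expressed as:
def set_custom_mode(mode, x, y, z, kr=0):
    master.mav.command_long_send(master.target_system, master.target_component,
                                 mavutil.mavlink.MAV_CMD_DO_SET_MODE, 1,
                                 mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
                                 mode, x, y, z, kr, 0)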
while True:
    try:  # LOAD_DRONE CUSTOM_MODE operation changes the state
        # take the payload out of the cell, remove the cover, load it into the drone
print("LOAD_DRONE X0Y3Z2Kr0")
master.mav.command_long_send(master.target_system, master.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_MODE, 1,
mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
                                     CUSTOM_MODE_LOADING_DRONE, # state 5
0, 3, 2, 0, 0)
#X, Y, Z, Kr0
time.sleep(5)
while CCSM!=12:
pass
time.sleep(7)
        # UNLOAD_DRONE CUSTOM_MODE operation changes the state
        # take the payload off the drone, insert the cover, load it into the cell
print("UNLOAD_DRONE X0Y3Z2Kr0")
master.mav.command_long_send(master.target_system, master.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_MODE, 1,
mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
                                     CUSTOM_MODE_UNLOADING_DRONE, # state 6
0, 3, 2, 0, 0)
time.sleep(5)
while CCSM!=12:
pass
time.sleep(7)
except BaseException as e:
print(e)
time.sleep(3)
print('OK')
| 27.064
| 84
| 0.619568
|
2dce049f24dfa2c170d50681a93b451eac694eef
| 20,640
|
py
|
Python
|
auth-api/src/auth_api/services/invitation.py
|
mengdong19/sbc-auth
|
66fbd94a79d6de18102d3db29743ffeab89ea161
|
[
"Apache-2.0"
] | null | null | null |
auth-api/src/auth_api/services/invitation.py
|
mengdong19/sbc-auth
|
66fbd94a79d6de18102d3db29743ffeab89ea161
|
[
"Apache-2.0"
] | null | null | null |
auth-api/src/auth_api/services/invitation.py
|
mengdong19/sbc-auth
|
66fbd94a79d6de18102d3db29743ffeab89ea161
|
[
"Apache-2.0"
] | 1
|
2019-07-25T18:20:41.000Z
|
2019-07-25T18:20:41.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service for managing Invitation data."""
from datetime import datetime
from typing import Dict
from flask import current_app
from itsdangerous import URLSafeTimedSerializer
from jinja2 import Environment, FileSystemLoader
from sbc_common_components.tracing.service_tracing import ServiceTracing # noqa: I001
from auth_api.config import get_named_config
from auth_api.exceptions import BusinessException
from auth_api.exceptions.errors import Error
from auth_api.models import AccountLoginOptions as AccountLoginOptionsModel
from auth_api.models import Documents as DocumentsModel
from auth_api.models import Invitation as InvitationModel
from auth_api.models import InvitationStatus as InvitationStatusModel
from auth_api.models import Membership as MembershipModel
from auth_api.models.org import Org as OrgModel
from auth_api.schemas import InvitationSchema
from auth_api.services.user import User as UserService
from auth_api.utils.enums import AccessType, DocumentType, InvitationStatus, InvitationType, Status, LoginSource, \
OrgStatus as OrgStatusEnum
from auth_api.utils.roles import ADMIN, COORDINATOR, STAFF, USER
from auth_api.utils.constants import GROUP_GOV_ACCOUNT_USERS
from .authorization import check_auth
from .keycloak import KeycloakService
from .membership import Membership as MembershipService
from .notification import send_email
from ..utils.account_mailer import publish_to_mailer
from ..utils.util import escape_wam_friendly_url
ENV = Environment(loader=FileSystemLoader('.'), autoescape=True)
CONFIG = get_named_config()
class Invitation:
"""Manages Invitation data.
This service manages creating, updating, and retrieving Invitation data via the Invitation model.
"""
def __init__(self, model):
"""Return an invitation service instance."""
self._model = model
@ServiceTracing.disable_tracing
def as_dict(self):
"""Return the internal Invitation model as a dictionary."""
invitation_schema = InvitationSchema()
obj = invitation_schema.dump(self._model, many=False)
return obj
@staticmethod
def create_invitation(invitation_info: Dict, user, # pylint: disable=too-many-locals
token_info: Dict, invitation_origin):
"""Create a new invitation."""
# Ensure that the current user is ADMIN or COORDINATOR on each org being invited to
context_path = CONFIG.AUTH_WEB_TOKEN_CONFIRM_PATH
org_id = invitation_info['membership'][0]['orgId']
# get the org and check the access_type
org: OrgModel = OrgModel.find_by_org_id(org_id)
if not org:
raise BusinessException(Error.DATA_NOT_FOUND, None)
check_auth(token_info, org_id=org_id, one_of_roles=(ADMIN, COORDINATOR, STAFF))
org_name = org.name
invitation_type = Invitation._get_inv_type(org)
if org.access_type == AccessType.ANONYMOUS.value: # anonymous account never get bceid or bcsc choices
mandatory_login_source = LoginSource.BCROS.value
elif org.access_type == AccessType.GOVM.value:
mandatory_login_source = LoginSource.STAFF.value
else:
default_login_option_based_on_accesstype = LoginSource.BCSC.value if \
org.access_type == AccessType.REGULAR.value else LoginSource.BCEID.value
role = invitation_info['membership'][0]['membershipType']
account_login_options = AccountLoginOptionsModel.find_active_by_org_id(org.id)
mandatory_login_source = LoginSource.BCSC.value if \
role == ADMIN else getattr(account_login_options, 'login_source',
default_login_option_based_on_accesstype)
invitation = InvitationModel.create_from_dict(invitation_info, user.identifier, invitation_type)
confirmation_token = Invitation.generate_confirmation_token(invitation.id, invitation.type)
invitation.token = confirmation_token
invitation.login_source = mandatory_login_source
invitation.save()
Invitation.send_invitation(invitation, org_name, user.as_dict(),
'{}/{}'.format(invitation_origin, context_path), mandatory_login_source,
org_status=org.status_code)
# notify admin if staff adds team members
is_staff_access = token_info and 'staff' in token_info.get('realm_access', {}).get('roles', None)
if is_staff_access and invitation_type == InvitationType.STANDARD.value:
publish_to_mailer(notification_type='teamMemberInvited', org_id=org_id)
return Invitation(invitation)
@staticmethod
def _get_inv_type(org):
"""Return the correct invitation type."""
inv_types = {
AccessType.GOVM.value: InvitationType.GOVM.value,
AccessType.ANONYMOUS.value: InvitationType.DIRECTOR_SEARCH.value,
AccessType.REGULAR.value: InvitationType.STANDARD.value
}
return inv_types.get(org.access_type, InvitationType.STANDARD.value)
def update_invitation(self, user, token_info: Dict, invitation_origin):
"""Update the specified invitation with new data."""
# Ensure that the current user is ADMIN or COORDINATOR on each org being re-invited to
context_path = CONFIG.AUTH_WEB_TOKEN_CONFIRM_PATH
for membership in self._model.membership:
org_id = membership.org_id
check_auth(token_info, org_id=org_id, one_of_roles=(ADMIN, COORDINATOR, STAFF))
        # TODO doesn't work when invited to multiple teams. Re-work the logic when multiple teams are introduced
confirmation_token = Invitation.generate_confirmation_token(self._model.id, self._model.type)
self._model.token = confirmation_token
updated_invitation = self._model.update_invitation_as_retried()
org_name = OrgModel.find_by_org_id(self._model.membership[0].org_id).name
Invitation.send_invitation(updated_invitation, org_name, user.as_dict(),
'{}/{}'.format(invitation_origin, context_path), self._model.login_source)
return Invitation(updated_invitation)
@staticmethod
def delete_invitation(invitation_id, token_info: Dict = None):
"""Delete the specified invitation."""
# Ensure that the current user is ADMIN or COORDINATOR for each org in the invitation
invitation = InvitationModel.find_invitation_by_id(invitation_id)
if invitation is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
for membership in invitation.membership:
org_id = membership.org_id
check_auth(token_info, org_id=org_id, one_of_roles=(ADMIN, COORDINATOR, STAFF))
invitation.delete()
@staticmethod
def get_invitations_for_org(org_id, status=None, token_info: Dict = None):
"""Get invitations for an org."""
org_model = OrgModel.find_by_org_id(org_id)
if not org_model:
return None
if status:
status = InvitationStatus[status]
# If staff return full list
if 'staff' in token_info.get('realm_access').get('roles'):
return InvitationModel.find_pending_invitations_by_org(org_id)
current_user: UserService = UserService.find_by_jwt_token(token_info)
current_user_membership: MembershipModel = \
MembershipModel.find_membership_by_user_and_org(user_id=current_user.identifier, org_id=org_id)
# If no active membership return empty array
if current_user_membership is None or \
current_user_membership.status != Status.ACTIVE.value:
return []
# Ensure either ADMIN or COORDINATOR
if current_user_membership.membership_type_code == USER:
return []
return InvitationModel.find_invitations_by_org(org_id=org_id, status=status)
@staticmethod
def find_invitation_by_id(invitation_id, token_info: Dict = None):
"""Find an existing invitation with the provided id."""
if invitation_id is None:
return None
invitation = InvitationModel.find_invitation_by_id(invitation_id)
if not invitation:
return None
# Ensure that the current user is an ADMIN or COORDINATOR on each org in the invite being retrieved
for membership in invitation.membership:
org_id = membership.org_id
check_auth(token_info, org_id=org_id, one_of_roles=(ADMIN, COORDINATOR, STAFF))
return Invitation(invitation)
@staticmethod
def send_admin_notification(user, url, recipient_email_list, org_name):
"""Send the admin email notification."""
subject = '[BC Registries and Online Services] {} {} has responded for the invitation to join the account {}'. \
            format(user['firstname'], user['lastname'], org_name)
sender = CONFIG.MAIL_FROM_ID
try:
template = ENV.get_template('email_templates/admin_notification_email.html')
except Exception: # NOQA # pylint: disable=broad-except
raise BusinessException(Error.FAILED_INVITATION, None)
sent_response = send_email(subject, sender, recipient_email_list,
template.render(url=url, user=user, org_name=org_name,
logo_url=f'{url}/{CONFIG.REGISTRIES_LOGO_IMAGE_NAME}'))
if not sent_response:
# invitation.invitation_status_code = 'FAILED'
# invitation.save()
raise BusinessException(Error.FAILED_INVITATION, None)
@staticmethod
def send_invitation(invitation: InvitationModel, org_name, user, # pylint: disable=too-many-arguments
app_url, login_source, org_status=None):
"""Send the email notification."""
current_app.logger.debug('<send_invitation')
mail_configs = Invitation._get_invitation_configs(org_name, login_source, org_status)
subject = mail_configs.get('subject').format(user['firstname'], user['lastname'])
sender = CONFIG.MAIL_FROM_ID
recipient = invitation.recipient_email
token_confirm_url = '{}/{}/{}'.format(app_url, mail_configs.get('token_confirm_path'), invitation.token)
template = ENV.get_template(f"email_templates/{mail_configs.get('template_name')}.html")
sent_response = send_email(subject, sender, recipient,
template.render(invitation=invitation,
url=token_confirm_url,
user=user,
org_name=org_name,
logo_url=f'{app_url}/{CONFIG.REGISTRIES_LOGO_IMAGE_NAME}'))
if not sent_response:
invitation.invitation_status_code = 'FAILED'
invitation.save()
current_app.logger.debug('>send_invitation failed')
raise BusinessException(Error.FAILED_INVITATION, None)
current_app.logger.debug('>send_invitation')
@staticmethod
def _get_invitation_configs(org_name, login_source, org_status=None):
"""Get the config for different email types."""
login_source = login_source or LoginSource.BCSC.value
escape_url = escape_wam_friendly_url(org_name)
token_confirm_path = f'{escape_url}/validatetoken/{login_source}'
if login_source == LoginSource.STAFF.value:
            # for GOVM accounts there are two kinds of invitation with the same login source:
            # the first invitation to the org is an account set-up invitation, otherwise it is a normal joining invite
login_source = 'IDIR/ACCOUNTSETUP' if Invitation._is_first_user_to_a_gov_accnt(org_status) else login_source
govm_setup_configs = {
'token_confirm_path': token_confirm_path,
'template_name': 'govm_business_invitation_email',
'subject': '[BC Registries and Online Services] You’ve been invited to create a BC Registries account',
}
govm_member_configs = {
'token_confirm_path': token_confirm_path,
'template_name': 'govm_member_invitation_email',
'subject': '[BC Registries and Online Services] You have been added as a team member.',
}
director_search_configs = {
'token_confirm_path': token_confirm_path,
'template_name': 'dirsearch_business_invitation_email',
'subject': 'Your BC Registries Account has been created',
}
bceid_configs = {
'token_confirm_path': token_confirm_path,
'template_name': 'business_invitation_email_for_bceid',
'subject': '[BC Registries and Online Services] {} {} has invited you to join an account',
}
default_configs = {
'token_confirm_path': token_confirm_path,
'template_name': 'business_invitation_email',
'subject': '[BC Registries and Online Services] {} {} has invited you to join an account',
}
mail_configs = {
'BCROS': director_search_configs,
'BCEID': bceid_configs,
'IDIR': govm_member_configs,
'IDIR/ACCOUNTSETUP': govm_setup_configs
}
return mail_configs.get(login_source, default_configs)
@staticmethod
def generate_confirmation_token(invitation_id, invitation_type=''):
"""Generate the token to be sent in the email."""
serializer = URLSafeTimedSerializer(CONFIG.EMAIL_TOKEN_SECRET_KEY)
token = {'id': invitation_id, 'type': invitation_type}
return serializer.dumps(token, salt=CONFIG.EMAIL_SECURITY_PASSWORD_SALT)
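    # Round-trip sketch (illustrative; the CONFIG values come from the app config):
    #   s = URLSafeTimedSerializer(CONFIG.EMAIL_TOKEN_SECRET_KEY)
    #   token = s.dumps({'id': 42, 'type': 'STANDARD'}, salt=CONFIG.EMAIL_SECURITY_PASSWORD_SALT)
    #   s.loads(token, salt=CONFIG.EMAIL_SECURITY_PASSWORD_SALT, max_age=3600)  # -> {'id': 42, 'type': 'STANDARD'}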
@staticmethod
def _is_first_user_to_a_gov_accnt(org_status: str) -> bool:
return org_status == OrgStatusEnum.PENDING_INVITE_ACCEPT.value
@staticmethod
def validate_token(token):
"""Check whether the passed token is valid."""
serializer = URLSafeTimedSerializer(CONFIG.EMAIL_TOKEN_SECRET_KEY)
token_valid_for = int(CONFIG.TOKEN_EXPIRY_PERIOD) * 3600 * 24 if CONFIG.TOKEN_EXPIRY_PERIOD else 3600 * 24 * 7
try:
invitation_id = serializer.loads(token, salt=CONFIG.EMAIL_SECURITY_PASSWORD_SALT,
max_age=token_valid_for).get('id')
except: # noqa: E722
raise BusinessException(Error.EXPIRED_INVITATION, None)
invitation: InvitationModel = InvitationModel.find_invitation_by_id(invitation_id)
if invitation is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
if invitation.invitation_status_code == 'ACCEPTED':
raise BusinessException(Error.ACTIONED_INVITATION, None)
if invitation.invitation_status_code == 'EXPIRED':
raise BusinessException(Error.EXPIRED_INVITATION, None)
return Invitation(invitation)
@staticmethod
def notify_admin(user, invitation_id, membership_id, invitation_origin):
"""Admins should be notified if user has responded to invitation."""
current_app.logger.debug('<notify_admin')
admin_list = UserService.get_admins_for_membership(membership_id)
invitation: InvitationModel = InvitationModel.find_invitation_by_id(invitation_id)
context_path = CONFIG.AUTH_WEB_TOKEN_CONFIRM_PATH
        # Don't send email in case no admin exists in the org. (staff sent invitation)
if len(admin_list) >= 1:
admin_emails = ','.join([str(x.contacts[0].contact.email) for x in admin_list if x.contacts])
else:
# No admin, find Sender email to notify sender (staff)
admin_emails = invitation.sender.email
if admin_emails != '':
Invitation.send_admin_notification(user.as_dict(),
'{}/{}'.format(invitation_origin, context_path),
admin_emails, invitation.membership[0].org.name)
current_app.logger.debug('>notify_admin')
return Invitation(invitation)
@staticmethod
def accept_invitation(invitation_id, user: UserService, origin, add_membership: bool = True,
token_info: Dict = None):
"""Add user, role and org from the invitation to membership."""
current_app.logger.debug('>accept_invitation')
invitation: InvitationModel = InvitationModel.find_invitation_by_id(invitation_id)
if invitation is None:
raise BusinessException(Error.DATA_NOT_FOUND, None)
if invitation.invitation_status_code == 'ACCEPTED':
raise BusinessException(Error.ACTIONED_INVITATION, None)
if invitation.invitation_status_code == 'EXPIRED':
raise BusinessException(Error.EXPIRED_INVITATION, None)
if getattr(token_info, 'loginSource', None) is not None: # bcros comes with out token
login_source = token_info.get('loginSource', None)
if invitation.login_source != login_source:
raise BusinessException(Error.INVALID_USER_CREDENTIALS, None)
if add_membership:
for membership in invitation.membership:
membership_model = MembershipModel()
membership_model.org_id = membership.org_id
membership_model.user_id = user.identifier
membership_model.membership_type = membership.membership_type
# check to ensure an invitation for this user/org has not already been processed
existing_membership = MembershipService \
.get_membership_for_org_and_user(org_id=membership_model.org_id, user_id=membership_model.user_id)
if existing_membership:
raise BusinessException(Error.DATA_ALREADY_EXISTS, None)
org_model: OrgModel = OrgModel.find_by_org_id(membership.org_id)
                # GOVM users get direct approval since they are IDIR users.
membership_model.status = Invitation._get_status_based_on_org(org_model)
membership_model.save()
try:
                # skip notifying the admin if it was auto-approved
                # for now, auto-approval happens for GOVM; if more auto-approval cases come, just check if it is GOVM
if membership_model.status != Status.ACTIVE.value:
Invitation.notify_admin(user, invitation_id, membership_model.id, origin)
except BusinessException as exception:
current_app.logger.error('<send_notification_to_admin failed', exception.message)
invitation.accepted_date = datetime.now()
invitation.invitation_status = InvitationStatusModel.get_status_by_code('ACCEPTED')
invitation.save()
# Call keycloak to add the user to the group.
if user:
group_name: str = KeycloakService.join_users_group(token_info)
KeycloakService.join_account_holders_group(user.keycloak_guid)
if group_name == GROUP_GOV_ACCOUNT_USERS:
# TODO Remove this if gov account users needs Terms of Use.
tos_document = DocumentsModel.fetch_latest_document_by_type(DocumentType.TERMS_OF_USE.value)
user.update_terms_of_use(token_info, True, tos_document.version_id)
# Add contact to the user.
user.add_contact(token_info, dict(email=token_info.get('email', None)),
throw_error_for_duplicates=False)
current_app.logger.debug('<accept_invitation')
return Invitation(invitation)
@staticmethod
def _get_status_based_on_org(org_model: OrgModel):
if org_model.access_type == AccessType.GOVM.value:
return Status.ACTIVE.value
return Status.PENDING_APPROVAL.value
| 50.341463
| 120
| 0.680233
|
9a58237bcdc7bd5843d51e6ffd9a17f45794da09
| 8,563
|
py
|
Python
|
src/pyth2/fs/StagingFileSystem.py
|
gnomeberry/pyth2
|
532d89e4ed22b4f9427069bf187ab836e2c2f538
|
[
"MIT"
] | null | null | null |
src/pyth2/fs/StagingFileSystem.py
|
gnomeberry/pyth2
|
532d89e4ed22b4f9427069bf187ab836e2c2f538
|
[
"MIT"
] | null | null | null |
src/pyth2/fs/StagingFileSystem.py
|
gnomeberry/pyth2
|
532d89e4ed22b4f9427069bf187ab836e2c2f538
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
'''
Created on 2016/01/24
@author: _
'''
import codecs
import json
import os
import sys
FILESYSTEM_CHARACTER_ENCODING = sys.getfilesystemencoding()
STAGING_CONTEXT_FILE_PATTERN = "%s_context"
def ensureDirectory(path):
if not os.path.isdir(path):
os.mkdir(path)
if not os.path.isdir(path):
raise ValueError("Cannot ensure directory(s) %s" % path)
return True
def ensureFile(path):
if os.path.isfile(path):
return True
ensureDirectory(os.path.join(path, os.pardir))
with open(path, "w") as f: # @UnusedVariable
pass
if os.path.isfile(path):
return True
else:
raise ValueError("Cannot ensure file %s" % path)
def fileDateComparator(dateFunctor):
def comparator(f1, f2):
return dateFunctor(f1) < dateFunctor(f2)
return comparator
def fileRegexComparator(regex, onlyBaseName = True, errorOnMismatch = True, *foundTranslators):
import re
pat = re.compile(regex, re.DOTALL)
def comparator(f1, f2):
m1 = pat.match(os.path.basename(f1) if onlyBaseName else f1)
m2 = pat.match(os.path.basename(f2) if onlyBaseName else f2)
if errorOnMismatch:
if not m1:
raise ValueError("Mismatch %s for %s" % (f1, regex))
if not m2:
raise ValueError("Mismatch %s for %s" % (f2, regex))
g1 = [] if not m1 else m1.groups()
g2 = [] if not m2 else m2.groups()
if len(g1) != len(g2):
return len(g1) - len(g2)
else:
for i, s in enumerate(zip(g1, g2)):
s = map(foundTranslators[i], s) if i < len(foundTranslators) else s
c = cmp(s[0], s[1])
if c != 0:
return c
return 0
return comparator
class FilesystemBoundObject(object):
def __init__(self, isFileObject = True):
self.isFileObject = isFileObject
def assocFile(self):
raise ValueError("Abstract method")
def ensureFile(self):
if self.isFileObject:
ensureFile(self.assocFile())
else:
ensureDirectory(self.assocFile())
class FilesystemView(FilesystemBoundObject):
def __init__(self, stage, files, autoCommit):
super(FilesystemView, self).__init__(False)
self.__stage = stage
self.__files = tuple(files)
self.autoCommit = autoCommit
def assocFile(self):
return self.__stage.assocFile()
def listFiles(self, sort_comparator = None):
if callable(sort_comparator):
return tuple(sorted(self.__files, cmp = sort_comparator))
else:
return self.__files
def commit(self):
        # Python name mangling would turn ``self.__stage.__commit_currentView`` into a
        # lookup of ``_FilesystemView__commit_currentView``, which does not exist on the
        # Stage object, so the stage-side method is called via a single underscore name.
        self.__stage._commit_currentView(self)
    def __enter__(self):
        return self
    def __exit__(self, excType, excInstance, excTrace):
        # propagate any exception unchanged; on a clean exit, honour autoCommit
        if excInstance:
            return False
        if self.autoCommit:
            self.commit()
class Stages(object):
'''
    A collection of Stage objects
'''
baseDirectory = None
stages = []
def __init__(self, baseDirectory):
self.baseDirectory = unicode(baseDirectory, FILESYSTEM_CHARACTER_ENCODING) if not isinstance(baseDirectory, unicode) else baseDirectory
ensureDirectory(self.baseDirectory)
def addStage(self, stageName = "", changeBaseDir = None):
if changeBaseDir:
changeBaseDir = unicode(changeBaseDir, FILESYSTEM_CHARACTER_ENCODING) if not isinstance(changeBaseDir, unicode) else changeBaseDir
ensureDirectory(changeBaseDir)
else:
changeBaseDir = self.baseDirectory
stage = Stage(self, changeBaseDir, stageName)
if stage in self.stages:
raise ValueError("%s is already exist in %s" % (stage, self.stages))
self.stages.append(stage)
return stage
def __contains__(self, val):
return val in self.stages
def findStageIndex(self, stageName):
for i, s in enumerate(self.stages):
if s._name == stageName:
return i
return None
class Stage(FilesystemBoundObject):
__stageManager = None
_name = ""
class StageContext(FilesystemBoundObject):
def __init__(self, stage):
super(stage.StageContext, self).__init__(True)
self.stage = stage
self.__frozen = self.__dict__.keys() + ["_StageContext__frozen"]
def assocFile(self):
return os.path.join(self.stage._baseDirectory, STAGING_CONTEXT_FILE_PATTERN % self.stage._name)
def __attributes(self):
return {k: getattr(self, k) for k in self.__dict__ if not k in self.__frozen}
def __contains__(self, k):
return hasattr(self, k)
def clear(self):
for k in [_ for _ in self.__dict__ if not _ in self.__frozen]:
delattr(self, k)
def store(self):
self.ensureFile()
with codecs.open(self.assocFile(), "wb", "utf-8", buffering = 1) as fp:
json.dump(self.__attributes(), fp, indent = 4)
def load(self):
self.ensureFile()
self.clear()
with codecs.open(self.assocFile(), "rb", "utf-8", buffering = 1) as fp:
for k, v in json.load(fp).items():
setattr(self, k, v)
def __init__(self, stageManager, baseDirectory, name):
super(Stage, self).__init__(False)
if not stageManager:
raise ValueError("Must specify stage manager")
if not name:
raise ValueError("%s is not valid for stage name" % name)
self.__stageManager = stageManager
self._name = unicode(name, FILESYSTEM_CHARACTER_ENCODING) if not isinstance(name, unicode) else name
self._baseDirectory = baseDirectory
self.__stageDirectory = os.path.join(self._baseDirectory, self._name)
self.__context = self.StageContext(self)
ensureDirectory(self.__stageDirectory)
def __str__(self, *args, **kwargs):
return "Stage[%s, dir=%s]" % (self._name, self.__stageDirectory)
def __eq__(self, other):
return isinstance(other, Stage) and other._name == self._name
def __hash__(self, *args, **kwargs):
return self._name.__hash__()
def assocFile(self):
return self.__stageDirectory
def stageManager(self):
return self.__stageManager
def stageName(self):
return self._name
    def previousStage(self):
        index = self.__stageManager.findStageIndex(self._name)
        if index is not None:
            return self.__stageManager.stages[index - 1] if index >= 1 else None
    def nextStage(self):
        index = self.__stageManager.findStageIndex(self._name)
        if index is not None:
            return self.__stageManager.stages[index + 1] if index + 1 < len(self.__stageManager.stages) else None
def context(self):
return self.__context
def currentView(self, pathSelector = None, autoCommit = False):
files = (os.path.join(self.__stageDirectory, (unicode(fpath, FILESYSTEM_CHARACTER_ENCODING) if not isinstance(fpath, unicode) else fpath)) for fpath in os.listdir(self.assocFile()))
if pathSelector and callable(pathSelector):
files = (fpath for fpath in files if pathSelector(fpath))
return FilesystemView(self, list(files), autoCommit)
    def _commit_currentView(self, filesystemView):
print "Commit current view", filesystemView.listFiles()
nextStage = self.nextStage()
print "Next stage=%s" % nextStage
if nextStage:
pass
else:
# delete?
pass
if __name__ == "__main__":
x=Stages("z:\\hoge")
print x.baseDirectory
stage1 = x.addStage("stage1")
ctx = stage1.context()
ctx.load()
if "initial" in ctx:
print ctx.val1, ctx.val2
ctx.initial = False
ctx.val1 = 1
ctx.val2 = "abc"
ctx.store()
ctx.clear()
print ctx.val1
fsv = stage1.currentView()
for fn in fsv.listFiles(fileRegexComparator(r"(.*)(\d+).*$", True, False, unicode, int)):
print fn
| 33.580392
| 190
| 0.589046
|
2c3d4c73931373fdd1d07e3bc986e28ba95b1307
| 1,564
|
py
|
Python
|
src/gui/MainWindow/MainRadioButton.py
|
bochkovoi/AHP
|
b51dc598f8f7a65a2ade039d887dccfa6d070f1e
|
[
"MIT"
] | null | null | null |
src/gui/MainWindow/MainRadioButton.py
|
bochkovoi/AHP
|
b51dc598f8f7a65a2ade039d887dccfa6d070f1e
|
[
"MIT"
] | null | null | null |
src/gui/MainWindow/MainRadioButton.py
|
bochkovoi/AHP
|
b51dc598f8f7a65a2ade039d887dccfa6d070f1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSignal
import sys, os.path as op
path1 = op.join( op.abspath(op.dirname(__file__)), '..', '..')
path2 = op.join( op.abspath(op.dirname(__file__)), '..')
sys.path.append(path1)
sys.path.append(path2)
from Structure import *
from .MainCategoriesVision import *
from .MainAlternativesVision import *
from SubCriteriasVision import *
class MainRadioButton( QtWidgets.QWidget ):
    ''' Object mode window '''
is_changed = pyqtSignal()
def __init__( self, main_obj, parent=None ):
super().__init__( parent=parent )
self.main_obj = main_obj
        # Create the label and the two radio buttons
label = QtWidgets.QLabel("Сложность модели")
setTrueButton = QtWidgets.QRadioButton("Две категории")
setFalseButton = QtWidgets.QRadioButton("Четыре категории")
        # Connect one of the buttons to the handler and mark it as checked
setTrueButton.toggled.connect( self.setTrue )
setTrueButton.setChecked(True)
box = QtWidgets.QHBoxLayout()
box.addWidget(setTrueButton)
box.addWidget(setFalseButton)
form = QtWidgets.QFormLayout()
form.addRow(label)
form.addRow(box)
self.setLayout( form )
def setTrue( self, is_right ):
if is_right:
self.main_obj.is_simple = True
else:
self.main_obj.is_simple = False
self.is_changed.emit()
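# Minimal usage sketch (illustrative; assumes a QApplication and a main_obj that
# exposes an is_simple flag, as the widget above expects):
#     app = QtWidgets.QApplication([])
#     widget = MainRadioButton(main_obj)
#     widget.is_changed.connect(lambda: print(main_obj.is_simple))
#     widget.show()
#     app.exec_()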
| 30.076923
| 69
| 0.634271
|
c0223ac12b2bb0b17c73ec8c84482a7b47cb243f
| 618
|
py
|
Python
|
eth/tools/rlp.py
|
jin10086/py-evm
|
da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8
|
[
"MIT"
] | 5
|
2018-09-28T20:01:42.000Z
|
2022-02-22T19:54:46.000Z
|
eth/tools/rlp.py
|
jin10086/py-evm
|
da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8
|
[
"MIT"
] | null | null | null |
eth/tools/rlp.py
|
jin10086/py-evm
|
da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8
|
[
"MIT"
] | 2
|
2018-12-09T15:58:11.000Z
|
2020-09-29T07:10:21.000Z
|
from eth_utils import (
replace_exceptions,
ValidationError,
)
from eth.utils.rlp import (
validate_rlp_equal,
)
assert_imported_genesis_header_unchanged = replace_exceptions({
ValidationError: AssertionError,
})(validate_rlp_equal(obj_a_name='genesis header', obj_b_name='imported header'))
assert_mined_block_unchanged = replace_exceptions({
ValidationError: AssertionError,
})(validate_rlp_equal(obj_a_name='block', obj_b_name='mined block'))
assert_headers_eq = replace_exceptions({
ValidationError: AssertionError,
})(validate_rlp_equal(obj_a_name='expected', obj_b_name='actual'))
| 25.75
| 81
| 0.788026
|
8f30d2d9e347bcd18899497d2eb129dde3e0de29
| 42,344
|
py
|
Python
|
plugin.video.xbmcfilm/resources/lib/mrknow_Pageparser.py
|
mrknow/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
plugin.video.xbmcfilm/resources/lib/mrknow_Pageparser.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
plugin.video.xbmcfilm/resources/lib/mrknow_Pageparser.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
# -*- coding: utf-8 -*-
import cookielib, os, string, StringIO
import os, time, base64, logging, calendar
import urllib, urllib2, re, sys, math
import xbmcaddon, xbmc, xbmcgui
try:
import simplejson as json
except ImportError:
import json
import urlparse, httplib, random, string
ptv = xbmcaddon.Addon()
scriptID = ptv.getAddonInfo('id')
scriptname = ptv.getAddonInfo('name')
#dbg = ptv.getSetting('default_debug') in ('true')
ptv = xbmcaddon.Addon(scriptID)
import mrknow_pLog, mrknow_pCommon, mrknow_urlparser, mrknow_utils
from BeautifulSoup import BeautifulSoup
log = mrknow_pLog.pLog()
class mrknow_Pageparser:
def __init__(self):
self.cm = mrknow_pCommon.common()
self.up = mrknow_urlparser.mrknow_urlparser()
def hostSelect(self, v):
hostUrl = False
d = xbmcgui.Dialog()
if len(v) > 0:
valTab = []
for i in range(len(v)):
valTab.append(str(i+1) + '. ' + self.getHostName(v[i], True))
item = d.select("Wybor hostingu", valTab)
if item >= 0: hostUrl = v[item]
else: d.ok ('Brak linkow','Przykro nam, ale nie znalezlismy zadnego linku do video.', 'Sproboj ponownie za jakis czas')
return hostUrl
def getHostName(self, url, nameOnly = False):
hostName = ''
match = re.search('http[s]?://(.+?)/',url)
if match:
hostName = match.group(1)
if (nameOnly):
n = hostName.split('.')
hostName = n[-2]
return hostName
def getVideoLink(self, url, referer=''):
nUrl=''
host = self.getHostName(url)
log.info("PAGEPARSER video hosted by: " + host)
if host == 'livemecz.com':
nUrl = self.livemecz(url)
print "Self",nUrl
if host == 'www.drhtv.com.pl':
nUrl = self.drhtv(url)
elif host == 'www.realtv.com.pl':
nUrl = self.realtv(url)
elif host == 'www.transmisje.info':
nUrl = self.transmisjeinfo(url)
elif host == '79.96.137.217' or host == 'http://178.216.200.26':
nUrl = self.azap(url)
elif host == 'bbpolska.webd.pl':
nUrl = self.bbpolska(url)
elif host == 'fotosend.pl':
nUrl = self.azap(url)
elif host == 'typertv.com' or host == 'www.typertv.com.pl':
nUrl = self.typertv(url)
elif host == 'streamon.pl':
nUrl = self.streamon(url)
elif host == 'goodcast.tv':
nUrl = self.goodcasttv(url)
elif host == 'mecz.tv':
nUrl = self.mecztv(url)
elif host == 'www.fupptv.pl':
nUrl = self.fupptvpl(url)
elif host == 'team-cast.pl':
nUrl = self.teamcastpl(url)
elif host == 'www.yousat.tv':
nUrl = self.yousattv(url)
elif host == 'zobacztv.beep.pl':
nUrl = self.zobaczxyz(url)
elif host == 'alltube.tv':
nUrl = self.alltubetv(url,referer='')
elif host == 'zobaczto.tv':
nUrl = self.zobacztotv(url, referer='')
elif host == 'zalukaj.tv':
nUrl = self.zalukajtv(url, referer='')
elif host == 'zalukaj.com':
nUrl = self.zalukajtv(url, referer='')
elif host == 'www.efilmy.tv':
nUrl = self.efilmytv(url, referer='')
elif host == 'www.filmydokumentalne.eu':
nUrl = self.filmydokumentalneeu(url, referer='')
elif host == 'www.tvseriesonline.pl':
nUrl = self.tvseriesonline(url, referer='')
elif 'looknij.tv' in host:
nUrl = self.looknijtv(url, referer='')
elif 'ustream.tv' in host:
nUrl = self.ustream(url, referer='')
elif 'telewizjoner.pl' in host:
nUrl = self.nettvpw(url, referer='')
elif 'screen-tv.pl' in host:
nUrl = self.screentv(url, referer='')
elif nUrl == '':
print "Jedziemy na ELSE - "+ url+ "Host" + host
nUrl = self.pageanalyze(url,host)
print ("Link:",nUrl)
return nUrl
def efilmytv(self,url,referer):
COOKIEFILE = ptv.getAddonInfo('path') + os.path.sep + "cookies" + os.path.sep + "efilmytv.cookie"
IMAGEFILE = ptv.getAddonInfo('path') + os.path.sep + "cookies" + os.path.sep + "efilmytv.jpg"
linkVideo=''
query_data = { 'url': url, 'use_host': False, 'use_cookie': True, 'cookiefile': COOKIEFILE, 'load_cookie': True, 'save_cookie': True, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
myfile1 = re.compile('<div id="(.*?)" alt="n" class="embedbg"><img src="(.*?)"/></div><div class="versionholder">').findall(link)
print("m",myfile1)
if len(myfile1)>0:
print("url", 'http://www.efilmy.tv/seriale.php?cmd=show_player&id=' + myfile1[0][0] )
HEADER = {'Referer' : 'http://www.efilmy.tv/seriale.php?cmd=show_player&id=' + myfile1[0][0], 'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0' }
query_data = { 'url': 'http://www.efilmy.tv/seriale.php?cmd=show_player&id=' + myfile1[0][0], 'use_host': False, 'use_header': True, 'header': HEADER,'use_cookie': True, 'cookiefile': COOKIEFILE, 'load_cookie': True, 'save_cookie': False, 'use_post': False, 'return_data': True }
link2 = self.cm.getURLRequestData(query_data)
print("link2",link2)
if '<p><strong>Zabezpieczenie przeciwko robotom</strong></p>' in link2:
print("link",link2)
mymatch=re.compile('<input type="hidden" name="id" value=(\d+) />\r\n<input type="hidden" name="mode" value=(\w+) />').findall(link2)
print(("mymatch",mymatch))
query_data = { 'url': 'http://www.efilmy.tv//mirrory.php?cmd=generate_captcha&time=' +str(random.randint(1, 1000)), 'use_host': False, 'use_header': True, 'header': HEADER,'use_cookie': True, 'cookiefile': COOKIEFILE, 'load_cookie': True, 'save_cookie': False, 'use_post': False, 'return_data': True }
link20 = self.cm.getURLRequestData(query_data)
with open(IMAGEFILE, 'wb') as f:
f.write(link20)
img = xbmcgui.ControlImage(450, 0, 400, 130, IMAGEFILE)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
kb = xbmc.Keyboard('', 'Type the letters in the image', False)
kb.doModal()
if (kb.isConfirmed()):
solution = kb.getText()
if solution == '':
raise Exception('You must enter text in the image to access video')
else:
dialog = xbmcgui.Dialog()
dialog.ok(" Problem"," Nie wprowadzono kodu Captcha")
return ''
xbmc.sleep(2 * 1000)
query_data = { 'url': 'http://www.efilmy.tv//mirrory.php?cmd=check_captcha', 'use_host': False, 'use_header': True, 'header': HEADER,'use_cookie': True, 'cookiefile': COOKIEFILE, 'load_cookie': True, 'save_cookie': True, 'use_post': True, 'return_data': True }
postdata = {'captcha':solution,"id":str(mymatch[0][0]),"mode":str(mymatch[0][1])}
link2 = self.cm.getURLRequestData(query_data, postdata)
myfile2 = re.compile('Base64.decode\("(.*?)"\)').findall(link2)
print("m2",myfile2 )
if len(myfile2)>0:
import base64
decode = base64.b64decode(myfile2[0])
print("myfile",decode)
myfile3 = re.compile('<IFRAME SRC="([^"]+)".*?>').findall(decode)
myfile4 = re.compile('<iframe src="([^"]+)".*?>').findall(decode)
if len(myfile3)>0:
linkVideo = self.up.getVideoLink(myfile3[0])
if len(myfile4)>0:
query_data = { 'url': myfile4[0] , 'use_host': False, 'use_header': True, 'header': HEADER,'use_cookie': True, 'cookiefile': COOKIEFILE, 'load_cookie': True, 'save_cookie': False, 'use_post': False, 'return_data': True }
link20 = self.cm.getURLRequestData(query_data)
mymatch1=re.compile(' <a href="(.*?)" style="display:block;width:100%;height:320px" id="player">').findall(link20)
linkVideo = mymatch1[0]
return linkVideo
def zalukajtv(self,url,referer):
linkVideo=''
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
#myfile1 = re.compile('<a style="color:white;font-size:20px;font-weight:bold;" href="(.*?)" target="_blank">(.*?)</a><br />').findall(link)
myfile1 = re.compile('<iframe allowTransparency="true" src="(.*?)" width="490" height="370" scrolling="no" frameborder="0">').findall(link)
#
#log("m %s" % str(link))
log("m %s" % myfile1)
if len(myfile1)>0:
log.info("url %s " % myfile1[0][0] )
query_data = { 'url': 'http://zalukaj.tv' + myfile1[0], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link2 = self.cm.getURLRequestData(query_data)
myfile2 = re.compile('<a href="(.*?)">').findall(link2)
log("m2 %s" % myfile2)
if len(myfile2)>0:
if len(myfile2)==1:
query_data = { 'url': 'http://zalukaj.tv' + myfile2[0], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link3 = self.cm.getURLRequestData(query_data)
myfile3 = re.compile('<iframe src="([^"]+)".*?>').findall(link3)
log("myfile %s" % myfile3[0])
if len(myfile3)>0:
return self.up.getVideoLink(myfile3[0])
            linkVideo = self.up.getVideoLink(myfile1[0])
return linkVideo
def filmydokumentalneeu(self, url, referer):
linkVideo=''
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
#match1=re.compile('<div id="news">\n \t<h1><span>(.*?)</span>(.*?)</h1>\n\t\t\t\n\n<div class="fb-social-plugin fb-follow" data-font="lucida grande" data-href="(.*?)" data-width="450"></div>\n\n<div class="fb-social-plugin fb-like" data-font="lucida grande" data-ref="above-post" data-href="(.*?)" data-width="450"></div>\n<p>(.*)</p>\n<p><iframe(.*)></iframe>').findall(link)
match1=re.compile('<p><iframe(.*)></iframe>').findall(link)
match10=re.compile('<embed(.*)>').findall(link)
if len(match1)>0:
match2=re.compile('src="(.*?)"').findall(match1[0])
if len(match2)>0:
linkVideo = self.up.getVideoLink(self.cm.html_special_chars(match2[0]))
elif len(match10)>0:
match2=re.compile('src="(.*?)"').findall(match10[0])
if len(match2)>0:
linkVideo = self.up.getVideoLink(self.cm.html_special_chars(match2[0]))
return linkVideo
def alltubetv(self, url, referer):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<td><img src="(.*?)" alt="(.*?)"> (.*?)</td>\n <td class="text-center">(.*?)</td>\n <td class="text-center"><a class="watch" data-urlhost="(.*?)" data-iframe="(.*?)" data-version="(.*?)" data-short="(.*?)" data-size="(.*?)" (.*?)>(.*?)</a>\n </td>').findall(link)
#print("Match1",match1)
tab = []
tab2 = []
if match1:
for i in range(len(match1)):
#print("Link", match1[i])
tab.append(match1[i][6] +' - ' + self.getHostName(match1[i][4]) )
tab2.append(match1[i][4])
d = xbmcgui.Dialog()
video_menu = d.select("Wybór strony video", tab)
if video_menu != "":
linkVideo = self.up.getVideoLink(tab2[video_menu],url)
return linkVideo
else:
return ''
def tvseriesonline(self, url, referer):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
        # Collect the lektor / Polish subtitle / English version links
linki_lektor = mrknow_utils.soup_get_links(link, "li", {"id": "lektor_pl"})
linki_pl = mrknow_utils.soup_get_links(link, "li", {"id": "napisy_pl"})
linki_en = mrknow_utils.soup_get_links(link, "li", {"id": "wersja_eng"})
linki_all = linki_lektor + linki_pl + linki_en
tab = []
tab2 = []
if len(linki_all)>0:
for i in range(len(linki_all)):
#print("Link", linki_all[i]['text'], linki_all[i]['id']['id'])
tab.append(linki_all[i]['id']['id'] + ' - ' + mrknow_utils.getHostName(linki_all[i]['text']) )
tab2.append(linki_all[i]['link'])
d = xbmcgui.Dialog()
video_menu = d.select("Wybór strony video", tab)
if video_menu != "":
linkVideo = self.up.getVideoLink(tab2[video_menu],url)
return linkVideo
else:
return ''
def zobacztotv(self, url, referer):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<div class="play-free" id="loading-(.*?)">Oglądaj na:<br />(.*?)</div>').findall(link)
tab = []
tab2 = []
if len(match1)>0:
for i in range(len(match1)):
match2 = re.compile("\$\('#(.*?)-"+match1[i][0]+"'\).load\('(.*?)'\);").findall(link)
if len(match2)>0:
tab.append('Strona - ' + match2[0][0] )
tab2.append(match2[0][1])
d = xbmcgui.Dialog()
video_menu = d.select("Wybór strony video", tab)
if video_menu != "":
query_data = {'url': tab2[video_menu], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
link = self.cm.getURLRequestData(query_data)
match = re.search("""<iframe src="(.*?)" (.*?)></iframe>""", link)
if match:
linkVideo = self.up.getVideoLink(match.group(1),url)
return linkVideo
else:
return ''
else:
return ''
def screentv(self, url, referer):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
# <iframe width="720" height="490" frameborder="0" scrolling="no" src="http://www.typertv.com.pl/emded/canal.php" allowfullscreen>
match1=re.compile('<iframe name="stream" id="stream-frame-iframe" src="embed/(.*?)"scrolling="no"> </iframe>').findall(link)
if match1:
mylink = 'http://screen-tv.pl/embed/' + match1[0]
return self.pageanalyze(mylink, url)
def typertv(self, url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
# <iframe width="720" height="490" frameborder="0" scrolling="no" src="http://www.typertv.com.pl/emded/canal.php" allowfullscreen>
match1=re.compile('<iframe (.*?)src="(.*?)/emded/(.*?)" (.*?)></iframe>').findall(link)
if match1:
mylink = match1[0][1] + '/emded/' + match1[0][2]
return self.pageanalyze(mylink, url)
def nettvpw(self, url, referer=''):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<embed src="(.*?)" width="700" height="418" (.*?)></embed>').findall(link)
if len(match1)>0:
return self.getVideoLink(match1[0][0],match1[0][0])
else:
return self.pageanalyze(url, url)
def zobaczxyz(self, url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<iframe(.*?)width="700px" height="500px" src="(.*?)" allowfullscreen="" scrolling="no" frameborder="0"></iframe>').findall(link)
if len(match1)>0:
nUrl = self.pageanalyze(match1[0][1],match1[0][1])
return nUrl
else:
return ''
def looknijtv(self,url, referer):
import looknijtv
self.looklink = looknijtv.looknijtv()
link= self.looklink.getMovieLinkFromXML(url)
return link
def ustream(self,url, referer):
video_id = '0'
query = urlparse.urlparse(url)
channel = query.path
p = urlparse.parse_qs(query.query)
params = query.path.split("/")
if query.path[:16] == '/swf/live/viewer':
video_id = p['cid'][0]
if query.path[:9] == '/channel/':
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<meta name="ustream:channel_id" content="(.*?)"').findall(link)
video_id=match1[0]
query_data = { 'url': 'https://api.ustream.tv/channels/'+video_id+'.json', 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
data = json.loads(link)
if video_id != '0':
if data['channel']['status'] == u'live' and video_id != '0':
nUrl = data['channel']['stream']['hls']
return nUrl
else:
return ''
else:
return ''
def yousattv(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<a href="(.*?)"(.*?)><span class="belka1a">(.*?)</span></a>').findall(link)
        if len(match1) > 0:
nUrl = self.getVideoLink(match1[0][0])
return nUrl
else:
return ''
def fupptvpl(self,url):
nUrl = self.up.getVideoLink(url, url)
return nUrl
def teamcastpl(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<iframe(.*?)src="(.*?)"(.*?)></iframe>').findall(link)
if len(match1)>0:
nUrl = self.pageanalyze(match1[0][1],url)
return nUrl
else:
nUrl = self.pageanalyze(url,url)
return nUrl
def mecztv(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<iframe frameborder="0" height="480" marginheight="0px" marginwidth="0px" name="livemecz.com" scrolling="no" src="(.*?)" width="640"></iframe>').findall(link)
        if len(match1) > 0:
query_data = { 'url': match1[0], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match2=re.compile('<iframe marginheight="0" marginwidth="0" name="mecz.tv" src="(.*?)" frameborder="0" height="480" scrolling="no" width="640"></iframe>').findall(link)
            if len(match2) > 0:
query_data = { 'url': match2[0], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match3=re.compile('<iframe(.*?)src="(.*?)"(.*?)>').findall(link)
if len(match3)>0:
nUrl = self.pageanalyze(match3[0][1],url)
else:
nUrl = self.pageanalyze(match2[0],url)
#nUrl = self.pageanalyze('http://goodcast.tv/' + match1[0][0], 'http://goodcast.tv/' + match1[0][0])
#return nUrl
return False
def goodcasttv(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<iframe frameborder="0" width="630" height="360" margin="0px" name="goodcast.tv" scrolling="no" src="(.*?)"></iframe>').findall(link)
query_data = { 'url': match1[0], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match2=re.compile('<iframe width="630px" height="350px" scrolling="no" frameborder="0" src="(.*?)"></iframe>').findall(link)
match3=re.compile("file: '(.*?)',").findall(link)
if len(match2)>0:
nUrl = self.up.getVideoLink(match2[0], url)
return nUrl
if len(match3)>0:
nUrl = self.up.getVideoLink(match1[0], url)
return nUrl
def streamon(self,url):
self.COOKIEFILE = ptv.getAddonInfo('path') + os.path.sep + "cookies" + os.path.sep + "streamon.cookie"
nUrl = self.pageanalyze(url,url)
return nUrl
def azap(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match1=re.compile('<meta http-equiv="Refresh" content="(.*?); url=(.*?)" />').findall(link)
if len(match1)>0:
url = match1[0][1]
nUrl = self.up.getVideoLink(url)
return nUrl
else:
            return self.pageanalyze(url)
def bbpolska(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match=re.compile('<div id="player">(.*?)</div>').findall(link)
if len(match)>0:
match1=re.compile('src="(.*?)"').findall(match[0])
return self.pageanalyze(match1[0],match1[0])
else:
return False
match=re.compile('<iframe width="(.*?)" height="(.*?)" src="(.*?)" scrolling="no" frameborder="0" style="border: 0px none transparent;">').findall(link)
return self.pageanalyze('http://www.transmisje.info'+match[0][2],'http://www.transmisje.info')
def transmisjeinfo(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match=re.compile('<iframe width="(.*?)" height="(.*?)" src="(.*?)" scrolling="no" frameborder="0" style="border: 0px none transparent;">').findall(link)
return self.pageanalyze('http://www.transmisje.info'+match[0][2],'http://www.transmisje.info')
def realtv(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match=re.compile('<iframe frameborder="0" height="420" marginheight="0px" marginwidth="0px" name="RealTV.com.pl" scrolling="no" src="(.*?)" width="650">').findall(link)
return self.pageanalyze(match[0],'http://www.realtv.com.pl')
def livemecz(self,url):
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match=re.compile('<iframe frameborder="0" height="480" marginheight="0px" marginwidth="0px" name="livemecz.com" scrolling="no" src="(.+?)" width="640"></iframe>').findall(link)
query_data = { 'url': match[0], 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
match=re.compile('<iframe marginheight="0" marginwidth="0" name="livemecz.com" src="(.*?)" frameborder="0" height="480" scrolling="no" width="640">').findall(link)
videolink = self.pageanalyze(match[0],'http://livemecz.com/')
return videolink
def drhtv(self,url):
self.COOKIEFILE = ptv.getAddonInfo('path') + os.path.sep + "cookies" + os.path.sep + "streamon.cookie"
return self.pageanalyze(url,url,'','Accept-Encoding: gzip, deflate')
def pageanalyze(self,url,referer='',cookie='',headers=''):
print ('DANE',url,referer,cookie,headers)
if cookie != '':
query_data = { 'url': url, 'use_host': False, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': cookie, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
elif headers != '':
query_data = { 'url': url, 'use_host': True, 'host': headers, 'use_cookie': False, 'use_post': False, 'return_data': True }
link = self.cm.getURLRequestData(query_data)
elif referer != '':
print "Refe"
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True, 'header' : {'Referer': referer, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0'}}
link = self.cm.getURLRequestData(query_data)
else:
query_data = { 'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
link = self.cm.getURLRequestData(query_data)
match=re.compile('<script type="text/javascript"> channel="(.*?)"; width="(.*?)"; height="(.*?)";</script><script type="text/javascript" src="http://yukons.net/share.js"></script>').findall(link)
match1000=re.compile('<script type="text/javascript"> channel="(.*?)"; width="(.*?)"; height="(.*?)";</script>\n<script type="text/javascript" src="http://yukons.net/share.js"></script>').findall(link)
match1=re.compile("<script type='text/javascript'>fid='(.*?)'; v_width=(.*?); v_height=(.*?);</script><script type='text/javascript' src='http://www.reyhq.com/player.js'></script>").findall(link)
match2=re.compile("<script type='text/javascript' src='http://www.sawlive.tv/embed/(.*?)'>").findall(link)
match3=re.compile("<script type='text/javascript' src='http://sawlive.tv/embed/(.*?)'>").findall(link)
match4=re.compile('<script type="text/javascript" src="http://www.ilive.to/embed/(.*?)">').findall(link)
match5=re.compile("<script type='text/javascript'> channel='(.*?)'; user='(.*?)'; width='640'; height='400';</script><script type='text/javascript' src='http://jimey.tv/player/jimeytv_embed.js'>").findall(link)
match6=re.compile("<script type='text/javascript'> width=(.*?), height=(.*?), channel='(.*?)', e='(.*?)';</script><script type='text/javascript' src='http://www.mips.tv/content/scripts/mipsEmbed.js'>").findall(link)
match7=re.compile('<script type="text/javascript">fid="(.*?)"; v_width=(.*?); v_height=(.*?);</script><script type="text/javascript" src="http://www.ukcast.tv/embed.js"></script>').findall(link)
match8=re.compile('<script type="text/javascript"> channel="(.*?)"; vwidth="(.*?)"; vheight="(.*?)";</script><script type="text/javascript" src="http://castamp.com/embed.js"></script>').findall(link)
match9=re.compile("<script type='text/javascript'>id='(.*?)'; width='(.*?)'; height='(.*?)';</script><script type='text/javascript' src='http://liveview365.tv/js/player.js'></script>").findall(link)
match10=re.compile('<script type="text/javascript"> channel="(.*?)"; width="(.*?)"; height="(.*?)";</script>\r\n<script type="text/javascript" src="http://yukons.net/share.js"></script>').findall(link)
match11=re.compile('<iframe width="600px" height="400px" scrolling="no" frameborder="0" src="http://www.putlive.in/(.*?)"></iframe>').findall(link)
match12=re.compile('<iframe frameborder=0 marginheight=0 marginwidth=0 scrolling=\'no\'src="(.*?)" width="(.*?)" height="(.*?)">').findall(link)
match13=re.compile("<script type='text/javascript'> width=640, height=480, channel='(.*?)', g='(.*?)';</script><script type='text/javascript' src='http://www.ucaster.eu/static/scripts/ucaster.js'></script>").findall(link)
match14=re.compile("<script type='text/javascript'>fid='(.*?)'; v_width=(.*?); v_height=(.*?);</script><script type='text/javascript' src='http://www.flashwiz.tv/player.js'></script>").findall(link)
match15=re.compile('<script type="text/javascript"> fid="(.*?)"; v_width=(.*?); v_height=(.*?);</script><script type="text/javascript" src="http://www.yycast.com/javascript/embedPlayer.js"></script>').findall(link)
match16=re.compile("<script type='text/javascript'> width=(.*?), height=(.*?), channel='(.*?)', g='(.*?)';</script><script type='text/javascript' src='http://www.liveflash.tv/resources/scripts/liveFlashEmbed.js'></script>").findall(link)
match17=re.compile('<script type="text/javascript">ca="(.*?)";width="(.*?)"; height="(.*?)";</script><script type="text/javascript" src="https://ovcast.com/js/embed.js"></script>').findall(link)
match18=re.compile("<script type=\'text/javascript\'>id=\'(.*?)\'; width=\'(.*?)\'; height=\'(.*?)\';</script><script type=\'text/javascript\' src=\'http://stream4.tv/player.js\'>").findall(link)
match19=re.compile("<script type='text/javascript'>id='(.*?)'; width='(.*?)'; height='(.*?)';</script><script type='text/javascript' src='http://goodcast.org/player.js'></script>").findall(link)
match20=re.compile('<script type="text/javascript" src="http://(.*?)jjcast.com/(.*?)">').findall(link)
match21=re.compile('<script type="text/javascript" language="JavaScript" src="http://hqstream.tv/pl?(.*?)"></script>').findall(link)
match22=re.compile("<script type='text/javascript'>(.*?)</script><script type='text/javascript' src='http://cdn.tiv.pw/stream(.*?).js'></script>").findall(link)
match23=re.compile('<script type="text/javascript" src="http://7cast.net/embed/(.*?)/(.*?)/(.*?)"></script>').findall(link)
match24=re.compile('<script type=\'text/javascript\'> file=\'(.*?)\'(.*?)</script>\n<script type=\'text/javascript\' src=\'http://abcast.biz/embedPlayer.js\'>').findall(link)
match25=re.compile('<script type=\'text/javascript\'> file=\'(.*?)\'; width=\'(.*?)\'; height=\'(.*?)\';</script><script type=\'text/javascript\' src=\'http://flexstream.net/embedPlayer.js\'></script>').findall(link)
match26=re.compile('<script type=\'text/javascript\'> file=\'(.*?)\'(.*?)</script><script type=\'text/javascript\' src=\'http://abcast.biz/embedPlayer.js\'></script>').findall(link)
match27=re.compile('<script type=\'text/javascript\'> file=\'(.*?)\'(.*?)</script>\n<script type=\'text/javascript\' src=\'http://www.freelivestream.tv/embedPlayerScript.js\'></script>').findall(link)
match28=re.compile('<script type=\'text/javascript\'>id=\'(.*?)\'(.*?)</script><script type=\'text/javascript\' src=\'http://up4free.com/player.js\'></script>').findall(link)
match29=re.compile('<script type=\'text/javascript\'>id=\'(.*?)\'(.*?)</script><script type=\'text/javascript\' src=\'http://goodcast.me/player.js\'></script>').findall(link)
match30=re.compile('<script type=\'text/javascript\' src=\'http://www.shidurlive.com/embed/(.*?)\'></script>').findall(link)
match31=re.compile('<script type="text/javascript"> id="(.*?)"; ew="(.*?)"; eh="(.*?)";</script><script type="text/javascript" src="http://www.castalba.tv/js/embed.js"></script>').findall(link)
match32=re.compile('<script type=\'text/javascript\'> file=\'(.*?)\'(.*?)</script><script type=\'text/javascript\' src=\'http://abcast.net/abc.js\'></script>').findall(link)
match33=re.compile('<script type=\'text/javascript\'>id=\'(.*?)\'(.*?)</script><script type=\'text/javascript\' src=\'http://player.goodcast.co/goodcast/player.js\'></script>').findall(link)
match34=re.compile('<script type="text/javascript"> fid="(.*?)"; v_width=(.*?); v_height=(.*?);</script><script type="text/javascript" src="http://static.castto.me/js/embedplayer.js">').findall(link)
match35=re.compile('<script type="text/javascript" src="http://www.byetv.org/(.*?)"></script>').findall(link)
match36=re.compile('<script type="text/javascript" src="http://www.hdcast.me/(.*?)"></script>').findall(link)
match37=re.compile("<script type='text/javascript'> file='(.*?)'(.*?)</script><script type='text/javascript' src='http://pxstream.tv/embedPlayerScript.js'></script>").findall(link)
match38=re.compile("<script type='text/javascript'>id='(.*?)';(.*?)</script><script type='text/javascript' src='http://deltatv.pw/player.js'></script>").findall(link)
match39=re.compile("<script type='text/javascript'> id='(.*?)';(.*?)</script><script type='text/javascript' src='http://ultracast.me/player.js'></script>").findall(link)
match40=re.compile("<script type='text/javascript'>(.*?)</script><script type='text/javascript' src='http://shidurlive.com/embed/(.*?)'></script>").findall(link)
match41=re.compile("<script type='text/javascript'>id='(.*?)'(.*?)</script><script type='text/javascript' src='http://biggestplayer.me/player.js'></script>").findall(link)
match42=re.compile("<script type='text/javascript'> file='(.*?)';(.*?)</script><script type='text/javascript' src='http://pxstream.tv/embedRouter.js'></script>").findall(link)
match43=re.compile("<script type='text/javascript'>id='(.*?)';(.*?)</script><script type='text/javascript' src='http://js.p2pcast.tv/p2pcast/player.js'></script>").findall(link)
match44=re.compile("<script type='text/javascript'>(.*?)channel='(.*?)',(.*?)</script><script type='text/javascript' src='http://tutelehd.com/embedPlayer.js'></script>").findall(link)
#
match1001=re.compile("file : '(.*?)'").findall(link)
if len(match) > 0:
return self.up.getVideoLink('http://yukons.net/'+match[0][0],referer)
elif len(match1000) > 0:
return self.up.getVideoLink('http://yukons.net/'+match1000[0][0],referer)
elif len(match1) > 0:
return self.up.getVideoLink('http://www.reyhq.com/'+match1[0][0])
elif len(match2) > 0:
print ("Match2",match2)
return self.up.getVideoLink('http://www.sawlive.tv/embed/'+match2[0],url)
elif len(match3) > 0:
return self.up.getVideoLink('http://www.sawlive.tv/embed/'+match3[0],url)
elif len(match4) > 0:
print ("Match4",match4)
return self.up.getVideoLink('http://www.ilive.to/embed/'+match4[0],referer)
elif len(match6) > 0:
print ("Match6",match6[0])
return self.up.getVideoLink('http://mips.tv/embedplayer/'+match6[0][2]+'/'+match6[0][3]+'/'+match6[0][0]+'/'+match6[0][1])
elif len(match7) > 0:
print ("Match7",match7)
return self.up.getVideoLink('http://www.ukcast.tv/embed.php?u='+match7[0][0]+'&vw='+match7[0][1]+'&vh='+match7[0][2])
elif len(match8) > 0:
print ("Match8",match8)
query_data = { 'url': 'http://castamp.com/embed.js', 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
link = self.cm.getURLRequestData(query_data)
print("Link",link)
chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXTZabcdefghiklmnopqrstuvwxyz";
string_length = 8;
randomstring = '';
for i in range(0, string_length):
                rnum = random.randint(0, len(chars) - 1)
print("AAA",rnum, chars[1])
randomstring = randomstring + chars[rnum]
return self.up.getVideoLink('http://castamp.com/embed.php?c='+match8[0][0]+'&tk='+randomstring+'&vwidth=710&vheight=460',referer)
elif len(match9) > 0:
print ("Match9",match9)
return self.up.getVideoLink('http://liveview365.tv/embedded?id='+match9[0][0],referer)
elif len(match10) > 0:
print ("Match10",match10)
return self.up.getVideoLink('http://yukons.net/'+match10[0][0])
elif len(match11) > 0:
print ("Match11",'http://www.putlive.in/'+match11[0])
return self.up.getVideoLink('http://www.putlive.in/'+match11[0],referer)
elif len(match12) > 0:
print ("Match12",match12)
return self.up.getVideoLink(match12[0][0],referer)
elif len(match13) > 0:
print ("Match13",match13)
return self.up.getVideoLink('http://www.ucaster.eu/embedded/'+match13[0][0]+'/'+match13[0][1]+'/400/480',referer)
elif len(match14) > 0:
print ("Match14",match14)
return self.up.getVideoLink('http://www.flashwiz.tv/embed.php?live='+match14[0][0]+'&vw='+match14[0][1]+'&vh='+match14[0][2],referer)
elif len(match15) > 0:
print ("Match15",match15)
return self.up.getVideoLink('http://www.yycast.com/embed.php?fileid='+match15[0][0]+'&vw='+match15[0][1]+'&vh='+match15[0][2],referer)
elif len(match16) > 0:
print ("Match16",match16)
return self.up.getVideoLink('http://www.liveflash.tv/embedplayer/'+match16[0][2]+'/'+match16[0][3]+'/'+match16[0][0]+'/'+match16[0][1],referer)
elif len(match17) > 0:
print ("Match17",match17)
return self.up.getVideoLink('https://ovcast.com/gen.php?ch='+match17[0][0]+'&width='+match17[0][1]+'&height='+match17[0][2],referer)
elif len(match18) > 0:
print ("Match18",match18)
return self.up.getVideoLink('http://stream4.tv/player.php?id='+match18[0][0]+'&width='+match18[0][1]+'&height='+match18[0][2],referer)
elif len(match19) > 0:
print ("Match19",match19)
return self.up.getVideoLink('http://goodcast.org/stream.php?id='+match19[0][0]+'&width='+match19[0][1]+'&height='+match19[0][2],referer)
elif len(match20) > 0:
print ("Match20",match20)
return self.up.getVideoLink('http://jjcast.com/'+match20[0][1].replace('embed','player'),referer)
elif len(match21) > 0:
print ("Match21",match21)
return self.up.getVideoLink('http://hqstream.tv/player.php'+match21[0],referer)
elif len(match22) > 0:
print ("Match22",match22)
            return self.up.getVideoLink('http://cdn.tiv.pw/stream' + match22[0][1] + '.html', referer)
elif len(match23) > 0:
print ("Match23",match23)
return self.up.getVideoLink('http://7cast.net/player/'+match23[0][0]+'/650/450',referer)
elif len(match24) > 0:
print ("Match24",match24)
return self.up.getVideoLink('http://abcast.biz/embed.php?file='+match24[0][0]+'&width=640&height=400',referer)
elif len(match25) > 0:
print ("Match25",match25)
return self.up.getVideoLink('http://flexstream.net/embed.php?file='+match25[0][0]+'&width=640&height=400',referer)
elif len(match26) > 0:
print ("Match26",match26)
return self.up.getVideoLink('http://abcast.biz/embed.php?file='+match26[0][0]+'&width=640&height=400',referer)
elif len(match27) > 0:
print ("Match27",match27)
return self.up.getVideoLink('http://www.freelivestream.tv/embedPlayer.php?file='+match27[0][0]+'&width=600&height=400',referer)
elif len(match28) > 0:
print ("Match28",match28, url)
#http://embed.up4free.com/stream.php?id=nsajfnidg&width=700&height=450&stretching=
url2 = 'http://embed.up4free.com/stream.php?id='+match28[0][0]+'&width=700&height=450&stretching='
mylink10 = mrknow_Pageparser()
mylink3 = mylink10.pageanalyze(url2,url)
print("MyLink3",mylink3,referer)
print("MyLink3",url2, mylink3,match28[0][0],link)
return mylink3
elif len(match29) > 0:
print ("Match29",match29)
return self.up.getVideoLink('http://goodcast.me/stream.php?id='+match29[0][0]+'&width=640&height=480&stretching=',referer)
elif len(match30) > 0:
print ("Match30",match30)
return self.up.getVideoLink('http://www.shidurlive.com/embed/'+match30[0],referer)
elif len(match31) > 0:
print ("Match31",match31)
return self.up.getVideoLink('http://castalba.tv/embed.php?cid='+match31[0][0]+'&wh=640&ht=400&r='+self.getHostName(referer),referer)
elif len(match32) > 0:
print ("Match32",match32)
return self.up.getVideoLink('http://abcast.net/abc.php?file='+match32[0][0]+'&width=640&height=400',referer)
elif len(match33) > 0:
print ("Match33",match33, referer)
host = self.getHostName(url)
if host == 'embed.up4free.com':
return self.up.getVideoLink('http://goodcast.co/stream.php?id='+match33[0][0]+'&width=640&height=480&stretching=',url)
else:
return self.up.getVideoLink('http://goodcast.co/stream.php?id='+match33[0][0]+'&width=640&height=480&stretching=',referer)
elif len(match34) > 0:
print ("Match34",match34, referer)
mylink = self.up.getVideoLink('http://static.castto.me/embed.php?channel='+match34[0][0]+'&vw=710&vh=460', referer)
print("Match34", mylink)
return mylink
elif len(match35) > 0:
print ("Match35",match35, referer)
link = match35[0].replace('channel.php?file=','http://www.byetv.org/embed.php?a=')
mylink = self.up.getVideoLink(link, referer)
return mylink
elif len(match36)>0:
print ("Match36",match36, referer)
mylink = self.up.getVideoLink('http://hdcast.me/'+match36[0].replace('embed.php?','embedplayer.php?'), referer)
return mylink
elif len(match37)>0:
print ("Match37",match37, referer, match37[0][0])
return self.up.getVideoLink('http://pxstream.tv/embed.php?file='+match37[0][0]+'&width=710&height=460',referer)
elif len(match38)>0:
print ("Match38",match38, referer)
return self.up.getVideoLink('http://deltatv.pw/stream.php?id='+match38[0][0]+'&width=710&height=460',referer)
elif len(match39)>0:
print ("Match39",match39, referer)
return self.up.getVideoLink('http://www.ultracast.me/player.php?id='+match39[0][0]+'&width=710&height=460',referer)
elif len(match40) > 0:
print ("Match40",match40)
return self.up.getVideoLink('http://www.shidurlive.com/embed/'+match40[0][1],referer)
elif len(match41) > 0:
print ("Match41",match41)
return self.up.getVideoLink('http://biggestplayer.me/stream.php?id='+match41[0][0]+'&width=690&height=440',referer)
elif len(match42) > 0:
print ("Match42",match42)
return self.up.getVideoLink('http://pxstream.tv/embed.php?file='+match42[0][0]+'&width=710&height=460',referer)
elif len(match43) > 0:
print ("Match43",match43)
return self.up.getVideoLink('http://p2pcast.tv/stream.php?id='+match43[0][0]+'&live=0&p2p=0&stretching=uniform',referer)
elif len(match44) > 0:
print ("Match44",match44)
return self.up.getVideoLink('http://tutelehd.com/embed/embed.php?channel='+match44[0][1]+'&w=690&h=440',referer)
elif len(match1001) > 0:
print ("match1001",match1001)
if len(match1001)>0:
return match1001[0] + " live=true timeout=30"
else:
return ''
else:
print ("jEDZIEMY NA ELSE",link)
return self.up.getVideoLink(url,referer)
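# Typical call sequence (sketch only; the hosting URL below is a placeholder):
#     parser = mrknow_Pageparser()
#     stream_url = parser.getVideoLink('http://example-hosting-site/embed/123', referer='')
#     # getVideoLink dispatches on the host name and falls back to pageanalyze(),
#     # which tries the embed-script patterns above until one resolves to a stream URL.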
| 56.837584
| 390
| 0.617868
|
175cbea9180c9b976613acc8290cc47149763254
| 1,134
|
py
|
Python
|
diskcollections/handlers.py
|
ectomancer/python-disk-collections
|
eff4e54c56cef3120a0ffda231b962880f279bda
|
[
"MIT"
] | 1
|
2018-12-10T15:08:33.000Z
|
2018-12-10T15:08:33.000Z
|
diskcollections/handlers.py
|
ectomancer/python-disk-collections
|
eff4e54c56cef3120a0ffda231b962880f279bda
|
[
"MIT"
] | null | null | null |
diskcollections/handlers.py
|
ectomancer/python-disk-collections
|
eff4e54c56cef3120a0ffda231b962880f279bda
|
[
"MIT"
] | null | null | null |
import json
import pickle
import zlib
from diskcollections.interfaces import IHandler
class PickleHandler(IHandler):
dumps = staticmethod(pickle.dumps)
loads = staticmethod(pickle.loads)
class PickleZLibHandler(IHandler):
@staticmethod
def dumps(
obj,
protocol=pickle.HIGHEST_PROTOCOL,
level=zlib.Z_DEFAULT_COMPRESSION
):
pickled = pickle.dumps(obj, protocol=protocol)
compressed = zlib.compress(pickled, level)
return compressed
@staticmethod
def loads(compressed):
pickled = zlib.decompress(compressed)
obj = pickle.loads(pickled)
return obj
class JsonHandler(IHandler):
dumps = staticmethod(json.dumps)
loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
@staticmethod
def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
jsoned = json.dumps(obj).encode()
compressed = zlib.compress(jsoned, level)
return compressed
@staticmethod
def loads(compressed):
jsoned = zlib.decompress(compressed).decode()
obj = json.loads(jsoned)
return obj
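# Illustrative round-trip check (added sketch, not part of the original module):
# every handler pairs dumps/loads symmetrically, so serialising and deserialising
# a JSON-compatible object should return an equal object.
if __name__ == "__main__":
    sample = {"label": "fly", "frames": [1, 2, 3]}
    for handler in (PickleHandler, PickleZLibHandler, JsonHandler, JsonZLibHandler):
        blob = handler.dumps(sample)
        assert handler.loads(blob) == sample
    print("all handlers round-trip correctly")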
| 22.68
| 54
| 0.679894
|
d8daedf3b853a8a809b6205c0aa04031f2913898
| 19,093
|
py
|
Python
|
tests/integration/cattletest/core/test_authorization.py
|
mbrukman/rancher-cattle
|
ac7caffb97346f601043458411391d2d00fd6129
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/cattletest/core/test_authorization.py
|
mbrukman/rancher-cattle
|
ac7caffb97346f601043458411391d2d00fd6129
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/cattletest/core/test_authorization.py
|
mbrukman/rancher-cattle
|
ac7caffb97346f601043458411391d2d00fd6129
|
[
"Apache-2.0"
] | null | null | null |
from common_fixtures import * # NOQA
def test_client_access(clients):
typesLen = {
'admin': 91,
'agent': 8,
'user': 69,
'agentRegister': 4,
'test': 140,
'readAdmin': 91,
'token': 2,
'superadmin': 141,
'service': 91,
'project': 69,
}
for tuple in clients.items():
assert typesLen[tuple[0]] == len(tuple[1].schema.types.items())
def test_instance_link_auth(admin_client, client):
auth_check(admin_client.schema, 'instanceLink', 'ru', {
'accountId': 'r',
'data': 'r',
'instanceId': 'r',
'linkName': 'r',
'ports': 'r',
'targetInstanceId': 'ru',
})
auth_check(client.schema, 'instanceLink', 'ru', {
'accountId': 'r',
'instanceId': 'r',
'linkName': 'r',
'targetInstanceId': 'ru',
})
def test_token_auth(token_client):
auth_check(token_client.schema, 'token', 'cr', {
'jwt': 'r',
'code': 'cr',
'user': 'r',
'orgs': 'r',
'clientId': 'r',
'security': 'r',
'teams': 'r',
'userType': 'r',
'accountId': 'r',
'defaultProject': 'r'
})
def test_github_auth(admin_client):
auth_check(admin_client.schema, 'githubconfig', 'cru', {
'enabled': 'cr',
'allowedOrganizations': 'cr',
'allowedUsers': 'cr',
'clientId': 'cr',
'clientSecret': 'cr',
'accessMode': 'cr'
})
def test_project_auth(admin_client, client):
auth_check(admin_client.schema, 'project', 'crud', {
'description': 'cru',
'kind': 'r',
'name': 'cru',
'uuid': 'cr',
'data': 'r',
'members': 'cr',
'projectId': 'r'
})
auth_check(client.schema, 'project', 'crud', {
'description': 'cru',
'kind': 'r',
'name': 'cru',
'uuid': 'r',
'members': 'cr'
})
def test_project_member_auth(admin_client, client):
auth_check(admin_client.schema, 'projectMember', 'r', {
"role": "r",
"externalId": "r",
"externalIdType": "r",
"projectId": "r",
"data": 'r'
})
auth_check(client.schema, 'projectMember', 'r', {
"role": "r",
"externalId": "r",
"externalIdType": "r",
"projectId": "r"
})
def test_host_auth(admin_client, client):
auth_check(admin_client.schema, 'host', 'rud', {
'accountId': 'r',
'apiProxy': 'ru',
'agentId': 'r',
'computeTotal': 'r',
'data': 'r',
'physicalHostId': 'r',
'info': 'r',
})
auth_check(client.schema, 'host', 'rud', {
'accountId': 'r',
'computeTotal': 'r',
'physicalHostId': 'r',
'info': 'r',
})
def test_ip_address_auth(admin_client, client):
auth_check(admin_client.schema, 'ipAddress', 'r', {
'accountId': 'r',
'networkId': 'r',
'address': 'r',
'data': 'r',
})
auth_check(client.schema, 'ipAddress', 'r', {
'accountId': 'r',
'address': 'r',
'networkId': 'r',
})
def test_task_instance_auth(admin_client, client):
auth_check(admin_client.schema, 'taskInstance', 'r', {
'endTime': 'r',
'exception': 'r',
'serverId': 'r',
'startTime': 'r',
'taskId': 'r',
})
def test_volume_auth(admin_client, client):
auth_check(admin_client.schema, 'volume', 'rd', {
'accountId': 'r',
'created': 'r',
'data': 'r',
'description': 'r',
'id': 'r',
'imageId': 'r',
'instanceId': 'r',
'kind': 'r',
'name': 'r',
'removeTime': 'r',
'removed': 'r',
'state': 'r',
'uri': 'r',
'uuid': 'r',
'transitioning': 'r',
'transitioningMessage': 'r',
'transitioningProgress': 'r',
'isHostPath': 'r'
})
auth_check(client.schema, 'volume', 'rd', {
'accountId': 'r',
'created': 'r',
'description': 'r',
'id': 'r',
'imageId': 'r',
'instanceId': 'r',
'kind': 'r',
'name': 'r',
'removed': 'r',
'state': 'r',
'uri': 'r',
'uuid': 'r',
'transitioning': 'r',
'transitioningMessage': 'r',
'transitioningProgress': 'r',
'isHostPath': 'r'
})
def test_container_auth(admin_client, client):
auth_check(admin_client.schema, 'container', 'crud', {
'accountId': 'r',
'agentId': 'r',
'allocationState': 'r',
'capAdd': 'cr',
'capDrop': 'cr',
'command': 'cr',
'count': 'cr',
'cpuSet': 'cr',
'cpuShares': 'cr',
'created': 'r',
'data': 'r',
'dataVolumes': 'cr',
'dataVolumesFrom': 'cr',
'description': 'cru',
'devices': 'cr',
'directory': 'cr',
'dns': 'cr',
'dnsSearch': 'cr',
'domainName': 'cr',
'entryPoint': 'cr',
'environment': 'cr',
'firstRunning': 'r',
'hostname': 'cr',
'id': 'r',
'imageUuid': 'cr',
'instanceLinks': 'cr',
'lxcConf': 'cr',
'memory': 'cr',
'memorySwap': 'cr',
'networkIds': 'cr',
'ports': 'cr',
'primaryIpAddress': 'r',
'privileged': 'cr',
'publishAllPorts': 'cr',
'removeTime': 'r',
'registryCredentialId': 'cr',
'requestedHostId': 'cr',
'restartPolicy': 'cr',
'startOnCreate': 'cr',
'stdinOpen': 'cr',
'token': 'r',
'tty': 'cr',
'user': 'cr',
'systemContainer': 'r',
'nativeContainer': 'r',
'externalId': 'r'
})
auth_check(client.schema, 'container', 'crud', {
'accountId': 'r',
'capAdd': 'cr',
'capDrop': 'cr',
'command': 'cr',
'count': 'cr',
'cpuSet': 'cr',
'cpuShares': 'cr',
'created': 'r',
'dataVolumes': 'cr',
'dataVolumesFrom': 'cr',
'description': 'cru',
'devices': 'cr',
'directory': 'cr',
'dns': 'cr',
'dnsSearch': 'cr',
'domainName': 'cr',
'entryPoint': 'cr',
'environment': 'cr',
'firstRunning': 'r',
'hostname': 'cr',
'id': 'r',
'imageUuid': 'cr',
'instanceLinks': 'cr',
'lxcConf': 'cr',
'memory': 'cr',
'memorySwap': 'cr',
'networkIds': 'cr',
'ports': 'cr',
'primaryIpAddress': 'r',
'privileged': 'cr',
'publishAllPorts': 'cr',
'registryCredentialId': 'cr',
'requestedHostId': 'cr',
'restartPolicy': 'cr',
'startOnCreate': 'cr',
'stdinOpen': 'cr',
'tty': 'cr',
'user': 'cr',
'systemContainer': 'r',
'nativeContainer': 'r',
'externalId': 'r',
})
def test_port_auth(admin_client, client):
auth_check(admin_client.schema, 'port', 'ru', {
'accountId': 'r',
'data': 'r',
'instanceId': 'r',
'privateIpAddressId': 'r',
'privatePort': 'r',
'protocol': 'r',
'publicIpAddressId': 'r',
'publicPort': 'ru',
})
auth_check(client.schema, 'port', 'ru', {
'accountId': 'r',
'instanceId': 'r',
'privateIpAddressId': 'r',
'privatePort': 'r',
'protocol': 'r',
'publicIpAddressId': 'r',
'publicPort': 'ru',
})
def test_mount_auth(admin_client, client):
auth_check(admin_client.schema, 'mount', 'r', {
'name': 'r',
'description': 'r',
'data': 'r',
'accountId': 'r',
'instanceId': 'r',
'volumeId': 'r',
'kind': 'r',
'uuid': 'r',
'removeTime': 'r',
'id': 'r',
'created': 'r',
'path': 'r',
'permissions': 'r',
'removed': 'r',
'state': 'r',
'transitioning': 'r',
'transitioningMessage': 'r',
'transitioningProgress': 'r'
})
auth_check(client.schema, 'mount', 'r', {
'accountId': 'r',
'name': 'r',
'description': 'r',
'instanceId': 'r',
'volumeId': 'r',
'kind': 'r',
'uuid': 'r',
'id': 'r',
'created': 'r',
'path': 'r',
'permissions': 'r',
'removed': 'r',
'state': 'r',
'transitioning': 'r',
'transitioningMessage': 'r',
'transitioningProgress': 'r'
})
def test_process_instance_auth(admin_client, client):
auth_check(admin_client.schema, 'processInstance', 'r', {
'endTime': 'r',
'exitReason': 'r',
'phase': 'r',
'priority': 'r',
'processName': 'r',
'resourceId': 'r',
'resourceType': 'r',
'result': 'r',
'runningProcessServerId': 'r',
'startProcessServerId': 'r',
'startTime': 'r',
'data': 'r',
})
def test_process_execution(admin_client, client):
auth_check(admin_client.schema, 'processExecution', 'r', {
'log': 'r',
'processInstanceId': 'r',
})
def test_process_definition(admin_client, client):
auth_check(admin_client.schema, 'processDefinition', 'r', {
'extensionBased': 'r',
'preProcessListeners': 'r',
'postProcessListeners': 'r',
'processHandlers': 'r',
'resourceType': 'r',
'stateTransitions': 'r',
})
def test_config_item(admin_client, client):
auth_check(admin_client.schema, 'configItem', 'r', {
'sourceVersion': 'r',
})
def test_config_item_status_auth(admin_client, client):
auth_check(admin_client.schema, 'configItemStatus', 'ru', {
'agentId': 'r',
'appliedUpdated': 'r',
'appliedVersion': 'ru',
'requestedUpdated': 'r',
'requestedVersion': 'r',
'sourceVersion': 'r',
})
def test_setting_auth(admin_client, client):
auth_check(admin_client.schema, 'setting', 'crud', {
'name': 'cr',
'value': 'cru',
})
def test_schema_auth(admin_client, client):
auth_check(admin_client.schema, 'schema', 'r', {
'collectionActions': 'r',
'collectionFields': 'r',
'collectionFilters': 'r',
'collectionMethods': 'r',
'includeableLinks': 'r',
'pluralName': 'r',
'resourceActions': 'r',
'resourceFields': 'r',
'resourceMethods': 'r',
})
auth_check(client.schema, 'schema', 'r', {
'collectionActions': 'r',
'collectionFields': 'r',
'collectionFilters': 'r',
'collectionMethods': 'r',
'includeableLinks': 'r',
'pluralName': 'r',
'resourceActions': 'r',
'resourceFields': 'r',
'resourceMethods': 'r',
})
def test_account_auth(admin_client, client):
auth_check(admin_client.schema, 'account', 'crud', {
'id': 'r',
'externalId': 'cru',
'externalIdType': 'cru',
'removeTime': 'r',
'data': 'r',
'kind': 'cru',
'uuid': 'cr',
'projectId': 'r'
})
auth_check(client.schema, 'account', 'r', {
})
def test_agent_auth(admin_client, client):
auth_check(admin_client.schema, 'agent', 'r', {
'managedConfig': 'r',
'uri': 'r',
'accountId': 'r',
'data': 'r',
})
def test_extension_point_auth(admin_client, client):
auth_check(admin_client.schema, 'extensionPoint', 'r', {
'excludeSetting': 'r',
'includeSetting': 'r',
'listSetting': 'r',
'implementations': 'r',
})
def test_api_key_auth(admin_client, client):
auth_check(admin_client.schema, 'apiKey', 'crud', {
'publicValue': 'cr',
'secretValue': 'cr',
'removeTime': 'r',
'data': 'r',
'accountId': 'cr',
})
auth_check(client.schema, 'apiKey', 'crud', {
'publicValue': 'r',
'accountId': 'r',
'secretValue': 'r',
})
def test_subscribe_auth(admin_client, client):
auth_check(admin_client.schema, 'subscribe', 'cr', {
'eventNames': 'cr',
'agentId': 'cr',
})
auth_check(client.schema, 'subscribe', 'cr', {
'eventNames': 'cr',
})
def test_registration_tokens_auth(admin_client, client, service_client):
auth_check(admin_client.schema, 'registrationToken', 'cr', {
'created': 'r',
'data': 'r',
'description': 'cr',
'removeTime': 'r',
'accountId': 'r',
})
auth_check(service_client.schema, 'registrationToken', 'cr', {
'created': 'r',
'data': 'r',
'description': 'cr',
'removeTime': 'r',
'accountId': 'cr',
})
auth_check(client.schema, 'registrationToken', 'cr', {
'accountId': 'r',
'created': 'r',
'description': 'cr',
'uuid': 'r',
})
def test_type_documentation_auth(admin_client, client):
auth_check(admin_client.schema, 'typeDocumentation', 'r', {
})
auth_check(client.schema, 'typeDocumentation', 'r', {
})
def test_stats_access_auth(admin_client, client):
auth_check(admin_client.schema, 'statsAccess', 'r', {
'token': 'r',
'url': 'r',
})
auth_check(client.schema, 'statsAccess', 'r', {
'token': 'r',
'url': 'r',
})
def test_account_resource_auth(admin_client, client):
resource_action_check(admin_client.schema, 'account', [
'update',
'activate',
'deactivate',
'restore',
'remove',
'purge',
'create'
])
def test_machine(admin_client, client, service_client):
auth_check(admin_client.schema, 'machine', 'crd', {
'driver': 'r',
'accountId': 'r',
'externalId': 'r',
'data': 'r',
'authCertificateAuthority': 'cr',
'authKey': 'cr',
'virtualboxConfig': 'cr',
'digitaloceanConfig': 'cr',
'amazonec2Config': 'cr',
})
auth_check(client.schema, 'machine', 'crd', {
'driver': 'r',
'accountId': 'r',
'externalId': 'r',
'authCertificateAuthority': 'cr',
'authKey': 'cr',
'virtualboxConfig': 'cr',
'digitaloceanConfig': 'cr',
'amazonec2Config': 'cr',
})
auth_check(service_client.schema, 'machine', 'crud', {
'driver': 'r',
'accountId': 'r',
'externalId': 'r',
'data': 'cru',
'authCertificateAuthority': 'cr',
'authKey': 'cr',
'extractedConfig': 'ru',
'virtualboxConfig': 'cr',
'digitaloceanConfig': 'cr',
'amazonec2Config': 'cr',
})
def test_physical_host(admin_client, client, service_client):
auth_check(admin_client.schema, 'physicalHost', 'r', {
'accountId': 'r',
'data': 'r',
})
auth_check(client.schema, 'physicalHost', 'r', {
'accountId': 'r',
})
def test_registry_credentials(admin_client, client):
auth_check(admin_client.schema, 'registryCredential', 'crud', {
'accountId': 'r',
'data': 'r',
'email': 'cru',
'publicValue': 'cru',
'secretValue': 'cru',
'registryId': 'cr',
})
auth_check(client.schema, 'registryCredential', 'crud', {
'accountId': 'r',
'email': 'cru',
'publicValue': 'cru',
'secretValue': 'cru',
'registryId': 'cr',
})
def test_registry(admin_client, client):
auth_check(admin_client.schema, 'registry', 'crud', {
'accountId': 'r',
'data': 'r',
'serverAddress': 'cr',
})
auth_check(client.schema, 'registry', 'crud', {
'accountId': 'r',
'serverAddress': 'cr',
})
def test_lb_config_listener_map(admin_client, client):
auth_check(admin_client.schema, 'loadBalancerConfigListenerMap', 'r', {
'loadBalancerConfigId': 'r',
'loadBalancerListenerId': 'r',
'accountId': 'r',
'data': 'r',
})
auth_check(client.schema, 'loadBalancerConfigListenerMap', 'r', {
'loadBalancerConfigId': 'r',
'loadBalancerListenerId': 'r',
'accountId': 'r',
})
def test_lb_host_map(admin_client, client):
auth_check(admin_client.schema, 'loadBalancerHostMap', 'r', {
'hostId': 'r',
'loadBalancerId': 'r',
'accountId': 'r',
'data': 'r',
})
auth_check(client.schema, 'loadBalancerHostMap', 'r', {
'hostId': 'r',
'loadBalancerId': 'r',
'accountId': 'r',
})
def test_container_events(admin_client, client, agent_client):
auth_check(admin_client.schema, 'containerEvent', 'r', {
'externalTimestamp': 'r',
'hostId': 'r',
'accountId': 'r',
'externalFrom': 'r',
'reportedHostUuid': 'r',
'externalId': 'r',
'externalStatus': 'r',
'data': 'r',
'dockerInspect': 'r'
})
auth_check(agent_client.schema, 'containerEvent', 'cr', {
'externalTimestamp': 'cr',
'externalFrom': 'cr',
'reportedHostUuid': 'cr',
'externalId': 'cr',
'externalStatus': 'cr',
'dockerInspect': 'cr',
'data': 'cr',
'id': 'r'
})
auth_check(client.schema, 'containerEvent', 'r', {
'externalTimestamp': 'r',
'hostId': 'r',
'externalFrom': 'r',
'reportedHostUuid': 'r',
'externalId': 'r',
'externalStatus': 'r',
'accountId': 'r',
'dockerInspect': 'r'
})
def test_svc_discovery_service(admin_client, client):
auth_check(admin_client.schema, 'service', 'crud', {
'name': 'cr',
'environmentId': 'cr',
'scale': 'cru',
'dataVolumesFromService': 'cr',
'launchConfig': 'cr',
'accountId': 'r',
'data': 'r',
})
auth_check(client.schema, 'service', 'crud', {
'name': 'cr',
'environmentId': 'cr',
'scale': 'cru',
'dataVolumesFromService': 'cr',
'launchConfig': 'cr',
'accountId': 'r',
})
def test_svc_discovery_environment(admin_client, client):
auth_check(admin_client.schema, 'environment', 'crud', {
'name': 'cru',
'accountId': 'r',
'data': 'r',
})
auth_check(client.schema, 'environment', 'crud', {
'name': 'cru',
'accountId': 'r',
})
def test_svc_discovery_lb_service(admin_client, client):
auth_check(admin_client.schema, 'loadBalancerService', 'crud', {
'name': 'cr',
'environmentId': 'cr',
'scale': 'cru',
'dataVolumesFromService': 'cr',
'launchConfig': 'cr',
'accountId': 'r',
'data': 'r',
'loadBalancerConfig': 'cr',
})
auth_check(client.schema, 'loadBalancerService', 'crud', {
'name': 'cr',
'environmentId': 'cr',
'scale': 'cru',
'dataVolumesFromService': 'cr',
'launchConfig': 'cr',
'accountId': 'r',
'loadBalancerConfig': 'cr',
})
| 25.322281
| 75
| 0.500917
|