# File: plugins/modules/oci_devops_object_content_facts.py
# Repo: LaudateCorpus1/oci-ansible-collection @ 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 (Apache-2.0)
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_devops_object_content_facts
short_description: Fetches details about an ObjectContent resource in Oracle Cloud Infrastructure
description:
    - Fetches details about an ObjectContent resource in Oracle Cloud Infrastructure
- Retrieve contents of a specified object.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
dest:
description:
- The destination file path to write the output. The file will be created if it does not exist. If the file already exists, the content will be
overwritten.
type: str
required: true
repository_id:
description:
- Unique repository identifier.
type: str
required: true
sha:
description:
- The SHA of a blob or tree.
type: str
required: true
file_path:
description:
- A filter to return only commits that affect any of the specified paths.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific object_content
oci_devops_object_content_facts:
# required
dest: /tmp/myfile
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
sha: sha_example
# optional
file_path: file_path_example
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.devops import DevopsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ObjectContentFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"repository_id",
"sha",
]
def get_resource(self):
optional_get_method_params = [
"file_path",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.get_object_content,
repository_id=self.module.params.get("repository_id"),
sha=self.module.params.get("sha"),
**optional_kwargs
)
def get(self):
response = self.get_resource().data
dest = self.module.params.get("dest")
chunk_size = oci_common_utils.MEBIBYTE
with open(to_bytes(dest), "wb") as dest_file:
for chunk in response.raw.stream(chunk_size, decode_content=True):
dest_file.write(chunk)
return None
ObjectContentFactsHelperCustom = get_custom_class("ObjectContentFactsHelperCustom")
class ResourceFactsHelper(ObjectContentFactsHelperCustom, ObjectContentFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
dest=dict(type="str", required=True),
repository_id=dict(type="str", required=True),
sha=dict(type="str", required=True),
file_path=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="object_content",
service_client_class=DevopsClient,
namespace="devops",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(object_content=result)
if __name__ == "__main__":
main()
# File: python/genesys/led_set_g_1.py
# Repo: DanielTisza/spectralcamera @ 4fef93b3b4cd8f83e016070f1c0d68aa0cff5102 (MIT)
from __future__ import print_function
import os
import platform
import sys
from mvIMPACT import acquire
from mvIMPACT.Common import exampleHelper
import ctypes
import numpy as np
import datetime as dt
import matplotlib
from LEDDriver import detect_LED_devices, LEDDriver, LEDException
from spectracular.fpi_driver import detectFPIDevices, createFPIDevice
import fpipy as fp
import fpipy.conventions as c
import xarray as xr
from tqdm.autonotebook import tqdm, trange
# Argument count
argc = len(sys.argv)
print("Argument count: ", argc)
# Arguments passed
for i in range(1, argc):
print(sys.argv[i], end = " ")
print("")
if argc == 1:
exposureTime = "60000"
print("No exposure time argument given! Using default 60000")
else:
exposureTime = sys.argv[1]
print("Exposure time given as argument: ", exposureTime)
print("Using exposure time: ", exposureTime)
print("Exposure time converted to string: ", str(exposureTime))
#-----------------------------------------
# Camera
#-----------------------------------------
devMgr = acquire.DeviceManager()
pDev = exampleHelper.getDeviceFromUserInput(devMgr)
if pDev is None:
exampleHelper.requestENTERFromUser()
sys.exit(-1)
pDev.open()
#
# Set camera settings
#
ac = acquire.AcquisitionControl(pDev)
# print("Old TriggerMode:")
# print(ac.triggerMode.readS())
# print("New TriggerMode:")
# ac.triggerMode.writeS("On")
# print(ac.triggerMode.readS())
# print("Old TriggerSource:")
# print(ac.triggerSource.readS())
# print("New TriggerSource:")
# ac.triggerSource.writeS("Software")
# print(ac.triggerSource.readS())
print("Old ExposureAuto:")
print(ac.exposureAuto.readS())
print("New ExposureAuto:")
ac.exposureAuto.writeS("Off")
print(ac.exposureAuto.readS())
ifc = acquire.ImageFormatControl(pDev)
print("Old pixelformat:")
print(ifc.pixelFormat.readS())
print("New pixelformat:")
ifc.pixelFormat.writeS("BayerGB12")
# ifc.pixelFormat.writeS("RGB8")
print(ifc.pixelFormat.readS())
print("Old pixelColorFilter:")
print(ifc.pixelColorFilter.readS())
imgp = acquire.ImageProcessing(pDev)
# "Auto" originally
print("Old colorProcessing:")
print(imgp.colorProcessing.readS())
imgp.colorProcessing.writeS("Raw")
print("New colorProcessing:")
print(imgp.colorProcessing.readS())
print("Old ExposureTime:")
print(ac.exposureTime.readS())
print("New ExposureTime:")
# ac.exposureTime.writeS("150000")
# ac.exposureTime.writeS("60000")
ac.exposureTime.writeS(str(exposureTime))
print(ac.exposureTime.readS())
anlgc = acquire.AnalogControl(pDev)
print("Old BalanceWhiteAuto:")
print(anlgc.balanceWhiteAuto.readS())
print("New BalanceWhiteAuto:")
anlgc.balanceWhiteAuto.writeS("Off")
print(anlgc.balanceWhiteAuto.readS())
print("Old Gamma:")
print(anlgc.gamma.readS())
print("New Gamma:")
anlgc.gamma.writeS("1")
print(anlgc.gamma.readS())
print("Old Gain:")
print(anlgc.gain.readS())
print("New Gain:")
anlgc.gain.writeS("1.9382002601")
print(anlgc.gain.readS())
print("Old GainAuto:")
print(anlgc.gainAuto.readS())
print("New GainAuto:")
anlgc.gainAuto.writeS("Off")
print(anlgc.gainAuto.readS())
# -----------------------------------------
# Test
# -----------------------------------------
#
# Taking image
#
fi = acquire.FunctionInterface(pDev)
fi.imageRequestSingle()
exampleHelper.manuallyStartAcquisitionIfNeeded(pDev, fi)
requestNr = fi.imageRequestWaitFor(10000)
# Add this from SingleCapture.cpp
exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
if fi.isRequestNrValid(requestNr):
print("Request number valid!")
pRequest = fi.getRequest(requestNr)
print("Print request: " + str(pRequest))
print("Print request result: " + str(pRequest.requestResult))
print("Print request result readS: " + pRequest.requestResult.readS())
if pRequest.isOK:
print("Request OK!")
height = pRequest.imageHeight.read()
width = pRequest.imageWidth.read()
channelCount = pRequest.imageChannelCount.read()
channelBitDepth = pRequest.imageChannelBitDepth.read()
imageSize = pRequest.imageSize.read()
print("Image height: " + str(height))
print("Image width: " + str(width))
print("Image channel count: " + str(channelCount))
print("Image channel bit depth: " + str(channelBitDepth))
print("Image size: " + str(imageSize))
# For systems with NO mvDisplay library support
cbuf = (ctypes.c_char * pRequest.imageSize.read()).from_address(int(pRequest.imageData.read()))
print(cbuf)
channelType = np.uint16 if channelBitDepth > 8 else np.uint8
        arr = np.frombuffer(cbuf, dtype=channelType)
arr.shape = (height, width, channelCount)
print(arr)
# print("Start saving PNG image...")
# matplotlib.image.imsave('testimage.png', arr)
fi.imageRequestUnlock(requestNr)
exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
#-----------------------------------------
# LED driver
#-----------------------------------------
LED_IDS = [
# ( VID, PID) (and the same in decimal)
('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of LED devices."""
LED_HWIDS = [
# Strings returned by read_hardware_id
'1000e016 aefba123 580267dd f5001982',
'10025018 af28a028 5a66a511 f5001983'
]
ledportdevice = detect_LED_devices()
ledportstring = '/dev/ttyACM0'
print('Trying to use ' + ledportstring + ' for LED control')
# led = LEDDriver('/dev/ttyACM0')
# led = LEDDriver('COM10')
led = LEDDriver(ledportstring)
print(led)
led.open()
print('Turning off LEDs')
led.L(0)
#-----------------------------------------
# MFPI
#-----------------------------------------
FPI_IDS = [
# ( VID, PID) (and the same in decimal)
('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of FPI devices."""
FPI_HWIDS = [
# Strings returned by read_hardware_id
'd02b012 af380065 5b5bbeab f50019c1'
]
print('Trying to create FPI device')
fpi = createFPIDevice(detectFPIDevices(FPI_IDS, FPI_HWIDS)[0].device)
print(fpi)
# ------------------------------------------
# camazing.pixelformats
# ------------------------------------------
class PixelFormatError(Exception):
pass
def get_valid_range(pxformat):
"""Return the valid range of values for a given pixel format.
Parameters
----------
pxformat: str
        Pixel format as given by the camera's GenICam PixelFormat feature.
Returns
    -------
np.array
A vector of [min_value, max_value] with the same type as the decoded
pixel format.
"""
try:
valid_range = _ranges[pxformat]
except KeyError:
        raise PixelFormatError(f'No range found for the pixel format `{pxformat}`')
return valid_range
def get_decoder(pxformat):
"""Return a numpy decoder for a given GenICam pixel format.
Parameters
----------
pxformat: str
        Pixel format as given by the camera's PixelFormat feature.
Returns
-------
decoder: function
Function for decoding a buffer
"""
try:
decoder = _decoders[pxformat]
except KeyError:
raise PixelFormatError(f'No decoder for the pixel format `{pxformat}`')
return decoder
def decode_raw(dtype):
"""Decode raw buffer with a given bit depth."""
def decode(buf, shape):
return np.frombuffer(
buf,
dtype=dtype
).reshape(*shape).copy()
return decode
def decode_RGB(bpp):
"""Decode RGB buffer with a given bit depth."""
def decode(buf, shape):
return np.frombuffer(
buf,
dtype=bpp,
).reshape(*shape, 3).copy()
return decode
def decode_YCbCr422_8():
"""Decode YCbCr422 buffer with given bit depth."""
raise NotImplementedError
_decoders = {
'BayerRG8': decode_raw(np.uint8),
'BayerGB8': decode_raw(np.uint8),
'BayerGB12': decode_raw(np.uint16),
'BayerRG12': decode_raw(np.uint16),
'BayerRG16': decode_raw(np.uint16),
'RGB8': decode_RGB(np.uint8),
'Mono8': decode_raw(np.uint8),
'Mono16': decode_raw(np.uint16),
}
_ranges = {
'BayerRG8': np.uint8([0, 255]),
'BayerGB8': np.uint8([0, 255]),
'BayerGB12': np.uint16([0, 4095]),
'BayerRG12': np.uint16([0, 4095]),
'BayerRG16': np.uint16([0, 65535]),
'RGB8': np.uint8([0, 255]),
'Mono8': np.uint8([0, 255]),
'Mono16': np.uint16([0, 65535]),
}
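# Minimal sanity check for the decoder and range tables above (illustrative
# only; the buffer values are made up). A 2x2 BayerGB12 frame is four uint16
# samples:
_test_buf = np.uint16([0, 1000, 2000, 4095]).tobytes()
_test_img = get_decoder('BayerGB12')(_test_buf, (2, 2))
assert _test_img.shape == (2, 2)
assert _test_img.max() <= get_valid_range('BayerGB12')[1]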
# ------------------------------------------
# camazing.core
# ------------------------------------------
class DanielCamera:
def __init__(self, pDev):
self._meta = None
self._pDev = pDev
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
print("Exit DanielCamera")
def _get_frame(self, timeout=1):
"""Helper function"""
self._pixel_format = "BayerGB12"
self._buffer_decoder = get_decoder(self._pixel_format)
self._image_range = get_valid_range(self._pixel_format)
# data = self._buffer_decoder(buffer.raw_buffer, (height, width))
#------------------------
# Take frame
#------------------------
        self._fi = acquire.FunctionInterface(self._pDev)
self._fi.imageRequestSingle()
exampleHelper.manuallyStartAcquisitionIfNeeded(self._pDev, self._fi)
requestNr = self._fi.imageRequestWaitFor(20000)
exampleHelper.manuallyStopAcquisitionIfNeeded(self._pDev, self._fi)
data = []
if self._fi.isRequestNrValid(requestNr):
print("Request number valid! " + str(requestNr))
pRequest = self._fi.getRequest(requestNr)
print("Print request: " + str(pRequest))
print("Print request result: " + str(pRequest.requestResult))
print("Print request result readS: " + pRequest.requestResult.readS())
if pRequest.isOK:
print("Request OK!")
height = pRequest.imageHeight.read()
width = pRequest.imageWidth.read()
channelCount = pRequest.imageChannelCount.read()
channelBitDepth = pRequest.imageChannelBitDepth.read()
imageSize = pRequest.imageSize.read()
print("Image height: " + str(height))
print("Image width: " + str(width))
print("Image channel count: " + str(channelCount))
print("Image channel bit depth: " + str(channelBitDepth))
print("Image size: " + str(imageSize))
cbuf = (ctypes.c_char * pRequest.imageSize.read()).from_address(int(pRequest.imageData.read()))
# Check if this is now correct buffer format!
# Convert with numpy if needed
data = self._buffer_decoder(cbuf, (height, width))
print("Data from buffer_decoder()")
print(data)
self._fi.imageRequestUnlock(requestNr)
else:
print("imageRequestWaitFor failed (" + str(requestNr) + ", " + acquire.ImpactAcquireException.getErrorCodeAsString(requestNr) + ")")
exampleHelper.manuallyStopAcquisitionIfNeeded(self._pDev, self._fi)
return data
def _get_frame_with_meta(self):
"""Fetch a frame and add metadata from the camera."""
data = self._get_frame()
print("Data from _get_frame(): ")
print(data)
height, width = data.shape[0], data.shape[1]
coords = {
"x": ("x", np.arange(0, width) + 0.5),
"y": ("y", np.arange(0, height) + 0.5),
"timestamp": dt.datetime.today().timestamp(),
}
if 'RGB' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = list('RGB')
elif 'YUV' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = list('YUV')
elif 'YCbCr' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = ['Y', 'Cb', 'Cr']
else:
dims = ('y', 'x')
# Keep some meta by default, if available
# self._meta = []
# for feature in ['Gain', 'ExposureTime', 'PixelFormat', 'PixelColorFilter']:
# if feature in self._features:
# self._meta.append(feature)
# Add metadata as coordinates
# if self._meta:
# coords.update({k: self._features[k].value for k in self._meta})
# Replace these hard-coded values by reading from camera!
coords['Gain'] = "1.9382002601"
coords['ExposureTime'] = 150000
coords['PixelFormat'] = "BayerGB12"
coords['PixelColorFilter'] = "BayerGB"
frame = xr.DataArray(
data,
name="frame",
dims=dims,
coords=coords,
attrs={
'valid_range': self._image_range,
}
)
return frame
def get_frame(self):
return self._get_frame_with_meta()
# ------------------------------------------
# HSI
# ------------------------------------------
class CaptureException(Exception):
pass
class HSI:
"""Hyperspectral imager"""
def __init__(self, camera=None, fpi=None):
self.camera = camera
self.fpi = fpi
self.dataset = None
self.calibration_file = None
def read_calibration_file(self, calibration_file):
self.dataset = fp.io.read_calibration(calibration_file)
self.calibration_file = calibration_file
def take_dark_reference(self, number_of_frames=40, method="median"):
self.read_calibration_file(self.calibration_file)
# original_trigger_source = self.camera["TriggerSource"].value
# self.camera["TriggerSource"].value = "Software"
frames = []
with self.camera:
for idx in trange(0, number_of_frames):
frame = self.camera.get_frame()
frame.coords[c.image_index] = idx
frames.append(frame)
# self.camera["TriggerSource"].value = original_trigger_source
dark = xr.concat(frames, dim=c.image_index)
if method == "median":
dark = dark.median(dim=c.image_index)
elif method == "mean":
dark = dark.mean(dim=c.image_index)
else:
            raise ValueError("Unknown method: '" + method + "'")
self.dataset[c.dark_reference_data] = dark
return dark
def capture_cube(self, *, selectors=None):
if selectors is None:
dataset = self.dataset.copy()
else:
dataset = self.dataset.sel(**selectors).copy()
frames = []
# if self.camera["TriggerSource"].value == "Software":
with self.camera:
for idx in tqdm(dataset[c.image_index].values):
setpoint = dataset[c.setpoint_data].sel(
**{c.setpoint_coord: "SP1",
c.image_index: idx,
}).values
self.fpi.set_setpoint(setpoint, wait=True)
frame = self.camera.get_frame()
frame.coords[c.image_index] = idx
frames.append(frame)
# else:
# with self.camera:
# self.create_fpi_taskfile(dataset)
# self.camera["StrobeDuration"].value = \
# self.camera["ExposureTime"].value
# self.fpi.run_taskfile()
# for idx, setpoint in enumerate(tqdm(
# dataset.setpoint.sel(setpoint_index="SP1").values)):
# frame = self.camera.get_frame()
# frame.coords[c.image_index] = idx
# frames.append(frame)
dataset[c.cfa_data] = xr.concat(frames, dim=c.image_index)
return dataset
    def create_fpi_taskfile(self, dataset):
raise NotImplementedError()
danielCam = DanielCamera(pDev)
print(danielCam)
hsi = HSI(danielCam, fpi)
print(hsi)
hsi.read_calibration_file('led_set_g_calib_1.txt')
input("Put the lens cap on")
hsi.take_dark_reference()
print(hsi.dataset.dark)
input("Take the lens cap off and set white reference")
print('Turning on LEDs')
# VNIR1 and VNIR2
#
# 810.0078184
# 848.0322309
#
# 000011110000011110000011110
# * Reverse for LED control:
# 011110000011110000011110000
#
led.L(0b011110000011110000011110000)
print('Capturing white reference')
white_raw = hsi.capture_cube()
input("Set image (only for radiance)")
print('Capturing cube')
raw = hsi.capture_cube()
print(raw)
print('Turning off LEDs')
led.L(0)
print('Calculating radiance')
rad = fp.raw_to_radiance(raw, keep_variables=['dark'])
print(rad)
print(rad['radiance'])
print('Calculating white radiance')
rad['white'] = fp.raw_to_radiance(white_raw, keep_variables = []).radiance
print(rad['white'])
print('Calculating reflectance')
rad['reflectance'] = rad.radiance / rad.white
print(rad['reflectance'])
# reflectance = fp.radiance_to_reflectance(rad, white_raw, keep_variables=[])
# print(reflectance)
print('Extracting single frame from cube and saving to PNG')
test = rad["radiance"]
print('Radiance data')
testdata = test.data
print(testdata)
print('White data')
whitedata = rad['white'].data
print(whitedata)
print('Reflectance data')
reflectdata = rad['reflectance'].data
print(reflectdata)
print ("Wavelengths")
wavelengths = rad["wavelength"].data
print(wavelengths)
print ("Wavelengths count")
wavelengthCount = len(wavelengths)
print(wavelengthCount)
# Multiple peaks result in multiple of single calib file row count
imagelastindex = wavelengthCount
#
# Save radiance images
#
print('Start saving radiance images')
for x in range(0, imagelastindex):
wavelengthValue = wavelengths[x]
wavelengthStr = str(wavelengthValue)
wavelengthReplacedStr = wavelengthStr.replace(".", "p")
print('Saving wavelength: ' + wavelengthStr)
rad1 = testdata[:,:,x]
matplotlib.image.imsave('rad_' + wavelengthReplacedStr + 'nm_' + str(x) + '_exp_' + exposureTime + '.png', rad1, cmap='gray')
white1 = whitedata[:,:,x]
# matplotlib.image.imsave('white_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', white1, cmap='gray')
ref1 = reflectdata[:,:,x]
matplotlib.image.imsave('refl_' + wavelengthReplacedStr + 'nm_' + str(x) + '_exp_' + exposureTime + '.png', ref1, cmap='gray', vmin=0,vmax=1)
# import matplotlib.pyplot as plt
# plt.gray()
#
# Save raw images and demosaic images
#
# print('Start saving raw data')
# for x in range(1, 2):
# Raw data values
# dn1 = raw.dn.isel(index=x)
# matplotlib.image.imsave('raw_' + str(x) + '.png', dn1)
# Demosaic to get three colour channels
# dm1 = fp.demosaic(dn1, 'BayerGB', 'bilinear')
# dm1_red = dm1[:,:,0]
# dm1_green = dm1[:,:,1]
# dm1_blue = dm1[:,:,2]
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_red.png', dm1_red)
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_green.png', dm1_green)
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_blue.png', dm1_blue)
# fi.acquisitionStart()
# self["TriggerSoftware"].execute()
# acquire.TriggerControl.triggerSoftware()
# fi.acquisitionStop()
# File: tohu/v4/spawn_context.py
# Repo: maxalbert/tohu @ 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 (MIT)
import textwrap
from bidict import bidict
from itertools import count
from .logging import logger
from .primitive_generators import PrimitiveGenerator
from .derived_generators import Apply, GetAttribute, Lookup, SelectOneDerived
__all__ = ['SpawnContext']
class NoExistingSpawn(Exception):
"""
Custom exception
"""
class SpawnContext:
def __init__(self):
self.templates = bidict() # mapping {name -> field_generator_template}
self.spawns = {} # mapping {name -> field_generator}
self.anonymous_spawns = [] # names of anonymously spawned generators
self.cnt_anonymous = count()
def __repr__(self):
return textwrap.dedent(f"""
            <SpawnContext:
templates: {dict(self.templates)}
spawns: {dict(self.spawns)}
anonymous: {self.anonymous_spawns}
>""")
@property
def named_spawns(self):
return {name: g for (name, g) in self.spawns.items() if name not in self.anonymous_spawns}
def get_existing_spawn(self, g_tpl):
try:
existing_name = self.templates.inv[g_tpl]
return self.spawns[existing_name]
except KeyError:
logger.debug(f"No existing spawn for {g_tpl}")
raise NoExistingSpawn()
def spawn_template(self, g_tpl, *, name):
if name is None:
try:
name = self.templates.inv[g_tpl]
except KeyError:
name = f'ANONYMOUS_ANONYMOUS_ANONYMOUS_{next(self.cnt_anonymous)}'
self.anonymous_spawns.append(name)
logger.debug(f"Found anonymous field generator template: {g_tpl}")
try:
self.spawns[name] = self.get_existing_spawn(g_tpl)
except NoExistingSpawn:
if isinstance(g_tpl, PrimitiveGenerator):
self.templates[name] = g_tpl
self.spawns[name] = g_tpl.spawn()
elif isinstance(g_tpl, SelectOneDerived):
new_parent = self.spawn_template(g_tpl.parent, name=None)
self.templates[name] = g_tpl
self.spawns[name] = SelectOneDerived(new_parent)
elif isinstance(g_tpl, GetAttribute):
new_parent = self.spawn_template(g_tpl.parent, name=None)
self.templates[name] = g_tpl
self.spawns[name] = GetAttribute(new_parent, name=g_tpl.name)
elif isinstance(g_tpl, Lookup):
new_parent = self.spawn_template(g_tpl.parent, name=None)
self.templates[name] = g_tpl
self.spawns[name] = Lookup(new_parent, mapping=g_tpl.mapping)
elif isinstance(g_tpl, Apply):
new_arg_gens = []
for gen in g_tpl.func_arg_gens_orig.arg_gens:
new_arg_gens.append(self.spawn_template(gen, name=None))
new_kwarg_gens = {}
for gen_name, gen in g_tpl.func_arg_gens_orig.kwarg_gens.items():
new_kwarg_gens[gen_name] = self.spawn_template(gen, name=None)
self.templates[name] = g_tpl
self.spawns[name] = Apply(g_tpl.func, *new_arg_gens, **new_kwarg_gens)
else:
raise NotImplementedError(f'g_tpl: {g_tpl}')
# Set tohu_name for nicer debugging
if name.startswith('ANONYMOUS_ANONYMOUS_ANONYMOUS_'):
self.spawns[name].set_tohu_name(f'anonymous_{g_tpl.tohu_id}')
else:
self.spawns[name].set_tohu_name(name)
return self.spawns[name]
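# Illustrative usage sketch (`some_template` below stands for any tohu
# generator template, e.g. a PrimitiveGenerator instance):
#
#     ctx = SpawnContext()
#     g1 = ctx.spawn_template(some_template, name="my_field")
#     g2 = ctx.spawn_template(some_template, name=None)  # reuses g1's spawn
#
# Repeated calls with the same template resolve via get_existing_spawn(), so
# shared parent generators are spawned only once per context.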
# File: bindings/python/ensmallen/datasets/string/mycobacteriumrufum.py
# Repo: AnacletoLAB/ensmallen_graph @ b2c1b18fb1e5801712852bcc239f239e03076f09 (MIT)
"""
This file offers the methods to automatically retrieve the graph Mycobacterium rufum.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MycobacteriumRufum(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Mycobacterium rufum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default False.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Mycobacterium rufum graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MycobacteriumRufum",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
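# Hypothetical usage sketch (assumes the ensmallen package and these dataset
# bindings are installed):
#
#     from ensmallen.datasets.string import MycobacteriumRufum
#     graph = MycobacteriumRufum(directed=False, version="links.v11.5")
#
# On first use the STRING data is downloaded and preprocessed under
# cache_path; subsequent calls reuse the cache when cache=True.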
# File: salt/loader.py
# Repo: casselt/salt @ d8a2ef4e0cd544656489d23d161928879b1fc1c0 (Apache-2.0)
# -*- coding: utf-8 -*-
'''
The Salt loader is the core to Salt's plugin system, the loader scans
directories for python loadable code and organizes the code into the
plugin interfaces used by Salt.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import sys
import time
import logging
import inspect
import tempfile
import functools
import threading
import traceback
import types
from zipimport import zipimporter
# Import salt libs
import salt.config
import salt.defaults.exitcodes
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.lazy
import salt.utils.odict
import salt.utils.platform
import salt.utils.versions
import salt.utils.stringutils
from salt.exceptions import LoaderError
from salt.template import check_render_pipe_str
from salt.utils.decorators import Depends
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import reload_module
if sys.version_info[:2] >= (3, 5):
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
USE_IMPORTLIB = True
else:
import imp
USE_IMPORTLIB = False
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = 'salt.loaded'
if USE_IMPORTLIB:
# pylint: disable=no-member
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, 'rb', MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, 'rb', MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, 'rb', MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader
}
# pylint: enable=no-member
else:
SUFFIXES = imp.get_suffixes()
PY3_PRE_EXT = \
re.compile(r'\.cpython-{0}{1}(\.opt-[1-9])?'.format(*sys.version_info[:2]))
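# PY3_PRE_EXT matches the interpreter-specific bytecode tag in file names,
# e.g. '.cpython-36' or '.cpython-36.opt-1' on Python 3.6 (the digits come
# from sys.version_info), so such tags can be stripped from module file names.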
# Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *`
# which simplifies code readability, it adds some unsupported functions into
# the driver's module scope.
# We list unsupported functions here; they will be removed from the loaded module.
# TODO: remove the need for this cross-module code. Maybe use NotImplemented
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
'parallels.avail_sizes',
'parallels.avail_locations',
'proxmox.avail_sizes',
)
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None
def static_loader(
opts,
ext_type,
tag,
pack=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
filter_name=None,
):
funcs = LazyLoader(
_module_dirs(
opts,
ext_type,
tag,
int_type,
ext_dirs,
ext_type_dirs,
base_path,
),
opts,
tag=tag,
pack=pack,
)
ret = {}
funcs._load_all()
if filter_name:
funcs = FilterDictWrapper(funcs, filter_name)
for key in funcs:
ret[key] = funcs[key]
return ret
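# Note: unlike the lazy factory functions below, static_loader eagerly calls
# _load_all() and returns a plain dict snapshot, so modules added to the
# search dirs afterwards are not picked up by the returned mapping.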
def _module_dirs(
opts,
ext_type,
tag=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
):
if tag is None:
tag = ext_type
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
ext_types = os.path.join(opts['extension_modules'], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
ext_type_dirs = '{0}_dirs'.format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
if HAS_PKG_RESOURCES and ext_type_dirs:
for entry_point in pkg_resources.iter_entry_points('salt.loader', ext_type_dirs):
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
cli_module_dirs = []
    # The dirs can be any module dir, or an in-tree _{ext_type} dir
for _dir in opts.get('module_dirs', []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, '_{0}'.format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
return cli_module_dirs + ext_type_types + [ext_types, sys_types]
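# The returned list is ordered: CLI/config `module_dirs` entries (either
# `<dir>/<ext_type>` or `<dir>/_<ext_type>`), then `<tag>_dirs` opts and
# entry-point dirs, then `<extension_modules>/<ext_type>`, and finally the
# in-tree salt directory for this ext_type.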
def minion_mods(
opts,
context=None,
utils=None,
whitelist=None,
initial_load=False,
loaded_base_name=None,
notify=False,
static_modules=None,
proxy=None):
'''
Load execution modules
Returns a dictionary of execution modules appropriate for the current
system by evaluating the __virtual__() function in each module.
:param dict opts: The Salt options dictionary
:param dict context: A Salt context that should be made present inside
generated modules in __context__
:param dict utils: Utility functions which should be made available to
Salt modules in __utils__. See `utils_dirs` in
salt.config for additional information about
configuration.
:param list whitelist: A list of modules which should be whitelisted.
:param bool initial_load: Deprecated flag! Unused.
:param str loaded_base_name: A string marker for the loaded base name.
:param bool notify: Flag indicating that an event should be fired upon
completion of module loading.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
__opts__['grains'] = __grains__
__utils__ = salt.loader.utils(__opts__)
__salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)
__salt__['test.ping']()
'''
# TODO Publish documentation for module whitelisting
if not whitelist:
whitelist = opts.get('whitelist_modules', None)
ret = LazyLoader(
_module_dirs(opts, 'modules', 'module'),
opts,
tag='module',
pack={'__context__': context, '__utils__': utils, '__proxy__': proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name,
static_modules=static_modules,
)
ret.pack['__salt__'] = ret
# Load any provider overrides from the configuration file providers option
# Note: Providers can be pkg, service, user or group - not to be confused
# with cloud providers.
providers = opts.get('providers', False)
if providers and isinstance(providers, dict):
for mod in providers:
            # Sometimes the providers option is used for other configuration
            # rather than to divert module loading
try:
funcs = raw_mod(opts, providers[mod], ret)
except TypeError:
break
else:
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(mod, func[func.rindex('.'):])
ret[f_key] = funcs[func]
if notify:
evt = salt.utils.event.get_event('minion', opts=opts, listen=False)
evt.fire_event({'complete': True}, tag='/salt/minion/minion_mod_complete')
return ret
def raw_mod(opts, name, functions, mod='modules'):
'''
Returns a single module loaded raw and bypassing the __virtual__ function
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
testmod = salt.loader.raw_mod(__opts__, 'test', None)
testmod['test.ping']()
'''
loader = LazyLoader(
_module_dirs(opts, mod, 'module'),
opts,
tag='rawmodule',
virtual_enable=False,
pack={'__salt__': functions},
)
# if we don't have the module, return an empty dict
if name not in loader.file_mapping:
return {}
loader._load_module(name) # load a single module (the one passed in)
return dict(loader._dict) # return a copy of *just* the funcs for `name`
def matchers(opts):
'''
Return the matcher services plugins
'''
return LazyLoader(
_module_dirs(opts, 'matchers'),
opts,
tag='matchers'
)
def engines(opts, functions, runners, utils, proxy=None):
'''
Return the master services plugins
'''
pack = {'__salt__': functions,
'__runners__': runners,
'__proxy__': proxy,
'__utils__': utils}
return LazyLoader(
_module_dirs(opts, 'engines'),
opts,
tag='engines',
pack=pack,
)
def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
'''
Returns the proxy module for this salt-proxy-minion
'''
ret = LazyLoader(
_module_dirs(opts, 'proxy'),
opts,
tag='proxy',
pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
)
ret.pack['__proxy__'] = ret
return ret
def returners(opts, functions, whitelist=None, context=None, proxy=None):
'''
Returns the returner modules
'''
return LazyLoader(
_module_dirs(opts, 'returners', 'returner'),
opts,
tag='returner',
whitelist=whitelist,
pack={'__salt__': functions, '__context__': context, '__proxy__': proxy or {}},
)
def utils(opts, whitelist=None, context=None, proxy=proxy):
'''
Returns the utility modules
'''
return LazyLoader(
_module_dirs(opts, 'utils', ext_type_dirs='utils_dirs'),
opts,
tag='utils',
whitelist=whitelist,
pack={'__context__': context, '__proxy__': proxy or {}},
)
def pillars(opts, functions, context=None):
'''
Returns the pillars modules
'''
ret = LazyLoader(_module_dirs(opts, 'pillar'),
opts,
tag='pillar',
pack={'__salt__': functions,
'__context__': context,
'__utils__': utils(opts)})
ret.pack['__ext_pillar__'] = ret
return FilterDictWrapper(ret, '.ext_pillar')
def tops(opts):
'''
Returns the tops modules
'''
if 'master_tops' not in opts:
return {}
whitelist = list(opts['master_tops'].keys())
ret = LazyLoader(
_module_dirs(opts, 'tops', 'top'),
opts,
tag='top',
whitelist=whitelist,
)
return FilterDictWrapper(ret, '.top')
def wheels(opts, whitelist=None, context=None):
'''
Returns the wheels modules
'''
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, 'wheel'),
opts,
tag='wheel',
whitelist=whitelist,
pack={'__context__': context},
)
def outputters(opts):
'''
Returns the outputters modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only outputters present in the keyspace
'''
ret = LazyLoader(
_module_dirs(opts, 'output', ext_type_dirs='outputter_dirs'),
opts,
tag='output',
)
wrapped_ret = FilterDictWrapper(ret, '.output')
# TODO: this name seems terrible... __salt__ should always be execution mods
ret.pack['__salt__'] = wrapped_ret
return wrapped_ret
def serializers(opts):
'''
Returns the serializers modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
'''
return LazyLoader(
_module_dirs(opts, 'serializers'),
opts,
tag='serializers',
)
def eauth_tokens(opts):
'''
Returns the tokens modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only token backends present in the keyspace
'''
return LazyLoader(
_module_dirs(opts, 'tokens'),
opts,
tag='tokens',
)
def auth(opts, whitelist=None):
'''
Returns the auth modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader
'''
return LazyLoader(
_module_dirs(opts, 'auth'),
opts,
tag='auth',
whitelist=whitelist,
pack={'__salt__': minion_mods(opts)},
)
def fileserver(opts, backends):
'''
Returns the file server modules
'''
return LazyLoader(_module_dirs(opts, 'fileserver'),
opts,
tag='fileserver',
whitelist=backends,
pack={'__utils__': utils(opts)})
def roster(opts, runner=None, utils=None, whitelist=None):
'''
Returns the roster modules
'''
return LazyLoader(
_module_dirs(opts, 'roster'),
opts,
tag='roster',
whitelist=whitelist,
pack={
'__runner__': runner,
'__utils__': utils,
},
)
def thorium(opts, functions, runners):
'''
Load the thorium runtime modules
'''
pack = {'__salt__': functions, '__runner__': runners, '__context__': {}}
ret = LazyLoader(_module_dirs(opts, 'thorium'),
opts,
tag='thorium',
pack=pack)
ret.pack['__thorium__'] = ret
return ret
def states(opts, functions, utils, serializers, whitelist=None, proxy=None):
'''
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
'''
ret = LazyLoader(
_module_dirs(opts, 'states'),
opts,
tag='states',
pack={'__salt__': functions, '__proxy__': proxy or {}},
whitelist=whitelist,
)
ret.pack['__states__'] = ret
ret.pack['__utils__'] = utils
ret.pack['__serializers__'] = serializers
return ret
def beacons(opts, functions, context=None, proxy=None):
'''
Load the beacon modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
'''
return LazyLoader(
_module_dirs(opts, 'beacons'),
opts,
tag='beacons',
pack={'__context__': context, '__salt__': functions, '__proxy__': proxy or {}},
virtual_funcs=[],
)
def log_handlers(opts):
'''
Returns the custom logging handler modules
:param dict opts: The Salt options dictionary
'''
ret = LazyLoader(
_module_dirs(
opts,
'log_handlers',
int_type='handlers',
base_path=os.path.join(SALT_BASE_PATH, 'log'),
),
opts,
tag='log_handlers',
)
return FilterDictWrapper(ret, '.setup_handlers')
def ssh_wrapper(opts, functions=None, context=None):
'''
    Returns the ssh wrapper modules used by salt-ssh
'''
return LazyLoader(
_module_dirs(
opts,
'wrapper',
base_path=os.path.join(SALT_BASE_PATH, os.path.join('client', 'ssh')),
),
opts,
tag='wrapper',
pack={
'__salt__': functions,
'__grains__': opts.get('grains', {}),
'__pillar__': opts.get('pillar', {}),
'__context__': context,
},
)
def render(opts, functions, states=None, proxy=None):
'''
Returns the render modules
'''
pack = {'__salt__': functions,
'__grains__': opts.get('grains', {})}
if states:
pack['__states__'] = states
pack['__proxy__'] = proxy or {}
ret = LazyLoader(
_module_dirs(
opts,
'renderers',
'render',
ext_type_dirs='render_dirs',
),
opts,
tag='render',
pack=pack,
)
rend = FilterDictWrapper(ret, '.render')
if not check_render_pipe_str(opts['renderer'], rend, opts['renderer_blacklist'], opts['renderer_whitelist']):
err = ('The renderer {0} is unavailable, this error is often because '
'the needed software is unavailable'.format(opts['renderer']))
log.critical(err)
raise LoaderError(err)
return rend
def grain_funcs(opts, proxy=None):
'''
Returns the grain functions
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
grainfuncs = salt.loader.grain_funcs(__opts__)
'''
ret = LazyLoader(
_module_dirs(
opts,
'grains',
'grain',
ext_type_dirs='grains_dirs',
),
opts,
tag='grains',
)
ret.pack['__utils__'] = utils(opts, proxy=proxy)
return ret
def _load_cached_grains(opts, cfn):
'''
Returns the grains cached in cfn, or None if the cache is too old or is
corrupted.
'''
if not os.path.isfile(cfn):
log.debug('Grains cache file does not exist.')
return None
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
if grains_cache_age > opts.get('grains_cache_expiration', 300):
log.debug(
'Grains cache last modified %s seconds ago and cache '
'expiration is set to %s. Grains cache expired. '
'Refreshing.',
grains_cache_age, opts.get('grains_cache_expiration', 300)
)
return None
if opts.get('refresh_grains_cache', False):
log.debug('refresh_grains_cache requested, Refreshing.')
return None
log.debug('Retrieving grains from cache')
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, 'rb') as fp_:
cached_grains = salt.utils.data.decode(serial.load(fp_), preserve_tuples=True)
if not cached_grains:
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
return None
return cached_grains
except (IOError, OSError):
return None
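# Example: with the default grains_cache_expiration of 300 seconds used above,
# a grains.cache.p file written six minutes ago is considered stale, None is
# returned, and the caller recomputes the grains.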
def grains(opts, force_refresh=False, proxy=None):
'''
Return the functions for the dynamic grains and the values for the static
grains.
Since grains are computed early in the startup process, grains functions
do not have __salt__ or __proxy__ available. At proxy-minion startup,
this function is called with the proxymodule LazyLoader object so grains
functions can communicate with their controlled device.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
print __grains__['id']
'''
# Need to re-import salt.config, somehow it got lost when a minion is starting
import salt.config
    # if we have no grains, let's try loading from disk (TODO: move to decorator?)
cfn = os.path.join(
opts['cachedir'],
'grains.cache.p'
)
if not force_refresh and opts.get('grains_cache', False):
cached_grains = _load_cached_grains(opts, cfn)
if cached_grains:
return cached_grains
else:
log.debug('Grains refresh requested. Refreshing grains.')
if opts.get('skip_grains', False):
return {}
grains_deep_merge = opts.get('grains_deep_merge', False) is True
if 'conf_file' in opts:
pre_opts = {}
pre_opts.update(salt.config.load_config(
opts['conf_file'], 'SALT_MINION_CONFIG',
salt.config.DEFAULT_MINION_OPTS['conf_file']
))
default_include = pre_opts.get(
'default_include', opts['default_include']
)
include = pre_opts.get('include', [])
pre_opts.update(salt.config.include_config(
default_include, opts['conf_file'], verbose=False
))
pre_opts.update(salt.config.include_config(
include, opts['conf_file'], verbose=True
))
if 'grains' in pre_opts:
opts['grains'] = pre_opts['grains']
else:
opts['grains'] = {}
else:
opts['grains'] = {}
grains_data = {}
blist = opts.get('grains_blacklist', [])
funcs = grain_funcs(opts, proxy=proxy)
    if force_refresh:  # if we refresh, let's reload grain modules
funcs.clear()
# Run core grains
for key in funcs:
if not key.startswith('core.'):
continue
log.trace('Loading %s grain', key)
ret = funcs[key]()
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace('Filtering %s grain', key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
# Run the rest of the grains
for key in funcs:
if key.startswith('core.') or key == '_errors':
continue
try:
# Grains are loaded too early to take advantage of the injected
# __proxy__ variable. Pass an instance of that LazyLoader
# here instead to grains functions if the grains functions take
# one parameter. Then the grains can have access to the
# proxymodule for retrieving information from the connected
# device.
log.trace('Loading %s grain', key)
parameters = salt.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
if 'proxy' in parameters:
kwargs['proxy'] = proxy
if 'grains' in parameters:
kwargs['grains'] = grains_data
ret = funcs[key](**kwargs)
except Exception:
if salt.utils.platform.is_proxy():
log.info('The following CRITICAL message may not be an error; the proxy may not be completely established yet.')
log.critical(
'Failed to load grains defined in grain file %s in '
'function %s, error:\n', key, funcs[key],
exc_info=True
)
continue
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace('Filtering %s grain', key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
if opts.get('proxy_merge_grains_in_module', True) and proxy:
try:
proxytype = proxy.opts['proxy']['proxytype']
if proxytype + '.grains' in proxy:
if proxytype + '.initialized' in proxy and proxy[proxytype + '.initialized']():
try:
proxytype = proxy.opts['proxy']['proxytype']
ret = proxy[proxytype + '.grains']()
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
except Exception:
log.critical('Failed to run proxy\'s grains function!',
exc_info=True
)
except KeyError:
pass
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
import salt.modules.cmdmod
# Make sure cache file isn't read-only
salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
with salt.utils.files.fopen(cfn, 'w+b') as fp_:
try:
serial = salt.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError as e:
log.error('Failed to serialize grains cache: %s', e)
raise # re-throw for cleanup
except Exception as e:
log.error('Unable to write to grains cache file %s: %s', cfn, e)
# Based on the original exception, the file may or may not have been
# created. If it was, we will remove it now, as the exception means
# the serialized data is not to be trusted, no matter what the
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
else:
grains_data.update(opts['grains'])
return salt.utils.data.decode(grains_data, preserve_tuples=True)
# TODO: get rid of? Does anyone use this? You should use raw() instead
def call(fun, **kwargs):
'''
Directly call a function inside a loader directory
'''
args = kwargs.get('args', [])
dirs = kwargs.get('dirs', [])
funcs = LazyLoader(
[os.path.join(SALT_BASE_PATH, 'modules')] + dirs,
None,
tag='modules',
virtual_enable=False,
)
return funcs[fun](*args)
def runner(opts, utils=None, context=None, whitelist=None):
'''
    Returns the runner modules
'''
if utils is None:
utils = {}
if context is None:
context = {}
ret = LazyLoader(
_module_dirs(opts, 'runners', 'runner', ext_type_dirs='runner_dirs'),
opts,
tag='runners',
pack={'__utils__': utils, '__context__': context},
whitelist=whitelist,
)
# TODO: change from __salt__ to something else, we overload __salt__ too much
ret.pack['__salt__'] = ret
return ret
def queues(opts):
'''
    Returns the queue modules
'''
return LazyLoader(
_module_dirs(opts, 'queues', 'queue', ext_type_dirs='queue_dirs'),
opts,
tag='queues',
)
def sdb(opts, functions=None, whitelist=None, utils=None):
'''
Make a very small database call
'''
if utils is None:
utils = {}
return LazyLoader(
_module_dirs(opts, 'sdb'),
opts,
tag='sdb',
pack={
'__sdb__': functions,
'__opts__': opts,
'__utils__': utils,
'__salt__': minion_mods(opts, utils),
},
whitelist=whitelist,
)
def pkgdb(opts):
'''
Return modules for SPM's package database
.. versionadded:: 2015.8.0
'''
return LazyLoader(
_module_dirs(
opts,
'pkgdb',
base_path=os.path.join(SALT_BASE_PATH, 'spm')
),
opts,
tag='pkgdb'
)
def pkgfiles(opts):
'''
Return modules for SPM's file handling
.. versionadded:: 2015.8.0
'''
return LazyLoader(
_module_dirs(
opts,
'pkgfiles',
base_path=os.path.join(SALT_BASE_PATH, 'spm')
),
opts,
tag='pkgfiles'
)
def clouds(opts):
'''
Return the cloud functions
'''
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
_module_dirs(opts,
'clouds',
'cloud',
base_path=os.path.join(SALT_BASE_PATH, 'cloud'),
int_type='clouds'),
opts,
tag='clouds',
pack={'__utils__': salt.loader.utils(opts),
'__active_provider_name__': None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
'\'%s\' has been marked as not supported. Removing from the '
'list of supported cloud functions', funcname
)
functions.pop(funcname, None)
return functions
def netapi(opts):
'''
Return the network api functions
'''
return LazyLoader(
_module_dirs(opts, 'netapi'),
opts,
tag='netapi',
)
def executors(opts, functions=None, context=None, proxy=None):
'''
Returns the executor modules
'''
executors = LazyLoader(
_module_dirs(opts, 'executors', 'executor'),
opts,
tag='executor',
pack={'__salt__': functions, '__context__': context or {}, '__proxy__': proxy or {}},
)
executors.pack['__executors__'] = executors
return executors
def cache(opts, serial):
'''
Returns the returner modules
'''
return LazyLoader(
_module_dirs(opts, 'cache', 'cache'),
opts,
tag='cache',
pack={'__opts__': opts, '__context__': {'serial': serial}},
)
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
# ModuleType can't accept a unicode type on PY2
module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
return 'int'
return 'ext'
# TODO: move somewhere else?
class FilterDictWrapper(MutableMapping):
'''
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
'''
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
def __setitem__(self, key, val):
self._dict[key] = val
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
return self._dict[key + self.suffix]
def __len__(self):
return len(self._dict)
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
yield key.replace(self.suffix, '')
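# Behaviour sketch: wrapping {'yaml.render': func} with suffix '.render'
# exposes the function as wrapped['yaml'], and iterating the wrapper yields
# only the suffix-stripped names ('yaml' here).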
class LazyLoader(salt.utils.lazy.LazyDict):
'''
    A pseudo-dictionary which has a set of keys which are the
name of the module and function, delimited by a dot. When
the value of the key is accessed, the function is then loaded
from disk and into memory.
.. note::
Iterating over keys will cause all modules to be loaded.
:param list module_dirs: A list of directories on disk to search for modules
:param dict opts: The salt options dictionary.
:param str tag: The tag for the type of module to load
:param func mod_type_check: A function which can be used to verify files
:param dict pack: A dictionary of function to be packed into modules as they are loaded
:param list whitelist: A list of modules to whitelist
    :param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
        If __virtual__ does not return a truthy value, the module will not load.
    :param list virtual_funcs: The names of additional functions in the module to call to verify its functionality.
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
are function references themselves which are loaded on-demand.
# TODO:
- move modules_max_memory into here
- singletons (per tag)
'''
mod_dict_class = salt.utils.odict.OrderedDict
def __init__(self,
module_dirs,
opts=None,
tag='module',
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
static_modules=None,
proxy=None,
virtual_funcs=None,
): # pylint: disable=W0231
'''
In pack, if any of the values are None they will be replaced with an
empty context-specific dict
'''
self.inject_globals = {}
self.pack = {} if pack is None else pack
if opts is None:
opts = {}
threadsafety = not opts.get('multiprocessing')
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.module_dirs = module_dirs
self.tag = tag
self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
if '__context__' not in self.pack:
self.pack['__context__'] = None
for k, v in six.iteritems(self.pack):
if v is None: # if the value of a pack is None, lets make an empty dict
self.context_dict.setdefault(k, {})
self.pack[k] = salt.utils.context.NamespacedDictWrapper(self.context_dict, k)
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {} # mapping of name -> error
self.loaded_modules = {} # mapping of module_name -> dict_of_functions
self.loaded_files = set() # TODO: just remove them from file_mapping?
self.static_modules = static_modules if static_modules else []
if virtual_funcs is None:
virtual_funcs = []
self.virtual_funcs = virtual_funcs
self.disabled = set(
self.opts.get(
'disable_{0}{1}'.format(
self.tag,
'' if self.tag[-1] == 's' else 's'
),
[]
)
)
# A map of suffix to description for imp
self.suffix_map = {}
# A list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
self.suffix_order = ['']
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
self._lock = threading.RLock()
self._refresh_file_mapping()
super(LazyLoader, self).__init__() # late init the lazy loader
# create all of the import namespaces
_generate_module('{0}.int'.format(self.loaded_base_name))
_generate_module('{0}.int.{1}'.format(self.loaded_base_name, tag))
_generate_module('{0}.ext'.format(self.loaded_base_name))
_generate_module('{0}.ext.{1}'.format(self.loaded_base_name, tag))
def __getitem__(self, item):
'''
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
'''
func = super(LazyLoader, self).__getitem__(item)
if self.inject_globals:
return global_injector_decorator(self.inject_globals)(func)
else:
return func
def __getattr__(self, mod_name):
'''
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
'''
if mod_name in ('__getstate__', '__setstate__'):
return object.__getattribute__(self, mod_name)
# if we have an attribute named that, lets return it.
try:
return object.__getattr__(self, mod_name) # pylint: disable=no-member
except AttributeError:
pass
        # otherwise we assume it's jinja template access
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return self.loaded_modules[mod_name]
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
'''
Return the error string for a missing function.
        This can range from 'not available' to '__virtual__ returned False'
'''
mod_name = function_name.split('.')[0]
if mod_name in self.loaded_modules:
return '\'{0}\' is not available.'.format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return '\'{0}\' is not available.'.format(function_name)
else:
if reason is not None:
return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason)
else:
return '\'{0}\' __virtual__ returned False'.format(mod_name)
def _refresh_file_mapping(self):
'''
refresh the mapping of the FS on disk
'''
# map of suffix to description for imp
if self.opts.get('cython_enable', True) is True:
try:
global pyximport
pyximport = __import__('pyximport') # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map['.pyx'] = tuple()
except ImportError:
log.info('Cython is enabled in the options but not present '
'in the system path. Skipping Cython modules.')
# Allow for zipimport of modules
if self.opts.get('enable_zip_modules', True) is True:
self.suffix_map['.zip'] = tuple()
# allow for module dirs
if USE_IMPORTLIB:
self.suffix_map[''] = ('', '', MODULE_KIND_PKG_DIRECTORY)
else:
self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
self.file_mapping = salt.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
'''
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
'''
opt_match.append(obj)
return ''
for mod_dir in self.module_dirs:
try:
                # Make sure we have a sorted listdir in order to have
                # predictable override results
files = sorted(
x for x in os.listdir(mod_dir) if x != '__pycache__'
)
except OSError:
continue # Next mod_dir
if six.PY3:
try:
pycache_files = [
os.path.join('__pycache__', x) for x in
sorted(os.listdir(os.path.join(mod_dir, '__pycache__')))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith('_'):
# skip private modules
# log messages omitted for obviousness
continue # Next filename
f_noext, ext = os.path.splitext(basename)
if six.PY3:
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(
opt_match.pop().group(1).rsplit('-', 1)[-1]
)
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
opt_index = self.opts['optimization_order'].index(opt_level)
                        except (KeyError, ValueError):
                            # KeyError: 'optimization_order' missing from opts;
                            # ValueError: opt_level not in the configured order
                            # (list.index() raises ValueError, not KeyError).
log.trace(
'Disallowed optimization level %d for module '
'name \'%s\', skipping. Add %d to the '
'\'optimization_order\' config option if you '
'do not want to ignore this optimization '
'level.', opt_level, f_noext, opt_level
)
continue
else:
# Optimization level not reflected in filename on PY2
opt_index = 0
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
if f_noext in self.disabled:
log.trace(
'Skipping %s, it is disabled by configuration',
filename
)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
                # if it's a directory, allow loading it
                if ext == '':
                    # is there an __init__ file?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if '' == suffix:
continue # Next suffix (__init__ must have a suffix)
init_file = '__init__{0}'.format(suffix)
if init_file in subfiles:
break
else:
continue # Next filename
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if '' in (curr_ext, ext) and curr_ext != ext:
log.error(
'Module/package collision: \'%s\' and \'%s\'',
fpath,
self.file_mapping[f_noext][0]
)
if six.PY3 and ext == '.pyc' and curr_ext == '.pyc':
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
elif not curr_ext or self.suffix_order.index(ext) >= self.suffix_order.index(curr_ext):
                        # Match found, but a higher-priority match already
                        # exists, so skip this.
continue
if six.PY3 and not dirname and ext == '.pyc':
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
continue
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split('.')[-1]
self.file_mapping[f_noext] = (smod, '.o', 0)
def clear(self):
'''
Clear the dict
'''
with self._lock:
super(LazyLoader, self).clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, 'opts'):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
'''
Strip out of the opts any logger instance
'''
if '__grains__' not in self.pack:
self.context_dict['grains'] = opts.get('grains', {})
self.pack['__grains__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'grains')
if '__pillar__' not in self.pack:
self.context_dict['pillar'] = opts.get('pillar', {})
self.pack['__pillar__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'pillar')
mod_opts = {}
for key, val in list(opts.items()):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
'''
Iterate over all file_mapping files in order of closeness to mod_name
'''
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
def _reload_submodules(self, mod):
submodules = (
getattr(mod, sname) for sname in dir(mod) if
isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + '.'):
reload_module(submodule)
self._reload_submodules(submodule)
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name][:2]
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
sys.path.append(fpath_dirname)
if suffix == '.pyx':
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == '.o':
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split('.')
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == '.zip':
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
try:
mod_namespace = '.'.join((
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name))
except TypeError:
mod_namespace = '{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name)
if suffix == '':
if USE_IMPORTLIB:
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES),
(importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
(importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
]
file_finder = importlib.machinery.FileFinder(
fpath_dirname,
*loader_details
)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
mod = imp.load_module(mod_namespace, None, fpath, desc)
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
if USE_IMPORTLIB:
# pylint: disable=no-member
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(
mod_namespace, fpath, loader=loader
)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
#mod = importlib.util.module_from_spec(spec)
#spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
with salt.utils.files.fopen(fpath, desc[1]) as fn_:
mod = imp.load_module(mod_namespace, fn_, fpath, desc)
except IOError:
raise
except ImportError as exc:
if 'magic number' in six.text_type(exc):
error_msg = 'Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.'.format(self.tag, name)
log.warning(error_msg)
self.missing_modules[name] = error_msg
log.debug(
'Failed to import %s %s:\n',
self.tag, name, exc_info=True
)
self.missing_modules[name] = exc
return False
except Exception as error:
log.error(
'Failed to import %s %s, this is due most likely to a '
'syntax error:\n', self.tag, name, exc_info=True
)
self.missing_modules[name] = error
return False
except SystemExit as error:
try:
fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
except Exception:
pass
else:
tgt_fn = os.path.join('salt', 'utils', 'process.py')
if fn_.endswith(tgt_fn) and '_handle_signals' in caller:
                    # Race condition: SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(salt.defaults.exitcodes.EX_OK)
log.error(
'Failed to import %s %s as the module called exit()\n',
self.tag, name, exc_info=True
)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
if hasattr(mod, '__opts__'):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
# pack whatever other globals we were asked to
for p_name, p_value in six.iteritems(self.pack):
setattr(mod, p_name, p_value)
module_name = mod.__name__.rsplit('.', 1)[-1]
# Call a module's initialization method if it exists
module_init = getattr(mod, '__init__', None)
if inspect.isfunction(module_init):
try:
module_init(self.opts)
except TypeError as e:
log.error(e)
except Exception:
err_string = '__init__ failed'
log.debug(
'Error loading %s.%s: %s',
self.tag, module_name, err_string, exc_info=True
)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
virtual_funcs_to_process = ['__virtual__'] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
virtual_ret, module_name, virtual_err, virtual_aliases = \
self._process_virtual(mod, module_name, virtual_func)
if virtual_err is not None:
log.trace(
'Error loading %s.%s: %s',
self.tag, module_name, virtual_err
)
# if _process_virtual returned a non-True value then we are
# supposed to not process this module
if virtual_ret is not True and module_name not in self.missing_modules:
# If a module has information about why it could not be loaded, record it
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
else:
virtual_aliases = ()
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
if 'proxy' in self.opts:
if self.tag in ['grains', 'proxy']:
if not hasattr(mod, '__proxyenabled__') or \
(self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
'*' not in mod.__proxyenabled__):
err_string = 'not a proxy_minion enabled module'
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, '__load__', False) is not False:
log.info(
'The functions from module \'%s\' are being loaded from the '
'provided __load__ attribute', module_name
)
# If we had another module by the same virtual name, we should put any
# new functions under the existing dictionary.
mod_names = [module_name] + list(virtual_aliases)
mod_dict = dict((
(x, self.loaded_modules.get(x, self.mod_dict_class()))
for x in mod_names
))
for attr in getattr(mod, '__load__', dir(mod)):
if attr.startswith('_'):
# private functions are skipped
continue
func = getattr(mod, attr)
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
# Not a function!? Skip it!!!
continue
            # Let's get the function name.
            # If the module has the __func_alias__ attribute, it must be a
            # dictionary mapping in the form of (key -> value):
            #   <real-func-name> -> <desired-func-name>
            #
            # It defaults, of course, to the found callable attribute name
            # if no alias is defined.
funcname = getattr(mod, '__func_alias__', {}).get(attr, attr)
for tgt_mod in mod_names:
try:
full_funcname = '.'.join((tgt_mod, funcname))
except TypeError:
full_funcname = '{0}.{1}'.format(tgt_mod, funcname)
# Save many references for lookups
# Careful not to overwrite existing (higher priority) functions
if full_funcname not in self._dict:
self._dict[full_funcname] = func
if funcname not in mod_dict[tgt_mod]:
setattr(mod_dict[tgt_mod], funcname, func)
mod_dict[tgt_mod][funcname] = func
self._apply_outputter(func, mod)
# enforce depends
try:
Depends.enforce_dependencies(self._dict, self.tag)
except RuntimeError as exc:
log.info(
'Depends.enforce_dependencies() failed for the following '
'reason: %s', exc
)
for tgt_mod in mod_names:
self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
return True
def _load(self, key):
'''
Load a single item if you have it
'''
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, six.string_types):
raise KeyError('The key must be a string.')
if '.' not in key:
            raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
mod_name, _ = key.split('.', 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
'Failed to load function %s because its module (%s) is '
'not in the whitelist: %s', key, mod_name, self.whitelist
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except IOError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
'''
Load all of them
'''
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
def reload_modules(self):
with self._lock:
self.loaded_files = set()
self._load_all()
def _apply_outputter(self, func, mod):
'''
Apply the __outputter__ variable to the functions
'''
if hasattr(mod, '__outputter__'):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
def _process_virtual(self, mod, module_name, virtual_func='__virtual__'):
'''
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
False and will indicate if the module should be loaded or not (i.e. if
        it threw an exception while processing its __virtual__ function). The
second value is the determined virtual name, which may be the same as
the value provided.
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
'''
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
# named __virtualname__ with the name that the module should be
# referred to as.
#
# This allows us to have things like the pkg module working on all
# platforms under the name 'pkg'. It also allows for modules like
# augeas_cfg to be referred to as 'augeas', which would otherwise have
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
virtual_aliases = getattr(mod, '__virtual_aliases__', tuple())
try:
error_reason = None
if hasattr(mod, '__virtual__') and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual = getattr(mod, virtual_func)()
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
if self.opts.get('virtual_timer', False):
end = time.time() - start
msg = 'Virtual function took {0} seconds for {1}'.format(
end, module_name)
log.warning(msg)
except Exception as exc:
error_reason = (
'Exception raised when processing __virtual__ function'
' for {0}. Module will not be loaded: {1}'.format(
mod.__name__, exc))
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
virtual = None
# Get the module's virtual name
virtualname = getattr(mod, '__virtualname__', virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
# load for some other reason.
# Some modules might accidentally return None and are
# improperly loaded
if virtual is None:
log.warning(
'%s.__virtual__() is wrongly returning `None`. '
'It should either return `True`, `False` or a new '
'name. If you\'re the developer of the module '
'\'%s\', please fix this.', mod.__name__, module_name
)
return (False, module_name, error_reason, virtual_aliases)
# At this point, __virtual__ did not return a
# boolean value, let's check for deprecated usage
# or module renames
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
log.trace('Loaded %s as virtual %s', module_name, virtual)
if not hasattr(mod, '__virtualname__'):
salt.utils.versions.warn_until(
'Hydrogen',
'The \'{0}\' module is renaming itself in its '
'__virtual__() function ({1} => {2}). Please '
                        'set its virtual name as the '
'\'__virtualname__\' module attribute. '
'Example: "__virtualname__ = \'{2}\'"'.format(
mod.__name__,
module_name,
virtual
)
)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
'The module \'%s\' is showing some bad usage. Its '
'__virtualname__ attribute is set to \'%s\' yet the '
'__virtual__() function is returning \'%s\'. These '
'values should match!',
mod.__name__, virtualname, virtual
)
module_name = virtualname
# If the __virtual__ function returns True and __virtualname__
# is set then use it
elif virtual is True and virtualname != module_name:
if virtualname is not True:
module_name = virtualname
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
log.debug('KeyError when loading %s', module_name, exc_info=True)
except Exception:
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
'Failed to read the virtual function for %s: %s',
self.tag, module_name, exc_info=True
)
return (False, module_name, error_reason, virtual_aliases)
return (True, module_name, None, virtual_aliases)
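# Illustrative usage sketch (hypothetical path and opts): keys of a
# LazyLoader are 'module.function' strings; a lookup loads just that module
# from disk, while iterating over the keys forces every module on the search
# path to load.
def _lazy_loader_usage_sketch():
    opts = {'optimization_order': [0, 1, 2]}
    loader = LazyLoader(['/srv/salt/_modules'], opts, tag='module')
    # ping = loader['test.ping']     # loads only the 'test' module
    # everything = list(loader)      # loads *all* modules
    return loader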
def global_injector_decorator(inject_globals):
'''
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
'''
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
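# Illustrative sketch of global_injector_decorator (hypothetical function and
# dunder contents): the injected names exist in the wrapped function's
# globals only for the duration of each call.
def _global_injector_sketch():
    def needs_salt_dunder():
        return __salt__['test.ping']()  # pylint: disable=undefined-variable
    injected = {'__salt__': {'test.ping': lambda: True}}
    wrapped = global_injector_decorator(injected)(needs_salt_dunder)
    assert wrapped() is True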
# Status: ported.
# Base revision: 64488.
#
# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
#  Implements virtual targets, which correspond to actual files created during
#  the build, but are not yet targets in the Jam sense. They are needed, for
#  example, when searching for possible transformation sequences, when it is
#  not known whether a particular target should be created at all.
#
#
# +--------------------------+
# | VirtualTarget |
# +==========================+
# | actualize |
# +--------------------------+
# | actualize_action() = 0 |
# | actualize_location() = 0 |
# +----------------+---------+
# |
# ^
# / \
# +-+-+
# |
# +---------------------+ +-------+--------------+
# | Action | | AbstractFileTarget |
# +=====================| * +======================+
# | action_name | +--+ action |
# | properties | | +----------------------+
# +---------------------+--+ | actualize_action() |
# | actualize() |0..1 +-----------+----------+
# | path() | |
# | adjust_properties() | sources |
# | actualize_sources() | targets |
# +------+--------------+ ^
# | / \
# ^ +-+-+
# / \ |
# +-+-+ +-------------+-------------+
# | | |
# | +------+---------------+ +--------+-------------+
# | | FileTarget | | SearchedLibTarget |
# | +======================+ +======================+
# | | actualize-location() | | actualize-location() |
# | +----------------------+ +----------------------+
# |
# +-+------------------------------+
# | |
# +----+----------------+ +---------+-----------+
# | CompileAction | | LinkAction |
# +=====================+ +=====================+
# | adjust_properties() | | adjust_properties() |
# +---------------------+ | actualize_sources() |
# +---------------------+
#
# The 'CompileAction' and 'LinkAction' classes are not defined here,
# but in builtin.jam modules. They are shown in the diagram to give
# the big picture.
import bjam
import re
import os.path
import string
import types
from b2.util import path, utility, set, is_iterable_typed
from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, get_value
from b2.util.sequence import unique
from b2.tools import common
from b2.exceptions import *
import b2.build.type
import b2.build.property_set as property_set
import b2.build.property as property
from b2.manager import get_manager
from b2.util import bjam_signature
__re_starts_with_at = re.compile ('^@(.*)')
class VirtualTargetRegistry:
def __init__ (self, manager):
self.manager_ = manager
# A cache for FileTargets
self.files_ = {}
# A cache for targets.
self.cache_ = {}
# A map of actual names to virtual targets.
# Used to make sure we don't associate same
# actual target to two virtual targets.
self.actual_ = {}
self.recent_targets_ = []
        # All targets ever registered
self.all_targets_ = []
self.next_id_ = 0
def register (self, target):
""" Registers a new virtual target. Checks if there's already registered target, with the same
name, type, project and subvariant properties, and also with the same sources
and equal action. If such target is found it is retured and 'target' is not registered.
Otherwise, 'target' is registered and returned.
"""
assert isinstance(target, VirtualTarget)
if target.path():
signature = target.path() + "-" + target.name()
else:
signature = "-" + target.name()
result = None
if not self.cache_.has_key (signature):
self.cache_ [signature] = []
for t in self.cache_ [signature]:
a1 = t.action ()
a2 = target.action ()
# TODO: why are we checking for not result?
if not result:
if not a1 and not a2:
result = t
else:
if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources ():
ps1 = a1.properties ()
ps2 = a2.properties ()
p1 = ps1.base () + ps1.free () +\
b2.util.set.difference(ps1.dependency(), ps1.incidental())
p2 = ps2.base () + ps2.free () +\
b2.util.set.difference(ps2.dependency(), ps2.incidental())
if p1 == p2:
result = t
if not result:
self.cache_ [signature].append (target)
result = target
# TODO: Don't append if we found pre-existing target?
self.recent_targets_.append(result)
self.all_targets_.append(result)
return result
def from_file (self, file, file_location, project):
""" Creates a virtual target with appropriate name and type from 'file'.
If a target with that name in that project was already created, returns that already
created target.
            TODO: a more correct way would be to compute the path to the file, based on the name and source location
            for the project, and use that path to determine if the target was already created.
            TODO: passing the project along with all virtual targets is starting to get annoying.
"""
if __debug__:
from .targets import ProjectTarget
assert isinstance(file, basestring)
assert isinstance(file_location, basestring)
assert isinstance(project, ProjectTarget)
# Check if we've created a target corresponding to this file.
path = os.path.join(os.getcwd(), file_location, file)
path = os.path.normpath(path)
if self.files_.has_key (path):
return self.files_ [path]
file_type = b2.build.type.type (file)
result = FileTarget (file, file_type, project,
None, file_location)
self.files_ [path] = result
return result
def recent_targets(self):
"""Each target returned by 'register' is added to a list of
        'recent-targets', returned by this function. This allows us to find
        all targets created while building a given main target, even if those
        targets are not returned to the caller directly."""
return self.recent_targets_
def clear_recent_targets(self):
self.recent_targets_ = []
def all_targets(self):
# Returns all virtual targets ever created
return self.all_targets_
# Returns all targets from 'targets' with types
# equal to 'type' or derived from it.
def select_by_type(self, type, targets):
        return [t for t in targets if b2.build.type.is_subtype(t.type(), type)]
def register_actual_name (self, actual_name, virtual_target):
assert isinstance(actual_name, basestring)
assert isinstance(virtual_target, VirtualTarget)
if self.actual_.has_key (actual_name):
cs1 = self.actual_ [actual_name].creating_subvariant ()
cs2 = virtual_target.creating_subvariant ()
cmt1 = cs1.main_target ()
cmt2 = cs2.main_target ()
action1 = self.actual_ [actual_name].action ()
action2 = virtual_target.action ()
properties_added = []
properties_removed = []
if action1 and action2:
p1 = action1.properties ()
p1 = p1.raw ()
p2 = action2.properties ()
p2 = p2.raw ()
properties_removed = set.difference (p1, p2)
if not properties_removed: properties_removed = "none"
properties_added = set.difference (p2, p1)
if not properties_added: properties_added = "none"
# FIXME: Revive printing of real location.
get_manager().errors()(
"Duplicate name of actual target: '%s'\n"
"previous virtual target '%s'\n"
"created from '%s'\n"
"another virtual target '%s'\n"
"created from '%s'\n"
"added properties: '%s'\n"
"removed properties: '%s'\n"
% (actual_name,
self.actual_ [actual_name], "loc", #cmt1.location (),
virtual_target,
"loc", #cmt2.location (),
properties_added, properties_removed))
else:
self.actual_ [actual_name] = virtual_target
def add_suffix (self, specified_name, file_type, prop_set):
""" Appends the suffix appropriate to 'type/property_set' combination
to the specified name and returns the result.
"""
assert isinstance(specified_name, basestring)
assert isinstance(file_type, basestring)
assert isinstance(prop_set, property_set.PropertySet)
suffix = b2.build.type.generated_target_suffix (file_type, prop_set)
if suffix:
return specified_name + '.' + suffix
else:
return specified_name
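# Illustrative sketch (hypothetical 'manager' and 'project' objects): the
# registry caches file targets by absolute path, so asking for the same file
# twice yields the same FileTarget instance.
def _registry_cache_sketch(manager, project):
    registry = VirtualTargetRegistry(manager)
    t1 = registry.from_file('hello.cpp', '.', project)
    t2 = registry.from_file('hello.cpp', '.', project)
    assert t1 is t2
    return registry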
class VirtualTarget:
""" Potential target. It can be converted into jam target and used in
building, if needed. However, it can be also dropped, which allows
to search for different transformation and select only one.
name: name of this target.
project: project to which this target belongs.
"""
def __init__ (self, name, project):
if __debug__:
from .targets import ProjectTarget
assert isinstance(name, basestring)
assert isinstance(project, ProjectTarget)
self.name_ = name
self.project_ = project
self.dependencies_ = []
self.always_ = False
        # Caches whether dependencies for scanners have already been set.
self.made_ = {}
def manager(self):
return self.project_.manager()
def virtual_targets(self):
return self.manager().virtual_targets()
def name (self):
""" Name of this target.
"""
return self.name_
def project (self):
""" Project of this target.
"""
return self.project_
def depends (self, d):
""" Adds additional instances of 'VirtualTarget' that this
one depends on.
"""
        # list.sort() returns None, so sort via sorted() to keep the list
        self.dependencies_ = sorted(unique(self.dependencies_ + d))
def dependencies (self):
return self.dependencies_
def always(self):
self.always_ = True
def actualize (self, scanner = None):
""" Generates all the actual targets and sets up build actions for
this target.
If 'scanner' is specified, creates an additional target
with the same location as actual target, which will depend on the
actual target and be associated with 'scanner'. That additional
target is returned. See the docs (#dependency_scanning) for rationale.
Target must correspond to a file if 'scanner' is specified.
If scanner is not specified, then actual target is returned.
"""
if __debug__:
from .scanner import Scanner
assert scanner is None or isinstance(scanner, Scanner)
actual_name = self.actualize_no_scanner ()
if self.always_:
bjam.call("ALWAYS", actual_name)
if not scanner:
return actual_name
else:
# Add the scanner instance to the grist for name.
g = '-'.join ([ungrist(get_grist(actual_name)), str(id(scanner))])
name = replace_grist (actual_name, '<' + g + '>')
if not self.made_.has_key (name):
self.made_ [name] = True
self.project_.manager ().engine ().add_dependency (name, actual_name)
self.actualize_location (name)
self.project_.manager ().scanners ().install (scanner, name, str (self))
return name
# private: (overridables)
def actualize_action (self, target):
""" Sets up build actions for 'target'. Should call appropriate rules
and set target variables.
"""
raise BaseException ("method should be defined in derived classes")
def actualize_location (self, target):
""" Sets up variables on 'target' which specify its location.
"""
raise BaseException ("method should be defined in derived classes")
def path (self):
""" If the target is generated one, returns the path where it will be
generated. Otherwise, returns empty list.
"""
raise BaseException ("method should be defined in derived classes")
def actual_name (self):
""" Return that actual target name that should be used
(for the case where no scanner is involved)
"""
raise BaseException ("method should be defined in derived classes")
class AbstractFileTarget (VirtualTarget):
""" Target which correspond to a file. The exact mapping for file
is not yet specified in this class. (TODO: Actually, the class name
could be better...)
May be a source file (when no action is specified), or
derived file (otherwise).
The target's grist is concatenation of project's location,
properties of action (for derived files), and, optionally,
value identifying the main target.
exact: If non-empty, the name is exactly the name
created file should have. Otherwise, the '__init__'
method will add suffix obtained from 'type' by
calling 'type.generated-target-suffix'.
type: optional type of this target.
"""
def __init__ (self, name, type, project, action = None, exact=False):
assert isinstance(type, basestring) or type is None
assert action is None or isinstance(action, Action)
assert isinstance(exact, (int, bool))
VirtualTarget.__init__ (self, name, project)
self.type_ = type
self.action_ = action
self.exact_ = exact
if action:
action.add_targets ([self])
        # Note: 'self.type' is the bound method and is always truthy; the
        # intent is to adjust the name only when an actual type is known.
        if self.type_ and not exact:
self.__adjust_name (name)
self.actual_name_ = None
self.path_ = None
self.intermediate_ = False
self.creating_subvariant_ = None
# True if this is a root target.
self.root_ = False
def type (self):
return self.type_
def set_path (self, path):
""" Sets the path. When generating target name, it will override any path
computation from properties.
"""
assert isinstance(path, basestring)
self.path_ = os.path.normpath(path)
def action (self):
""" Returns the action.
"""
return self.action_
def root (self, set = None):
""" Sets/gets the 'root' flag. Target is root is it directly correspods to some
variant of a main target.
"""
assert isinstance(set, (int, bool, type(None)))
if set:
self.root_ = True
return self.root_
def creating_subvariant (self, s = None):
""" Gets or sets the subvariant which created this target. Subvariant
is set when target is brought into existance, and is never changed
after that. In particual, if target is shared by subvariant, only
the first is stored.
s: If specified, specified the value to set,
which should be instance of 'subvariant' class.
"""
assert s is None or isinstance(s, Subvariant)
        if s and not self.creating_subvariant_:
            # The creating subvariant is set once and never changed afterwards.
            self.creating_subvariant_ = s
return self.creating_subvariant_
def actualize_action (self, target):
assert isinstance(target, basestring)
if self.action_:
self.action_.actualize ()
# Return a human-readable representation of this target
#
# If this target has an action, that's:
#
# { <action-name>-<self.name>.<self.type> <action-sources>... }
#
# otherwise, it's:
#
# { <self.name>.<self.type> }
#
def str(self):
a = self.action()
name_dot_type = self.name_ + "." + self.type_
if a:
action_name = a.action_name()
ss = [ s.str() for s in a.sources()]
return "{ %s-%s %s}" % (action_name, name_dot_type, str(ss))
else:
return "{ " + name_dot_type + " }"
# private:
def actual_name (self):
if not self.actual_name_:
self.actual_name_ = '<' + self.grist() + '>' + os.path.normpath(self.name_)
return self.actual_name_
def grist (self):
"""Helper to 'actual_name', above. Compute unique prefix used to distinguish
this target from other targets with the same name which create different
file.
"""
# Depending on target, there may be different approaches to generating
# unique prefixes. We'll generate prefixes in the form
# <one letter approach code> <the actual prefix>
path = self.path ()
if path:
# The target will be generated to a known path. Just use the path
# for identification, since path is as unique as it can get.
return 'p' + path
else:
            # The file is either a source, which will be searched for, or not a
            # file at all. Use the location of the project for distinguishing.
project_location = self.project_.get ('location')
path_components = b2.util.path.split(project_location)
location_grist = '!'.join (path_components)
if self.action_:
ps = self.action_.properties ()
property_grist = ps.as_path ()
# 'property_grist' can be empty when 'ps' is an empty
# property set.
if property_grist:
location_grist = location_grist + '/' + property_grist
return 'l' + location_grist
def __adjust_name(self, specified_name):
"""Given the target name specified in constructor, returns the
name which should be really used, by looking at the <tag> properties.
The tag properties come in two flavour:
- <tag>value,
- <tag>@rule-name
In the first case, value is just added to name
In the second case, the specified rule is called with specified name,
target type and properties and should return the new name.
If not <tag> property is specified, or the rule specified by
<tag> returns nothing, returns the result of calling
virtual-target.add-suffix"""
assert isinstance(specified_name, basestring)
if self.action_:
ps = self.action_.properties()
else:
ps = property_set.empty()
# FIXME: I'm not sure how this is used, need to check with
# Rene to figure out how to implement
#~ We add ourselves to the properties so that any tag rule can get
#~ more direct information about the target than just that available
#~ through the properties. This is useful in implementing
#~ name changes based on the sources of the target. For example to
#~ make unique names of object files based on the source file.
#~ --grafik
#ps = property_set.create(ps.raw() + ["<target>%s" % "XXXX"])
#ps = [ property-set.create [ $(ps).raw ] <target>$(__name__) ] ;
tag = ps.get("<tag>")
if tag:
if len(tag) > 1:
get_manager().errors()(
"""<tag>@rulename is present but is not the only <tag> feature""")
tag = tag[0]
if callable(tag):
self.name_ = tag(specified_name, self.type_, ps)
else:
if not tag[0] == '@':
                    self.manager_.errors()("""The value of the <tag> feature must be '@rule-name'""")
exported_ps = b2.util.value_to_jam(ps, methods=True)
self.name_ = b2.util.call_jam_function(
tag[1:], specified_name, self.type_, exported_ps)
if self.name_:
self.name_ = self.name_[0]
# If there's no tag or the tag rule returned nothing.
if not tag or not self.name_:
self.name_ = add_prefix_and_suffix(specified_name, self.type_, ps)
def actualize_no_scanner(self):
name = self.actual_name()
# Do anything only on the first invocation
if not self.made_:
self.made_[name] = True
if self.action_:
# For non-derived target, we don't care if there
# are several virtual targets that refer to the same name.
                # One case when this is unavoidable is when the file name is
                # main.cpp and two targets have types CPP (for compiling)
                # and MOCCABLE_CPP (for conversion to H via Qt tools).
self.virtual_targets().register_actual_name(name, self)
for i in self.dependencies_:
self.manager_.engine().add_dependency(name, i.actualize())
self.actualize_location(name)
self.actualize_action(name)
return name
@bjam_signature((["specified_name"], ["type"], ["property_set"]))
def add_prefix_and_suffix(specified_name, type, property_set):
"""Appends the suffix appropriate to 'type/property-set' combination
to the specified name and returns the result."""
property_set = b2.util.jam_to_value_maybe(property_set)
suffix = ""
if type:
suffix = b2.build.type.generated_target_suffix(type, property_set)
# Handle suffixes for which no leading dot is desired. Those are
# specified by enclosing them in <...>. Needed by python so it
# can create "_d.so" extensions, for example.
if get_grist(suffix):
suffix = ungrist(suffix)
elif suffix:
suffix = "." + suffix
prefix = ""
if type:
prefix = b2.build.type.generated_target_prefix(type, property_set)
if specified_name.startswith(prefix):
prefix = ""
if not prefix:
prefix = ""
if not suffix:
suffix = ""
return prefix + specified_name + suffix
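# Illustrative sketch (assumes a registered 'EXE' type whose generated suffix
# for the given property set is 'exe'): the computed name gains a
# dot-separated suffix, while grist-wrapped suffixes like '<_d.so>' are
# appended verbatim, without the leading dot.
def _naming_sketch():
    ps = property_set.empty()
    return add_prefix_and_suffix('hello', 'EXE', ps)   # e.g. 'hello.exe'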
class FileTarget (AbstractFileTarget):
""" File target with explicitly known location.
The file path is determined as
- value passed to the 'set_path' method, if any
- for derived files, project's build dir, joined with components
that describe action's properties. If the free properties
are not equal to the project's reference properties
          an element with the name of the main target is added.
- for source files, project's source dir
The file suffix is
- the value passed to the 'suffix' method, if any, or
- the suffix which correspond to the target's type.
"""
def __init__ (self, name, type, project, action = None, path=None, exact=False):
assert isinstance(type, basestring) or type is None
assert action is None or isinstance(action, Action)
assert isinstance(exact, (int, bool))
AbstractFileTarget.__init__ (self, name, type, project, action, exact)
self.path_ = path
def __str__(self):
if self.type_:
return self.name_ + "." + self.type_
else:
return self.name_
def clone_with_different_type(self, new_type):
assert isinstance(new_type, basestring)
return FileTarget(self.name_, new_type, self.project_,
self.action_, self.path_, exact=True)
def actualize_location (self, target):
assert isinstance(target, basestring)
engine = self.project_.manager_.engine ()
if self.action_:
# This is a derived file.
path = self.path ()
engine.set_target_variable (target, 'LOCATE', path)
# Make sure the path exists.
engine.add_dependency (target, path)
common.mkdir(engine, path)
# It's possible that the target name includes a directory
# too, for example when installing headers. Create that
# directory.
d = os.path.dirname(get_value(target))
if d:
d = os.path.join(path, d)
engine.add_dependency(target, d)
common.mkdir(engine, d)
            # For a real file target, we create a fake target that
            # depends on the real target. This allows us to run
#
# bjam hello.o
#
# without trying to guess the name of the real target.
            # Note that the target has no directory name, and a special
# grist <e>.
#
# First, that means that "bjam hello.o" will build all
# known hello.o targets.
# Second, the <e> grist makes sure this target won't be confused
# with other targets, for example, if we have subdir 'test'
# with target 'test' in it that includes 'test.o' file,
# then the target for directory will be just 'test' the target
# for test.o will be <ptest/bin/gcc/debug>test.o and the target
# we create below will be <e>test.o
engine.add_dependency("<e>%s" % get_value(target), target)
            # Allow bjam <path-to-file>/<file> to work. This won't catch all
            # possible ways to refer to the path (relative/absolute, extra ".",
            # various ".."), but should help in obvious cases.
engine.add_dependency("<e>%s" % (os.path.join(path, get_value(target))), target)
else:
# This is a source file.
engine.set_target_variable (target, 'SEARCH', self.project_.get ('source-location'))
def path (self):
""" Returns the directory for this target.
"""
if not self.path_:
if self.action_:
p = self.action_.properties ()
(target_path, relative_to_build_dir) = p.target_path ()
if relative_to_build_dir:
# Indicates that the path is relative to
# build dir.
target_path = os.path.join (self.project_.build_dir (), target_path)
# Store the computed path, so that it's not recomputed
# any more
self.path_ = target_path
return os.path.normpath(self.path_)
class NotFileTarget(AbstractFileTarget):
def __init__(self, name, project, action):
assert isinstance(action, Action)
AbstractFileTarget.__init__(self, name, None, project, action)
def path(self):
"""Returns nothing, to indicate that target path is not known."""
return None
def actualize_location(self, target):
assert isinstance(target, basestring)
bjam.call("NOTFILE", target)
bjam.call("ALWAYS", target)
bjam.call("NOUPDATE", target)
class Action:
""" Class which represents an action.
Both 'targets' and 'sources' should list instances of 'VirtualTarget'.
Action name should name a rule with this prototype
rule action_name ( targets + : sources * : properties * )
Targets and sources are passed as actual jam targets. The rule may
not establish dependency relationship, but should do everything else.
"""
def __init__ (self, manager, sources, action_name, prop_set):
assert is_iterable_typed(sources, VirtualTarget)
assert isinstance(action_name, basestring) or action_name is None
assert(isinstance(prop_set, property_set.PropertySet))
self.sources_ = sources
self.action_name_ = action_name
if not prop_set:
prop_set = property_set.empty()
self.properties_ = prop_set
        assert all(isinstance(v, VirtualTarget)
                   for v in prop_set.get('implicit-dependency')), \
            "'implicit-dependency' values must be VirtualTarget instances"
self.manager_ = manager
self.engine_ = self.manager_.engine ()
self.targets_ = []
# Indicates whether this has been actualized or not.
self.actualized_ = False
self.dependency_only_sources_ = []
self.actual_sources_ = []
def add_targets (self, targets):
assert is_iterable_typed(targets, VirtualTarget)
self.targets_ += targets
def replace_targets(self, old_targets, new_targets):
assert is_iterable_typed(old_targets, VirtualTarget)
assert is_iterable_typed(new_targets, VirtualTarget)
self.targets_ = [t for t in self.targets_ if not t in old_targets] + new_targets
def targets (self):
return self.targets_
def sources (self):
return self.sources_
def action_name (self):
return self.action_name_
def properties (self):
return self.properties_
def actualize (self):
""" Generates actual build instructions.
"""
if self.actualized_:
return
self.actualized_ = True
ps = self.properties ()
properties = self.adjust_properties (ps)
actual_targets = []
for i in self.targets ():
actual_targets.append (i.actualize ())
self.actualize_sources (self.sources (), properties)
self.engine_.add_dependency (actual_targets, self.actual_sources_ + self.dependency_only_sources_)
# FIXME: check the comment below. Was self.action_name_ [1]
# Action name can include additional rule arguments, which should not
# be passed to 'set-target-variables'.
# FIXME: breaking circular dependency
import toolset
toolset.set_target_variables (self.manager_, self.action_name_, actual_targets, properties)
engine = self.manager_.engine ()
# FIXME: this is supposed to help --out-xml option, but we don't
# implement that now, and anyway, we should handle it in Python,
# not but putting variables on bjam-level targets.
bjam.call("set-target-variable", actual_targets, ".action", repr(self))
self.manager_.engine ().set_update_action (self.action_name_, actual_targets, self.actual_sources_,
properties)
# Since we set up creating action here, we also set up
# action for cleaning up
self.manager_.engine ().set_update_action ('common.Clean', 'clean-all',
actual_targets)
return actual_targets
def actualize_source_type (self, sources, prop_set):
""" Helper for 'actualize_sources'.
For each passed source, actualizes it with the appropriate scanner.
Returns the actualized virtual targets.
"""
assert is_iterable_typed(sources, VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
result = []
for i in sources:
scanner = None
# FIXME: what's this?
# if isinstance (i, str):
# i = self.manager_.get_object (i)
if i.type ():
scanner = b2.build.type.get_scanner (i.type (), prop_set)
r = i.actualize (scanner)
result.append (r)
return result
def actualize_sources (self, sources, prop_set):
""" Creates actual jam targets for sources. Initializes two member
variables:
'self.actual_sources_' -- sources which are passed to updating action
'self.dependency_only_sources_' -- sources which are made dependencies, but
are not used otherwise.
New values will be *appended* to the variables. They may be non-empty,
if caller wants it.
"""
assert is_iterable_typed(sources, VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
dependencies = self.properties_.get ('<dependency>')
self.dependency_only_sources_ += self.actualize_source_type (dependencies, prop_set)
self.actual_sources_ += self.actualize_source_type (sources, prop_set)
# This is used to help bjam find dependencies in generated headers
# in other main targets.
# Say:
#
# make a.h : ....... ;
# exe hello : hello.cpp : <implicit-dependency>a.h ;
#
# However, for bjam to find the dependency the generated target must
# be actualized (i.e. have the jam target). In the above case,
# if we're building just hello ("bjam hello"), 'a.h' won't be
# actualized unless we do it here.
implicit = self.properties_.get("<implicit-dependency>")
for i in implicit:
i.actualize()
def adjust_properties (self, prop_set):
""" Determines real properties when trying building with 'properties'.
This is last chance to fix properties, for example to adjust includes
to get generated headers correctly. Default implementation returns
its argument.
"""
assert isinstance(prop_set, property_set.PropertySet)
return prop_set
class NullAction (Action):
""" Action class which does nothing --- it produces the targets with
specific properties out of nowhere. It's needed to distinguish virtual
targets with different properties that are known to exist, and have no
actions which create them.
"""
def __init__ (self, manager, prop_set):
assert isinstance(prop_set, property_set.PropertySet)
Action.__init__ (self, manager, [], None, prop_set)
def actualize (self):
if not self.actualized_:
self.actualized_ = True
for i in self.targets ():
i.actualize ()
class NonScanningAction(Action):
"""Class which acts exactly like 'action', except that the sources
are not scanned for dependencies."""
def __init__(self, sources, action_name, property_set):
#FIXME: should the manager parameter of Action.__init__
#be removed? -- Steven Watanabe
Action.__init__(self, b2.manager.get_manager(), sources, action_name, property_set)
    def actualize_source_type(self, sources, prop_set):
        # Named 'prop_set' so the parameter does not shadow the property_set
        # module used in the assertion below.
        assert is_iterable_typed(sources, VirtualTarget)
        assert isinstance(prop_set, property_set.PropertySet)
result = []
for s in sources:
result.append(s.actualize())
return result
def traverse (target, include_roots = False, include_sources = False):
""" Traverses the dependency graph of 'target' and return all targets that will
be created before this one is created. If root of some dependency graph is
found during traversal, it's either included or not, dependencing of the
value of 'include_roots'. In either case, sources of root are not traversed.
"""
assert isinstance(target, VirtualTarget)
assert isinstance(include_roots, (int, bool))
assert isinstance(include_sources, (int, bool))
result = []
if target.action ():
action = target.action ()
# This includes 'target' as well
result += action.targets ()
for t in action.sources ():
# FIXME:
# TODO: see comment in Manager.register_object ()
#if not isinstance (t, VirtualTarget):
# t = target.project_.manager_.get_object (t)
if not t.root ():
result += traverse (t, include_roots, include_sources)
elif include_roots:
result.append (t)
elif include_sources:
result.append (target)
return result
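# Illustrative usage sketch (hypothetical 'target'): collect everything that
# must exist before 'target' is created, keeping dependency-graph roots but
# not descending into their sources.
def _traverse_sketch(target):
    return traverse(target, include_roots=True)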
def clone_action (action, new_project, new_action_name, new_properties):
"""Takes an 'action' instances and creates new instance of it
and all produced target. The rule-name and properties are set
to 'new-rule-name' and 'new-properties', if those are specified.
Returns the cloned action."""
if __debug__:
from .targets import ProjectTarget
assert isinstance(action, Action)
assert isinstance(new_project, ProjectTarget)
assert isinstance(new_action_name, basestring)
assert isinstance(new_properties, property_set.PropertySet)
if not new_action_name:
new_action_name = action.action_name()
if not new_properties:
new_properties = action.properties()
cloned_action = action.__class__(action.manager_, action.sources(), new_action_name,
new_properties)
cloned_targets = []
for target in action.targets():
n = target.name()
        # Don't modify the name of the produced targets.
cloned_target = FileTarget(n, target.type(), new_project,
cloned_action, exact=True)
d = target.dependencies()
if d:
cloned_target.depends(d)
cloned_target.root(target.root())
cloned_target.creating_subvariant(target.creating_subvariant())
cloned_targets.append(cloned_target)
return cloned_action
class Subvariant:
def __init__ (self, main_target, prop_set, sources, build_properties, sources_usage_requirements, created_targets):
"""
main_target: The instance of MainTarget class
prop_set: Properties requested for this target
sources:
build_properties: Actually used properties
sources_usage_requirements: Properties propagated from sources
created_targets: Top-level created targets
"""
if __debug__:
from .targets import AbstractTarget
assert isinstance(main_target, AbstractTarget)
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, VirtualTarget)
assert isinstance(build_properties, property_set.PropertySet)
assert isinstance(sources_usage_requirements, property_set.PropertySet)
assert is_iterable_typed(created_targets, VirtualTarget)
self.main_target_ = main_target
self.properties_ = prop_set
self.sources_ = sources
self.build_properties_ = build_properties
self.sources_usage_requirements_ = sources_usage_requirements
self.created_targets_ = created_targets
self.usage_requirements_ = None
# Pre-compose the list of other dependency graphs, on which this one
# depends
deps = build_properties.get('<implicit-dependency>')
self.other_dg_ = []
for d in deps:
self.other_dg_.append(d.creating_subvariant ())
self.other_dg_ = unique (self.other_dg_)
self.implicit_includes_cache_ = {}
self.target_directories_ = None
def main_target (self):
return self.main_target_
def created_targets (self):
return self.created_targets_
def requested_properties (self):
return self.properties_
def build_properties (self):
return self.build_properties_
def sources_usage_requirements (self):
return self.sources_usage_requirements_
def set_usage_requirements (self, usage_requirements):
assert isinstance(usage_requirements, property_set.PropertySet)
self.usage_requirements_ = usage_requirements
def usage_requirements (self):
return self.usage_requirements_
def all_referenced_targets(self, result):
"""Returns all targets referenced by this subvariant,
either directly or indirectly, and either as sources,
or as dependency properties. Targets referred with
dependency property are returned a properties, not targets."""
if __debug__:
from .targets import GenerateResult
assert isinstance(result, GenerateResult)
# Find directly referenced targets.
deps = self.build_properties().dependency()
all_targets = self.sources_ + deps
# Find other subvariants.
r = []
for e in all_targets:
if not e in result:
result.add(e)
if isinstance(e, property.Property):
t = e.value()
else:
t = e
# FIXME: how can this be?
cs = t.creating_subvariant()
if cs:
r.append(cs)
r = unique(r)
for s in r:
if s != self:
s.all_referenced_targets(result)
def implicit_includes (self, feature, target_type):
""" Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implcit-dependecy>properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
"""
assert isinstance(feature, basestring)
assert isinstance(target_type, basestring)
if not target_type:
key = feature
else:
key = feature + "-" + target_type
result = self.implicit_includes_cache_.get(key)
if not result:
target_paths = self.all_target_directories(target_type)
target_paths = unique(target_paths)
result = ["<%s>%s" % (feature, p) for p in target_paths]
self.implicit_includes_cache_[key] = result
return result
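# Hedged illustration (not part of the module): for a Subvariant 's', a call
# such as
#   s.implicit_includes('include', 'H')
# returns properties of the form "<include>path" for every generated target
# derived from type 'H'; the exact type names depend on the build setup.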
def all_target_directories(self, target_type = None):
assert isinstance(target_type, (basestring, type(None)))
# TODO: does not appear to use target_type in deciding
# if we've computed this already.
if not self.target_directories_:
self.target_directories_ = self.compute_target_directories(target_type)
return self.target_directories_
def compute_target_directories(self, target_type=None):
assert isinstance(target_type, (basestring, type(None)))
result = []
for t in self.created_targets():
if not target_type or b2.build.type.is_derived(t.type(), target_type):
result.append(t.path())
for d in self.other_dg_:
result.extend(d.all_target_directories(target_type))
result = unique(result)
return result
| 38.581415
| 120
| 0.574045
|
3b7eca163baa98a288a84315b6f332c5db77fd5d
| 14,894
|
py
|
Python
|
EDK_II/BaseTools/Scripts/UpdateBuildVersions.py
|
tomix86/efi-tetris
|
67d495c3b00bc6d4abbdcaceae476f99f40afebf
|
[
"MIT"
] | 8
|
2019-06-03T10:47:48.000Z
|
2021-08-21T19:11:38.000Z
|
EDK_II/BaseTools/Scripts/UpdateBuildVersions.py
|
tomix86/efi-tetris
|
67d495c3b00bc6d4abbdcaceae476f99f40afebf
|
[
"MIT"
] | null | null | null |
EDK_II/BaseTools/Scripts/UpdateBuildVersions.py
|
tomix86/efi-tetris
|
67d495c3b00bc6d4abbdcaceae476f99f40afebf
|
[
"MIT"
] | 1
|
2021-04-21T06:20:00.000Z
|
2021-04-21T06:20:00.000Z
|
## @file
# Update build revisions of the tools when performing a developer build
#
# This script will modify the C/Include/Common/BuildVersion.h file and the two
# Python scripts, Python/Common/BuildVersion.py and Python/UPT/BuildVersion.py.
# If SVN is available, the tool will obtain the current checked out version of
# the source tree for inclusion in the --version commands.
# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
##
""" This program will update the BuildVersion.py and BuildVersion.h files used to set a tool's version value """
from __future__ import absolute_import
import os
import shlex
import subprocess
import sys
from argparse import ArgumentParser, SUPPRESS
from tempfile import NamedTemporaryFile
from types import IntType, ListType
SYS_ENV_ERR = "ERROR : %s system environment variable must be set prior to running this tool.\n"
__execname__ = "UpdateBuildVersions.py"
SVN_REVISION = "$Revision: 3 $"
SVN_REVISION = SVN_REVISION.replace("$Revision:", "").replace("$", "").strip()
__copyright__ = "Copyright (c) 2014, Intel Corporation. All rights reserved."
VERSION_NUMBER = "0.7.0"
__version__ = "Version %s.%s" % (VERSION_NUMBER, SVN_REVISION)
def ParseOptions():
"""
Parse the command-line options.
The options for this tool will be passed along to the MkBinPkg tool.
"""
parser = ArgumentParser(
usage=("%s [options]" % __execname__),
description=__copyright__,
conflict_handler='resolve')
# Standard Tool Options
parser.add_argument("--version", action="version",
version=__execname__ + " " + __version__)
parser.add_argument("-s", "--silent", action="store_true",
dest="silent",
help="All output will be disabled, pass/fail determined by the exit code")
parser.add_argument("-v", "--verbose", action="store_true",
dest="verbose",
help="Enable verbose output")
# Tool specific options
parser.add_argument("--revert", action="store_true",
dest="REVERT", default=False,
help="Revert the BuildVersion files only")
parser.add_argument("--svn-test", action="store_true",
dest="TEST_SVN", default=False,
help="Test if the svn command is available")
parser.add_argument("--svnFlag", action="store_true",
dest="HAVE_SVN", default=False,
help=SUPPRESS)
return(parser.parse_args())
def ShellCommandResults(CmdLine, Opt):
""" Execute the comand, returning the output content """
file_list = NamedTemporaryFile(delete=False)
filename = file_list.name
Results = []
returnValue = 0
try:
subprocess.check_call(args=shlex.split(CmdLine), stderr=subprocess.STDOUT, stdout=file_list)
except subprocess.CalledProcessError as err_val:
file_list.close()
if not Opt.silent:
sys.stderr.write("ERROR : %d : %s\n" % (err_val.returncode, err_val.__str__()))
if os.path.exists(filename):
sys.stderr.write(" : Partial results may be in this file: %s\n" % filename)
sys.stderr.flush()
returnValue = err_val.returncode
except IOError as (errno, strerror):
file_list.close()
if not Opt.silent:
sys.stderr.write("I/O ERROR : %s : %s\n" % (str(errno), strerror))
sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
if os.path.exists(filename):
sys.stderr.write(" : Partial results may be in this file: %s\n" % filename)
sys.stderr.flush()
returnValue = errno
except OSError as (errno, strerror):
file_list.close()
if not Opt.silent:
sys.stderr.write("OS ERROR : %s : %s\n" % (str(errno), strerror))
sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
if os.path.exists(filename):
sys.stderr.write(" : Partial results may be in this file: %s\n" % filename)
sys.stderr.flush()
returnValue = errno
except KeyboardInterrupt:
file_list.close()
if not Opt.silent:
sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
if os.path.exists(filename):
sys.stderr.write(" : Partial results may be in this file: %s\n" % filename)
sys.stderr.flush()
returnValue = 1
finally:
if not file_list.closed:
file_list.flush()
os.fsync(file_list.fileno())
file_list.close()
if os.path.exists(filename):
fd_ = open(filename, 'r')
Results = fd_.readlines()
fd_.close()
os.unlink(filename)
if returnValue > 0:
return returnValue
return Results
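# Hedged usage sketch (not in the original tool; the command is arbitrary):
# ShellCommandResults returns either a list of output lines or an integer
# return code, so callers must check the shape, as GetSvnRevision does below.
def _example_shell_usage(opts):
    output = ShellCommandResults("svn --version", opts)
    if type(output) is not ListType:
        return None  # an exit code came back instead of output lines
    return output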
def UpdateBuildVersionPython(Rev, UserModified, opts):
""" This routine will update the BuildVersion.h files in the C source tree """
for SubDir in ["Common", "UPT"]:
PyPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir)
BuildVersionPy = os.path.join(PyPath, "BuildVersion.py")
fd_ = open(os.path.normpath(BuildVersionPy), 'r')
contents = fd_.readlines()
fd_.close()
if opts.HAVE_SVN is False:
BuildVersionOrig = os.path.join(PyPath, "orig_BuildVersion.py")
fd_ = open (BuildVersionOrig, 'w')
for line in contents:
fd_.write(line)
fd_.flush()
fd_.close()
new_content = []
for line in contents:
if line.strip().startswith("gBUILD_VERSION"):
new_line = "gBUILD_VERSION = \"Developer Build based on Revision: %s\"" % Rev
if UserModified:
new_line = "gBUILD_VERSION = \"Developer Build based on Revision: %s with Modified Sources\"" % Rev
new_content.append(new_line)
continue
new_content.append(line)
fd_ = open(os.path.normpath(BuildVersionPy), 'w')
for line in new_content:
fd_.write(line)
fd_.close()
def UpdateBuildVersionH(Rev, UserModified, opts):
""" This routine will update the BuildVersion.h files in the C source tree """
CPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common")
BuildVersionH = os.path.join(CPath, "BuildVersion.h")
fd_ = open(os.path.normpath(BuildVersionH), 'r')
contents = fd_.readlines()
fd_.close()
if opts.HAVE_SVN is False:
BuildVersionOrig = os.path.join(CPath, "orig_BuildVersion.h")
fd_ = open(BuildVersionOrig, 'w')
for line in contents:
fd_.write(line)
fd_.flush()
fd_.close()
new_content = []
for line in contents:
if line.strip().startswith("#define"):
new_line = "#define __BUILD_VERSION \"Developer Build based on Revision: %s\"" % Rev
if UserModified:
new_line = "#define __BUILD_VERSION \"Developer Build based on Revision: %s with Modified Sources\"" % \
Rev
new_content.append(new_line)
continue
new_content.append(line)
fd_ = open(os.path.normpath(BuildVersionH), 'w')
for line in new_content:
fd_.write(line)
fd_.close()
def RevertCmd(Filename, Opt):
""" This is the shell command that does the SVN revert """
CmdLine = "svn revert %s" % Filename.replace("\\", "/").strip()
try:
subprocess.check_output(args=shlex.split(CmdLine))
except subprocess.CalledProcessError as err_val:
if not Opt.silent:
sys.stderr.write("Subprocess ERROR : %s\n" % err_val)
sys.stderr.flush()
except IOError as (errno, strerror):
if not Opt.silent:
sys.stderr.write("I/O ERROR : %d : %s\n" % (str(errno), strerror))
sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
sys.stderr.flush()
except OSError as (errno, strerror):
if not Opt.silent:
sys.stderr.write("OS ERROR : %d : %s\n" % (str(errno), strerror))
sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
sys.stderr.flush()
except KeyboardInterrupt:
if not Opt.silent:
sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
sys.stderr.flush()
if Opt.verbose:
sys.stdout.write("Reverted this file: %s\n" % Filename)
sys.stdout.flush()
def GetSvnRevision(opts):
""" Get the current revision of the BaseTools/Source tree, and check if any of the files have been modified """
Revision = "Unknown"
Modified = False
if opts.HAVE_SVN is False:
sys.stderr.write("WARNING: the svn command-line tool is not available.\n")
return (Revision, Modified)
SrcPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source")
# Check if there are modified files.
Cwd = os.getcwd()
os.chdir(SrcPath)
StatusCmd = "svn st -v --depth infinity --non-interactive"
contents = ShellCommandResults(StatusCmd, opts)
os.chdir(Cwd)
if type(contents) is ListType:
for line in contents:
if line.startswith("M "):
Modified = True
break
# Get the repository revision of BaseTools/Source
InfoCmd = "svn info %s" % SrcPath.replace("\\", "/").strip()
Revision = 0
contents = ShellCommandResults(InfoCmd, opts)
if type(contents) is IntType:
return 0, Modified
for line in contents:
line = line.strip()
if line.startswith("Revision:"):
Revision = line.replace("Revision:", "").strip()
break
return (Revision, Modified)
def CheckSvn(opts):
"""
This routine will return True if an svn --version command succeeds, or False if it fails.
If it failed, SVN is not available.
"""
OriginalSilent = opts.silent
opts.silent = True
VerCmd = "svn --version"
contents = ShellCommandResults(VerCmd, opts)
opts.silent = OriginalSilent
if type(contents) is IntType:
if opts.verbose:
sys.stdout.write("SVN does not appear to be available.\n")
sys.stdout.flush()
return False
if opts.verbose:
sys.stdout.write("Found %s" % contents[0])
sys.stdout.flush()
return True
def CopyOrig(Src, Dest, Opt):
""" Overwrite the Dest File with the Src File content """
try:
fd_ = open(Src, 'r')
contents = fd_.readlines()
fd_.close()
fd_ = open(Dest, 'w')
for line in contents:
fd_.write(line)
fd_.flush()
fd_.close()
except IOError:
if not Opt.silent:
sys.stderr.write("Unable to restore this file: %s\n" % Dest)
sys.stderr.flush()
return 1
os.remove(Src)
if Opt.verbose:
sys.stdout.write("Restored this file: %s\n" % Src)
sys.stdout.flush()
return 0
def CheckOriginals(Opts):
"""
If SVN was not available, then the tools may have made copies of the original BuildVersion.* files using
orig_BuildVersion.* for the name. If they exist, replace the existing BuildVersion.* file with the corresponding
orig_BuildVersion.* file.
Returns 0 if this succeeds, or 1 if the copy function fails. It will also return 0 if the orig_BuildVersion.* file
does not exist.
"""
CPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common")
BuildVersionH = os.path.join(CPath, "BuildVersion.h")
OrigBuildVersionH = os.path.join(CPath, "orig_BuildVersion.h")
if not os.path.exists(OrigBuildVersionH):
return 0
if CopyOrig(OrigBuildVersionH, BuildVersionH, Opts):
return 1
for SubDir in ["Common", "UPT"]:
PyPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir)
BuildVersionPy = os.path.join(PyPath, "BuildVersion.py")
OrigBuildVersionPy = os.path.join(PyPath, "orig_BuildVersion.py")
if not os.path.exists(OrigBuildVersionPy):
return 0
if CopyOrig(OrigBuildVersionPy, BuildVersionPy, Opts):
return 1
return 0
def RevertBuildVersionFiles(opts):
"""
This routine will attempt to perform an SVN --revert on each of the BuildVersion.* files
"""
if not opts.HAVE_SVN:
if CheckOriginals(opts):
return 1
return 0
# SVN is available
BuildVersionH = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common", "BuildVersion.h")
RevertCmd(BuildVersionH, opts)
for SubDir in ["Common", "UPT"]:
BuildVersionPy = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir, "BuildVersion.py")
RevertCmd(BuildVersionPy, opts)
def UpdateRevisionFiles():
""" Main routine that will update the BuildVersion.py and BuildVersion.h files."""
options = ParseOptions()
# Check the working environment
if "WORKSPACE" not in os.environ.keys():
sys.stderr.write(SYS_ENV_ERR % 'WORKSPACE')
return 1
if 'BASE_TOOLS_PATH' not in os.environ.keys():
sys.stderr.write(SYS_ENV_ERR % 'BASE_TOOLS_PATH')
return 1
if not os.path.exists(os.environ['BASE_TOOLS_PATH']):
sys.stderr.write("Unable to locate the %s directory." % os.environ['BASE_TOOLS_PATH'])
return 1
options.HAVE_SVN = CheckSvn(options)
if options.TEST_SVN:
return (not options.HAVE_SVN)
# Done processing the options; from here on options.HAVE_SVN is used as a flag. True = Have it, False = Don't have it.
if options.REVERT:
# Just revert the tools and exit
RevertBuildVersionFiles(options)
else:
# Revert any changes in the BuildVersion.* files before setting them again.
RevertBuildVersionFiles(options)
Revision, Modified = GetSvnRevision(options)
if options.verbose:
sys.stdout.write("Revision: %s is Modified: %s\n" % (Revision, Modified))
sys.stdout.flush()
UpdateBuildVersionH(Revision, Modified, options)
UpdateBuildVersionPython(Revision, Modified, options)
return 0
if __name__ == "__main__":
sys.exit(UpdateRevisionFiles())
| 37.142145
| 120
| 0.625151
|
027f006fa3e52abbaeb56769ddc741fa9aae8536
| 11,075
|
py
|
Python
|
sdk/python/pulumi_azure_native/web/web_app_host_name_binding_slot.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/web/web_app_host_name_binding_slot.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/web/web_app_host_name_binding_slot.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = ['WebAppHostNameBindingSlot']
class WebAppHostNameBindingSlot(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_resource_name: Optional[pulumi.Input[str]] = None,
azure_resource_type: Optional[pulumi.Input['AzureResourceType']] = None,
custom_host_name_dns_record_type: Optional[pulumi.Input['CustomHostNameDnsRecordType']] = None,
domain_id: Optional[pulumi.Input[str]] = None,
host_name: Optional[pulumi.Input[str]] = None,
host_name_type: Optional[pulumi.Input['HostNameType']] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
ssl_state: Optional[pulumi.Input['SslState']] = None,
thumbprint: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A hostname binding object.
API Version: 2020-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] azure_resource_name: Azure resource name.
:param pulumi.Input['AzureResourceType'] azure_resource_type: Azure resource type.
:param pulumi.Input['CustomHostNameDnsRecordType'] custom_host_name_dns_record_type: Custom DNS record type.
:param pulumi.Input[str] domain_id: Fully qualified ARM domain resource URI.
:param pulumi.Input[str] host_name: Hostname in the hostname binding.
:param pulumi.Input['HostNameType'] host_name_type: Hostname type.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] site_name: App Service app name.
:param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API will create a binding for the production slot.
:param pulumi.Input['SslState'] ssl_state: SSL type
:param pulumi.Input[str] thumbprint: SSL certificate thumbprint
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_resource_name'] = azure_resource_name
__props__['azure_resource_type'] = azure_resource_type
__props__['custom_host_name_dns_record_type'] = custom_host_name_dns_record_type
__props__['domain_id'] = domain_id
__props__['host_name'] = host_name
__props__['host_name_type'] = host_name_type
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['site_name'] = site_name
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__['slot'] = slot
__props__['ssl_state'] = ssl_state
__props__['thumbprint'] = thumbprint
__props__['system_data'] = None
__props__['type'] = None
__props__['virtual_ip'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/latest:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/latest:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppHostNameBindingSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppHostNameBindingSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppHostNameBindingSlot, __self__).__init__(
'azure-native:web:WebAppHostNameBindingSlot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppHostNameBindingSlot':
"""
Get an existing WebAppHostNameBindingSlot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["azure_resource_name"] = None
__props__["azure_resource_type"] = None
__props__["custom_host_name_dns_record_type"] = None
__props__["domain_id"] = None
__props__["host_name_type"] = None
__props__["kind"] = None
__props__["name"] = None
__props__["site_name"] = None
__props__["ssl_state"] = None
__props__["system_data"] = None
__props__["thumbprint"] = None
__props__["type"] = None
__props__["virtual_ip"] = None
return WebAppHostNameBindingSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureResourceName")
def azure_resource_name(self) -> pulumi.Output[Optional[str]]:
"""
Azure resource name.
"""
return pulumi.get(self, "azure_resource_name")
@property
@pulumi.getter(name="azureResourceType")
def azure_resource_type(self) -> pulumi.Output[Optional[str]]:
"""
Azure resource type.
"""
return pulumi.get(self, "azure_resource_type")
@property
@pulumi.getter(name="customHostNameDnsRecordType")
def custom_host_name_dns_record_type(self) -> pulumi.Output[Optional[str]]:
"""
Custom DNS record type.
"""
return pulumi.get(self, "custom_host_name_dns_record_type")
@property
@pulumi.getter(name="domainId")
def domain_id(self) -> pulumi.Output[Optional[str]]:
"""
Fully qualified ARM domain resource URI.
"""
return pulumi.get(self, "domain_id")
@property
@pulumi.getter(name="hostNameType")
def host_name_type(self) -> pulumi.Output[Optional[str]]:
"""
Hostname type.
"""
return pulumi.get(self, "host_name_type")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="siteName")
def site_name(self) -> pulumi.Output[Optional[str]]:
"""
App Service app name.
"""
return pulumi.get(self, "site_name")
@property
@pulumi.getter(name="sslState")
def ssl_state(self) -> pulumi.Output[Optional[str]]:
"""
SSL type
"""
return pulumi.get(self, "ssl_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[Optional[str]]:
"""
SSL certificate thumbprint
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualIP")
def virtual_ip(self) -> pulumi.Output[str]:
"""
Virtual IP address assigned to the hostname if IP based SSL is enabled.
"""
return pulumi.get(self, "virtual_ip")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
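# Hedged usage sketch (not part of the generated SDK): the resource, group,
# slot and host names below are made-up assumptions.
def _example_binding():
    return WebAppHostNameBindingSlot(
        "examplebinding",
        name="my-app",
        resource_group_name="my-rg",
        slot="staging",
        host_name="www.example.com")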
| 45.389344
| 1,491
| 0.657698
|
5617e11caa2e1be0f9bec258233f522b191895ad
| 7,359
|
py
|
Python
|
stdplugins/gmquote.py
|
ppppspsljdhdd/Pepe
|
1e57825ddb0ab3ba15a19cad0ecfbf2622f6b851
|
[
"Apache-2.0"
] | 20
|
2020-01-25T05:08:26.000Z
|
2022-01-18T07:37:53.000Z
|
stdplugins/gmquote.py
|
ishaizz/PepeBot
|
7440cadc8228106d221fc8e436a0809a86be5159
|
[
"Apache-2.0"
] | 15
|
2019-11-07T07:53:56.000Z
|
2022-01-23T09:21:17.000Z
|
stdplugins/gmquote.py
|
ishaizz/PepeBot
|
7440cadc8228106d221fc8e436a0809a86be5159
|
[
"Apache-2.0"
] | 62
|
2019-10-20T06:35:19.000Z
|
2021-01-23T17:26:05.000Z
|
# Edited by @Smart_S54
"""
good morning plugin
command: `.ggm`
"""
import asyncio
import random
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="ggm"))
async def _(event):
if event.fwd_from:
return
await event.edit("`Wishing GM...`")
await asyncio.sleep(2)
x = random.randrange(1, 36)  # upper bound is exclusive, so this picks 1..35
if x == 1:
await event.edit(
'`"🥰 Life is full of uncertainties. But there will always be a sunrise after every sunset. Good morning!"`'
)
if x == 2:
await event.edit(
'`"🥰 It doesn’t matter how bad was your yesterday. Today, you are going to make it a good one. Wishing you a good morning!"`'
)
if x == 3:
await event.edit(
'`"🥰 If you want to gain health and beauty, you should wake up early. Good morning!"`'
)
if x == 4:
await event.edit(
'`"🥰 May this morning offer you new hope for life! May you be happy and enjoy every moment of it. Good morning!"`'
)
if x == 5:
await event.edit(
'`"🥰 May the sun shower you with blessings and prosperity in the days ahead. Good morning."`'
)
if x == 6:
await event.edit(
'`"🥰 Every sunrise marks the rise of life over death, hope over despair and happiness over suffering. Wishing you a very enjoyable morning today!"`'
)
if x == 7:
await event.edit(
'`"🥰 Wake up and make yourself a part of this beautiful morning. A beautiful world is waiting outside your door. Have an enjoyable time!"`'
)
if x == 8:
await event.edit(
'`"🥰 Welcome this beautiful morning with a smile on your face. I hope you’ll have a great day today. Wishing you a very good morning!"`'
)
if x == 9:
await event.edit(
'`"🥰 You have been blessed with yet another day. What a wonderful way of welcoming the blessing with such a beautiful morning! Good morning to you!"`'
)
if x == 10:
await event.edit(
'`"🥰 Waking up in such a beautiful morning is a guaranty for a day that’s beyond amazing. I hope you’ll make the best of it. Good morning!"`'
)
if x == 11:
await event.edit(
'`"🥰 Nothing is more refreshing than a beautiful morning that calms your mind and gives you reasons to smile. Good morning! Wishing you a great day."`'
)
if x == 12:
await event.edit(
'`"🥰 Another day has just started. Welcome the blessings of this beautiful morning. Rise and shine like you always do. Wishing you a wonderful morning!"`'
)
if x == 13:
await event.edit(
'`"🥰 Wake up like the sun every morning and light up the world your awesomeness. You have so many great things to achieve today. Good morning!"`'
)
if x == 14:
await event.edit(
'`"🥰 A new day has come with so many new opportunities for you. Grab them all and make the best out of your day. Here’s me wishing you a good morning!"`'
)
if x == 15:
await event.edit(
'`"🥰 The darkness of night has ended. A new sun is up there to guide you towards a life so bright and blissful. Good morning dear!"`'
)
if x == 16:
await event.edit(
'`"🥰 Wake up, have your cup of morning tea and let the morning wind freshen you up like a happiness pill. Wishing you a good morning and a good day ahead!"`'
)
if x == 17:
await event.edit(
'`"🥰 Sunrises are the best; enjoy a cup of coffee or tea with yourself because this day is yours, good morning! Have a wonderful day ahead."`'
)
if x == 18:
await event.edit(
'`"🥰 A bad day will always have a good morning, hope all your worries are gone and everything you wish could find a place. Good morning!"`'
)
if x == 19:
await event.edit(
'`"🥰 A great end may not be decided but a good creative beginning can be planned and achieved. Good morning, have a productive day!"`'
)
if x == 20:
await event.edit(
'`"🥰 Having a sweet morning, a cup of coffee, a day with your loved ones is what sets your “Good Morning” have a nice day!"`'
)
if x == 21:
await event.edit(
'`"🥰 Anything can go wrong in the day but the morning has to be beautiful, so I am making sure your morning starts beautiful. Good morning!"`'
)
if x == 22:
await event.edit(
'`"🥰 Open your eyes with a smile, pray and thank god that you are waking up to a new beginning. Good morning!"`'
)
if x == 23:
await event.edit(
'`"🥰 Morning is not only sunrise but A Beautiful Miracle of God that defeats the darkness and spread light. Good Morning."`'
)
if x == 24:
await event.edit(
'`"🥰 Life never gives you a second chance. So, enjoy every bit of it. Why not start with this beautiful morning. Good Morning!"`'
)
if x == 25:
await event.edit(
'`"🥰 If you want to gain health and beauty, you should wake up early. Good Morning!"`'
)
if x == 26:
await event.edit(
'`"🥰 Birds are singing sweet melodies and a gentle breeze is blowing through the trees, what a perfect morning to wake you up. Good morning!"`'
)
if x == 27:
await event.edit(
'`"🥰 This morning is so relaxing and beautiful that I really don’t want you to miss it in any way. So, wake up dear friend. A hearty good morning to you!"`'
)
if x == 28:
await event.edit(
'`"🥰 Mornings come with a blank canvas. Paint it as you like and call it a day. Wake up now and start creating your perfect day. Good morning!"`'
)
if x == 29:
await event.edit(
'`"🥰 Every morning brings you new hopes and new opportunities. Don’t miss any one of them while you’re sleeping. Good morning!"`'
)
if x == 30:
await event.edit(
'`"🥰 Start your day with solid determination and great attitude. You’re going to have a good day today. Good morning my friend!"`'
)
if x == 31:
await event.edit(
'`"🥰 Friendship is what makes life worth living. I want to thank you for being such a special friend of mine. Good morning to you!"`'
)
if x == 32:
await event.edit(
'`"🥰 A friend like you is pretty hard to come by in life. I must consider myself lucky enough to have you. Good morning. Wish you an amazing day ahead!"`'
)
if x == 33:
await event.edit(
'`"🥰 The more you count yourself as blessed, the more blessed you will be. Thank God for this beautiful morning and let friendship and love prevail this morning."`'
)
if x == 34:
await event.edit(
'`"🥰 Wake up and sip a cup of loving friendship. Eat your heart out from a plate of hope. To top it up, a fork full of kindness and love. Enough for a happy good morning!`'
)
if x == 35:
await event.edit(
'`"🥰 It is easy to imagine the world coming to an end. But it is difficult to imagine spending a day without my friends. Good morning."`'
)
| 36.979899
| 184
| 0.598043
|
94c54dd323d93eb5613f76765381e5d06fdc40a5
| 6,597
|
py
|
Python
|
homeassistant/components/remote/__init__.py
|
zechfox/core
|
a90d3a051fc402957c24ec782fca1d2f6d9cf8dc
|
[
"Apache-2.0"
] | 1
|
2021-07-31T15:19:30.000Z
|
2021-07-31T15:19:30.000Z
|
homeassistant/components/remote/__init__.py
|
zechfox/core
|
a90d3a051fc402957c24ec782fca1d2f6d9cf8dc
|
[
"Apache-2.0"
] | 25
|
2021-05-04T08:33:38.000Z
|
2022-03-31T06:10:33.000Z
|
homeassistant/components/remote/__init__.py
|
zechfox/core
|
a90d3a051fc402957c24ec782fca1d2f6d9cf8dc
|
[
"Apache-2.0"
] | null | null | null |
"""Support to interface with universal remote control devices."""
from __future__ import annotations
from collections.abc import Iterable
from datetime import timedelta
import functools as ft
import logging
from typing import Any, cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_ACTIVITY = "activity"
ATTR_ACTIVITY_LIST = "activity_list"
ATTR_CURRENT_ACTIVITY = "current_activity"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE = "device"
ATTR_NUM_REPEATS = "num_repeats"
ATTR_DELAY_SECS = "delay_secs"
ATTR_HOLD_SECS = "hold_secs"
ATTR_ALTERNATIVE = "alternative"
ATTR_TIMEOUT = "timeout"
DOMAIN = "remote"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SERVICE_SEND_COMMAND = "send_command"
SERVICE_LEARN_COMMAND = "learn_command"
SERVICE_DELETE_COMMAND = "delete_command"
SERVICE_SYNC = "sync"
DEFAULT_NUM_REPEATS = 1
DEFAULT_DELAY_SECS = 0.4
DEFAULT_HOLD_SECS = 0
SUPPORT_LEARN_COMMAND = 1
SUPPORT_DELETE_COMMAND = 2
SUPPORT_ACTIVITY = 4
REMOTE_SERVICE_ACTIVITY_SCHEMA = make_entity_service_schema(
{vol.Optional(ATTR_ACTIVITY): cv.string}
)
@bind_hass
def is_on(hass: HomeAssistantType, entity_id: str) -> bool:
"""Return if the remote is on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Track states and offer events for remotes."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_OFF, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_turn_off"
)
component.async_register_entity_service(
SERVICE_TURN_ON, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_turn_on"
)
component.async_register_entity_service(
SERVICE_TOGGLE, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_toggle"
)
component.async_register_entity_service(
SERVICE_SEND_COMMAND,
{
vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DEVICE): cv.string,
vol.Optional(
ATTR_NUM_REPEATS, default=DEFAULT_NUM_REPEATS
): cv.positive_int,
vol.Optional(ATTR_DELAY_SECS): vol.Coerce(float),
vol.Optional(ATTR_HOLD_SECS, default=DEFAULT_HOLD_SECS): vol.Coerce(float),
},
"async_send_command",
)
component.async_register_entity_service(
SERVICE_LEARN_COMMAND,
{
vol.Optional(ATTR_DEVICE): cv.string,
vol.Optional(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_COMMAND_TYPE): cv.string,
vol.Optional(ATTR_ALTERNATIVE): cv.boolean,
vol.Optional(ATTR_TIMEOUT): cv.positive_int,
},
"async_learn_command",
)
component.async_register_entity_service(
SERVICE_DELETE_COMMAND,
{
vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DEVICE): cv.string,
},
"async_delete_command",
)
return True
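# Hedged illustration of service-call data accepted by the send_command
# service registered above; the entity id and command names are made up.
_EXAMPLE_SEND_COMMAND_DATA = {
    "entity_id": "remote.living_room",
    ATTR_COMMAND: ["power", "volume_up"],
    ATTR_NUM_REPEATS: 2,
}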
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
return await cast(EntityComponent, hass.data[DOMAIN]).async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await cast(EntityComponent, hass.data[DOMAIN]).async_unload_entry(entry)
class RemoteEntity(ToggleEntity):
"""Base class for remote entities."""
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0
@property
def current_activity(self) -> str | None:
"""Active activity."""
return None
@property
def activity_list(self) -> list[str] | None:
"""List of available activities."""
return None
@final
@property
def state_attributes(self) -> dict[str, Any] | None:
"""Return optional state attributes."""
if not self.supported_features & SUPPORT_ACTIVITY:
return None
return {
ATTR_ACTIVITY_LIST: self.activity_list,
ATTR_CURRENT_ACTIVITY: self.current_activity,
}
def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send commands to a device."""
raise NotImplementedError()
async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send commands to a device."""
await self.hass.async_add_executor_job(
ft.partial(self.send_command, command, **kwargs)
)
def learn_command(self, **kwargs: Any) -> None:
"""Learn a command from a device."""
raise NotImplementedError()
async def async_learn_command(self, **kwargs: Any) -> None:
"""Learn a command from a device."""
await self.hass.async_add_executor_job(ft.partial(self.learn_command, **kwargs))
def delete_command(self, **kwargs: Any) -> None:
"""Delete commands from the database."""
raise NotImplementedError()
async def async_delete_command(self, **kwargs: Any) -> None:
"""Delete commands from the database."""
await self.hass.async_add_executor_job(
ft.partial(self.delete_command, **kwargs)
)
class RemoteDevice(RemoteEntity):
"""Representation of a remote (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"RemoteDevice is deprecated, modify %s to extend RemoteEntity",
cls.__name__,
)
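# Hedged sketch of a minimal concrete remote (illustrative only): it records
# the commands it is asked to send instead of talking to real hardware.
class _ExampleRemote(RemoteEntity):
    def __init__(self) -> None:
        self._sent: list[str] = []
    def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
        # Record the commands for inspection.
        self._sent.extend(command)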
| 30.971831
| 88
| 0.695771
|
1491254ce933c49373d589978913e2692f36f4c9
| 4,810
|
py
|
Python
|
main.py
|
atotev/AMNet
|
5d9e6925cbae402ef9825d1a84c3ab4b41921c05
|
[
"MIT"
] | null | null | null |
main.py
|
atotev/AMNet
|
5d9e6925cbae402ef9825d1a84c3ab4b41921c05
|
[
"MIT"
] | null | null | null |
main.py
|
atotev/AMNet
|
5d9e6925cbae402ef9825d1a84c3ab4b41921c05
|
[
"MIT"
] | null | null | null |
__author__ = 'Jiri Fajtl'
__email__ = 'ok1zjf@gmail.com'
__version__= '3.5'
__status__ = "Research"
__date__ = "30/1/2018"
__license__= "MIT License"
import argparse
from amnet_model import *
import amnet_model as amnet_model
from amnet import *
import amnet as amnet
from config import *
from utils import *
def main():
parser = argparse.ArgumentParser(description='AMNet Image memorability prediction with attention')
parser.add_argument('--gpu', default=0, type=int, help='GPU ID. If -1 the application will run on CPU')
parser.add_argument('--model-weights', default='', type=str, help='pkl file with the model weights')
parser.add_argument('--cnn', default='ResNet50FC', type=str, help='Name of CNN model for feature extraction [ResNet18FC, ResNet50FC, ResNet101FC, VGG16FC, ResNet50FT]')
parser.add_argument('--att-off', action="store_true", help='Runs training/testing without the visual attention')
parser.add_argument('--lstm-steps', default=3, type=int,
help='Number of LSTM steps. Default 3. To disable LSTM set to zero')
parser.add_argument('--last-step-prediction', action="store_true",
help='Predicts memorability only at the last LSTM step')
parser.add_argument('--test', action='store_true', help='Run evaluation')
parser.add_argument('--eval-images', default='', type=str, help='Directory or a csv file with images to predict memorability for')
parser.add_argument('--csv-out', default='', type=str, help='File where to save predicted memorabilities in csv format')
parser.add_argument('--att-maps-out', default='', type=str, help='Directory where to store attention maps')
# Training
parser.add_argument('--epoch-max', default=-1, type=int,
help='If not specified, number of epochs will be set according to selected dataset')
parser.add_argument('--epoch-start', default=0, type=int,
help='Allows to resume training from a specific epoch')
parser.add_argument('--train-batch-size', default=-1, type=int,
help='If not specified a default size will be set according to selected dataset')
parser.add_argument('--test-batch-size', default=-1, type=int,
help='If not specified a default size will be set according to selected dataset')
# Dataset configuration
parser.add_argument('--dataset', default='lamem', type=str, help='Dataset name [lamem, sun]')
parser.add_argument('--experiment', default='', type=str, help='Experiment name. Usually no need to set' )
parser.add_argument('--dataset-root', default='', type=str, help='Dataset root directory')
parser.add_argument('--images-dir', default='images', type=str, help='Relative path to the test/train images')
parser.add_argument('--splits-dir', default='splits', type=str, help='Relative path to directory with split files')
parser.add_argument('--train-split', default='', type=str, help='Train split filename e.g. train_2')
parser.add_argument('--val-split', default='', type=str, help='Validation split filename e.g. val_2')
parser.add_argument('--test-split', default='', type=str, help='Test split filename e.g. test_2')
args = parser.parse_args()
hps = get_amnet_config(args)
print("Configuration")
print("----------------------------------------------------------------------")
print(hps)
amnet = AMNet()
amnet.init(hps)
if hps.test_split != '':
split_files = get_split_files(hps.dataset_root, hps.splits_dir, hps.test_split)
if hps.model_weights == '':
weight_files = get_weight_files(split_files, experiment_name=hps.experiment_name, max_rc_checkpoints=True)
else:
weight_files = [hps.model_weights]
print("Splits: ", split_files)
print("Model weights: ", weight_files)
amnet.eval_models(weight_files, split_files)
return
if hps.eval_images != '':
if hps.model_weights == '' or not os.path.isfile(hps.model_weights):
print("You need to specify path to model weights with parameter --model-weights")
return
print("Images filename/path: ", hps.eval_images)
print("Model weights: ", hps.model_weights)
result = amnet.predict_memorability(hps.eval_images)
result.write_stdout()
if args.csv_out != '':
print("Saving memorabilities to:",args.csv_out)
result.write_csv(args.csv_out)
if args.att_maps_out != '':
print("Saving attention maps to:", args.att_maps_out)
result.write_attention_maps(args.att_maps_out)
return
# Training phase
amnet.train()
if __name__ == "__main__":
print_pkg_versions()
main()
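# Hedged CLI sketch (paths and filenames below are made-up assumptions):
#   python main.py --cnn ResNet50FC --dataset lamem --dataset-root /data/lamem
#   python main.py --model-weights model.pkl --eval-images ./photos --csv-out mem.csv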
| 44.953271
| 173
| 0.663825
|
21aa19fb9daf139b6ec120c31982ffc8794646e4
| 122
|
py
|
Python
|
src/losses/vicreg.py
|
johanattia/tensorflow-saint
|
95bbfaf51c1f1be96a2884dc664b10e9dd183984
|
[
"MIT"
] | 3
|
2022-03-30T20:07:55.000Z
|
2022-03-30T21:40:14.000Z
|
src/losses/vicreg.py
|
johanattia/tensorflow-saint
|
95bbfaf51c1f1be96a2884dc664b10e9dd183984
|
[
"MIT"
] | null | null | null |
src/losses/vicreg.py
|
johanattia/tensorflow-saint
|
95bbfaf51c1f1be96a2884dc664b10e9dd183984
|
[
"MIT"
] | null | null | null |
"""Self-supervision with VICReg Objective"""
import tensorflow as tf
class VICRegLoss(tf.keras.losses.Loss):
pass
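# Hedged sketch (not the repository's implementation): one plausible way to
# fill in the stub above, following VICReg (Bardes et al., 2021) with its
# invariance, variance and covariance terms; the weights are assumptions.
class VICRegLossSketch(tf.keras.losses.Loss):
    def __init__(self, inv_weight=25.0, var_weight=25.0, cov_weight=1.0, **kwargs):
        super().__init__(**kwargs)
        self.inv_weight = inv_weight
        self.var_weight = var_weight
        self.cov_weight = cov_weight
    def call(self, z_a, z_b):
        # Invariance: mean squared distance between the two embeddings.
        inv = tf.reduce_mean(tf.math.squared_difference(z_a, z_b))
        def variance(z):
            # Hinge loss keeping the std of every embedding dimension above 1.
            std = tf.sqrt(tf.math.reduce_variance(z, axis=0) + 1e-4)
            return tf.reduce_mean(tf.nn.relu(1.0 - std))
        def covariance(z):
            # Penalize off-diagonal entries of the feature covariance matrix.
            n = tf.cast(tf.shape(z)[0], z.dtype)
            d = tf.cast(tf.shape(z)[1], z.dtype)
            z = z - tf.reduce_mean(z, axis=0)
            cov = tf.matmul(z, z, transpose_a=True) / (n - 1.0)
            off_diag = cov - tf.linalg.diag(tf.linalg.diag_part(cov))
            return tf.reduce_sum(tf.square(off_diag)) / d
        return (self.inv_weight * inv
                + self.var_weight * (variance(z_a) + variance(z_b))
                + self.cov_weight * (covariance(z_a) + covariance(z_b)))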
| 13.555556
| 44
| 0.737705
|
a72b3b6b4c727903c9ea87fe6d7166e0bd1b8aee
| 1,571
|
py
|
Python
|
debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/test_vmmode.py
|
bopopescu/stacklab-nova
|
4ab1698659b663ef222255610d1a5c042706dd65
|
[
"Apache-2.0"
] | null | null | null |
debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/test_vmmode.py
|
bopopescu/stacklab-nova
|
4ab1698659b663ef222255610d1a5c042706dd65
|
[
"Apache-2.0"
] | null | null | null |
debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/test_vmmode.py
|
bopopescu/stacklab-nova
|
4ab1698659b663ef222255610d1a5c042706dd65
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:31:57.000Z
|
2020-07-24T08:31:57.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.compute import vm_mode
class ComputeVMModeTest(test.TestCase):
def test_case(self):
inst = dict(vm_mode="HVM")
mode = vm_mode.get_from_instance(inst)
self.assertEqual(mode, "hvm")
def test_legacy_pv(self):
inst = dict(vm_mode="pv")
mode = vm_mode.get_from_instance(inst)
self.assertEqual(mode, "xen")
def test_legacy_hv(self):
inst = dict(vm_mode="hv")
mode = vm_mode.get_from_instance(inst)
self.assertEqual(mode, "hvm")
def test_bogus(self):
inst = dict(vm_mode="wibble")
self.assertRaises(exception.Invalid,
vm_mode.get_from_instance,
inst)
def test_good(self):
inst = dict(vm_mode="hvm")
mode = vm_mode.get_from_instance(inst)
self.assertEqual(mode, "hvm")
| 31.42
| 78
| 0.65818
|
13e5c4cdc6d640b1791bef9d6e364fdf8a3c6329
| 8,304
|
py
|
Python
|
openpeerpower/components/binary_sensor/device_condition.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/binary_sensor/device_condition.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/binary_sensor/device_condition.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Implement device conditions for binary sensor."""
from __future__ import annotations
import voluptuous as vol
from openpeerpower.components.device_automation.const import CONF_IS_OFF, CONF_IS_ON
from openpeerpower.const import ATTR_DEVICE_CLASS, CONF_ENTITY_ID, CONF_FOR, CONF_TYPE
from openpeerpower.core import OpenPeerPower, callback
from openpeerpower.helpers import condition, config_validation as cv
from openpeerpower.helpers.entity_registry import (
async_entries_for_device,
async_get_registry,
)
from openpeerpower.helpers.typing import ConfigType
from . import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_COLD,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASS_WINDOW,
DOMAIN,
)
DEVICE_CLASS_NONE = "none"
CONF_IS_BAT_LOW = "is_bat_low"
CONF_IS_NOT_BAT_LOW = "is_not_bat_low"
CONF_IS_CHARGING = "is_charging"
CONF_IS_NOT_CHARGING = "is_not_charging"
CONF_IS_COLD = "is_cold"
CONF_IS_NOT_COLD = "is_not_cold"
CONF_IS_CONNECTED = "is_connected"
CONF_IS_NOT_CONNECTED = "is_not_connected"
CONF_IS_GAS = "is_gas"
CONF_IS_NO_GAS = "is_no_gas"
CONF_IS_HOT = "is_hot"
CONF_IS_NOT_HOT = "is_not_hot"
CONF_IS_LIGHT = "is_light"
CONF_IS_NO_LIGHT = "is_no_light"
CONF_IS_LOCKED = "is_locked"
CONF_IS_NOT_LOCKED = "is_not_locked"
CONF_IS_MOIST = "is_moist"
CONF_IS_NOT_MOIST = "is_not_moist"
CONF_IS_MOTION = "is_motion"
CONF_IS_NO_MOTION = "is_no_motion"
CONF_IS_MOVING = "is_moving"
CONF_IS_NOT_MOVING = "is_not_moving"
CONF_IS_OCCUPIED = "is_occupied"
CONF_IS_NOT_OCCUPIED = "is_not_occupied"
CONF_IS_PLUGGED_IN = "is_plugged_in"
CONF_IS_NOT_PLUGGED_IN = "is_not_plugged_in"
CONF_IS_POWERED = "is_powered"
CONF_IS_NOT_POWERED = "is_not_powered"
CONF_IS_PRESENT = "is_present"
CONF_IS_NOT_PRESENT = "is_not_present"
CONF_IS_PROBLEM = "is_problem"
CONF_IS_NO_PROBLEM = "is_no_problem"
CONF_IS_UNSAFE = "is_unsafe"
CONF_IS_NOT_UNSAFE = "is_not_unsafe"
CONF_IS_SMOKE = "is_smoke"
CONF_IS_NO_SMOKE = "is_no_smoke"
CONF_IS_SOUND = "is_sound"
CONF_IS_NO_SOUND = "is_no_sound"
CONF_IS_VIBRATION = "is_vibration"
CONF_IS_NO_VIBRATION = "is_no_vibration"
CONF_IS_OPEN = "is_open"
CONF_IS_NOT_OPEN = "is_not_open"
IS_ON = [
CONF_IS_BAT_LOW,
CONF_IS_CHARGING,
CONF_IS_COLD,
CONF_IS_CONNECTED,
CONF_IS_GAS,
CONF_IS_HOT,
CONF_IS_LIGHT,
CONF_IS_NOT_LOCKED,
CONF_IS_MOIST,
CONF_IS_MOTION,
CONF_IS_MOVING,
CONF_IS_OCCUPIED,
CONF_IS_OPEN,
CONF_IS_PLUGGED_IN,
CONF_IS_POWERED,
CONF_IS_PRESENT,
CONF_IS_PROBLEM,
CONF_IS_SMOKE,
CONF_IS_SOUND,
CONF_IS_UNSAFE,
CONF_IS_VIBRATION,
CONF_IS_ON,
]
IS_OFF = [
CONF_IS_NOT_BAT_LOW,
CONF_IS_NOT_CHARGING,
CONF_IS_NOT_COLD,
CONF_IS_NOT_CONNECTED,
CONF_IS_NOT_HOT,
CONF_IS_LOCKED,
CONF_IS_NOT_MOIST,
CONF_IS_NOT_MOVING,
CONF_IS_NOT_OCCUPIED,
CONF_IS_NOT_OPEN,
CONF_IS_NOT_PLUGGED_IN,
CONF_IS_NOT_POWERED,
CONF_IS_NOT_PRESENT,
CONF_IS_NOT_UNSAFE,
CONF_IS_NO_GAS,
CONF_IS_NO_LIGHT,
CONF_IS_NO_MOTION,
CONF_IS_NO_PROBLEM,
CONF_IS_NO_SMOKE,
CONF_IS_NO_SOUND,
CONF_IS_NO_VIBRATION,
CONF_IS_OFF,
]
ENTITY_CONDITIONS = {
DEVICE_CLASS_BATTERY: [
{CONF_TYPE: CONF_IS_BAT_LOW},
{CONF_TYPE: CONF_IS_NOT_BAT_LOW},
],
DEVICE_CLASS_BATTERY_CHARGING: [
{CONF_TYPE: CONF_IS_CHARGING},
{CONF_TYPE: CONF_IS_NOT_CHARGING},
],
DEVICE_CLASS_COLD: [{CONF_TYPE: CONF_IS_COLD}, {CONF_TYPE: CONF_IS_NOT_COLD}],
DEVICE_CLASS_CONNECTIVITY: [
{CONF_TYPE: CONF_IS_CONNECTED},
{CONF_TYPE: CONF_IS_NOT_CONNECTED},
],
DEVICE_CLASS_DOOR: [{CONF_TYPE: CONF_IS_OPEN}, {CONF_TYPE: CONF_IS_NOT_OPEN}],
DEVICE_CLASS_GARAGE_DOOR: [
{CONF_TYPE: CONF_IS_OPEN},
{CONF_TYPE: CONF_IS_NOT_OPEN},
],
DEVICE_CLASS_GAS: [{CONF_TYPE: CONF_IS_GAS}, {CONF_TYPE: CONF_IS_NO_GAS}],
DEVICE_CLASS_HEAT: [{CONF_TYPE: CONF_IS_HOT}, {CONF_TYPE: CONF_IS_NOT_HOT}],
DEVICE_CLASS_LIGHT: [{CONF_TYPE: CONF_IS_LIGHT}, {CONF_TYPE: CONF_IS_NO_LIGHT}],
DEVICE_CLASS_LOCK: [{CONF_TYPE: CONF_IS_LOCKED}, {CONF_TYPE: CONF_IS_NOT_LOCKED}],
DEVICE_CLASS_MOISTURE: [{CONF_TYPE: CONF_IS_MOIST}, {CONF_TYPE: CONF_IS_NOT_MOIST}],
DEVICE_CLASS_MOTION: [{CONF_TYPE: CONF_IS_MOTION}, {CONF_TYPE: CONF_IS_NO_MOTION}],
DEVICE_CLASS_MOVING: [{CONF_TYPE: CONF_IS_MOVING}, {CONF_TYPE: CONF_IS_NOT_MOVING}],
DEVICE_CLASS_OCCUPANCY: [
{CONF_TYPE: CONF_IS_OCCUPIED},
{CONF_TYPE: CONF_IS_NOT_OCCUPIED},
],
DEVICE_CLASS_OPENING: [{CONF_TYPE: CONF_IS_OPEN}, {CONF_TYPE: CONF_IS_NOT_OPEN}],
DEVICE_CLASS_PLUG: [
{CONF_TYPE: CONF_IS_PLUGGED_IN},
{CONF_TYPE: CONF_IS_NOT_PLUGGED_IN},
],
DEVICE_CLASS_POWER: [
{CONF_TYPE: CONF_IS_POWERED},
{CONF_TYPE: CONF_IS_NOT_POWERED},
],
DEVICE_CLASS_PRESENCE: [
{CONF_TYPE: CONF_IS_PRESENT},
{CONF_TYPE: CONF_IS_NOT_PRESENT},
],
DEVICE_CLASS_PROBLEM: [
{CONF_TYPE: CONF_IS_PROBLEM},
{CONF_TYPE: CONF_IS_NO_PROBLEM},
],
DEVICE_CLASS_SAFETY: [{CONF_TYPE: CONF_IS_UNSAFE}, {CONF_TYPE: CONF_IS_NOT_UNSAFE}],
DEVICE_CLASS_SMOKE: [{CONF_TYPE: CONF_IS_SMOKE}, {CONF_TYPE: CONF_IS_NO_SMOKE}],
DEVICE_CLASS_SOUND: [{CONF_TYPE: CONF_IS_SOUND}, {CONF_TYPE: CONF_IS_NO_SOUND}],
DEVICE_CLASS_VIBRATION: [
{CONF_TYPE: CONF_IS_VIBRATION},
{CONF_TYPE: CONF_IS_NO_VIBRATION},
],
DEVICE_CLASS_WINDOW: [{CONF_TYPE: CONF_IS_OPEN}, {CONF_TYPE: CONF_IS_NOT_OPEN}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_IS_ON}, {CONF_TYPE: CONF_IS_OFF}],
}
CONDITION_SCHEMA = cv.DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(IS_OFF + IS_ON),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
async def async_get_conditions(
opp: OpenPeerPower, device_id: str
) -> list[dict[str, str]]:
"""List device conditions."""
conditions: list[dict[str, str]] = []
entity_registry = await async_get_registry(opp)
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = DEVICE_CLASS_NONE
state = opp.states.get(entry.entity_id)
if state and ATTR_DEVICE_CLASS in state.attributes:
device_class = state.attributes[ATTR_DEVICE_CLASS]
templates = ENTITY_CONDITIONS.get(
device_class, ENTITY_CONDITIONS[DEVICE_CLASS_NONE]
)
conditions.extend(
{
**template,
"condition": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for template in templates
)
return conditions
@callback
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Evaluate state based on configuration."""
if config_validation:
config = CONDITION_SCHEMA(config)
condition_type = config[CONF_TYPE]
if condition_type in IS_ON:
stat = "on"
else:
stat = "off"
state_config = {
condition.CONF_CONDITION: "state",
condition.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
condition.CONF_STATE: stat,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
return condition.state_from_config(state_config)
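# Hedged illustration of a condition config this module accepts; the device
# and entity ids are made-up assumptions.
_EXAMPLE_CONDITION = {
    "condition": "device",
    "device_id": "abc123",
    "entity_id": "binary_sensor.front_door",
    "domain": DOMAIN,
    "type": CONF_IS_OPEN,
}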
async def async_get_condition_capabilities(opp: OpenPeerPower, config: dict) -> dict:
"""List condition capabilities."""
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
| 30.529412
| 88
| 0.719894
|
ca9e18977722c870c880367d6d868084c1ce6e8f
| 1,499
|
py
|
Python
|
makeapp/app_templates/webscaff/__module_name__/uwsgicfg.py
|
idlesign/makeapp
|
ff4324e91cdde09bf78bfb946f584414ac11160e
|
[
"BSD-3-Clause"
] | 12
|
2016-07-17T11:49:06.000Z
|
2021-07-05T06:22:54.000Z
|
makeapp/app_templates/webscaff/__module_name__/uwsgicfg.py
|
idlesign/makeapp
|
ff4324e91cdde09bf78bfb946f584414ac11160e
|
[
"BSD-3-Clause"
] | 4
|
2016-06-27T07:36:45.000Z
|
2020-03-18T05:56:33.000Z
|
makeapp/app_templates/webscaff/__module_name__/uwsgicfg.py
|
idlesign/makeapp
|
ff4324e91cdde09bf78bfb946f584414ac11160e
|
[
"BSD-3-Clause"
] | 1
|
2015-09-13T10:44:08.000Z
|
2015-09-13T10:44:08.000Z
|
from uwsgiconf.config import configure_uwsgi
from uwsgiconf.presets.nice import PythonSection
def get_configurations() -> PythonSection:
from django.conf import settings
in_production = settings.IN_PRODUCTION
project = settings.PROJECT_NAME
domain = settings.PROJECT_DOMAIN
dir_state = settings.PROJECT_DIR_STATE
section = PythonSection.bootstrap(
f'http://:{80 if in_production else 8000}',
allow_shared_sockets=in_production,
wsgi_module=f'{project}.wsgi',
process_prefix=f'[{project}] ',
workers=3,
threads=3,
log_dedicated=True,
ignore_write_errors=True,
touch_reload=f"{dir_state / 'reloader'}",
owner=project if in_production else None,
)
section.set_runtime_dir(f'{settings.PROJECT_DIR_RUN}'.replace(f'/{project}', ''))
section.main_process.change_dir(f'{dir_state}')
section.workers.set_reload_params(max_requests=10000)
section.spooler.add(f"{dir_state / 'spool'}")
if in_production and domain:
section.configure_certbot_https(
domain=domain,
webroot=f"{dir_state / 'certbot'}",
allow_shared_sockets=True,
# For initial certificate issuing we use HTTP w/o redirects.
http_redirect=True,
)
section.configure_maintenance_mode(
f"{dir_state / 'maintenance'}", section.get_bundled_static_path('503.html'))
return section
configure_uwsgi(get_configurations)
| 27.759259
| 85
| 0.681788
|
1982069bcbd4ae6b1abc5d6da658214690d30c1f
| 8,556
|
py
|
Python
|
extensions/GnuplotClBridge.py
|
dszmaj/wikidpad
|
1127375665935524ddb623da8dd5137038c7e53e
|
[
"Apache-2.0",
"MIT"
] | 16
|
2015-02-05T17:32:04.000Z
|
2022-01-14T13:46:36.000Z
|
extensions/GnuplotClBridge.py
|
dszmaj/wikidpad
|
1127375665935524ddb623da8dd5137038c7e53e
|
[
"Apache-2.0",
"MIT"
] | 8
|
2015-06-20T20:02:41.000Z
|
2016-02-23T14:52:32.000Z
|
extensions/GnuplotClBridge.py
|
dszmaj/wikidpad
|
1127375665935524ddb623da8dd5137038c7e53e
|
[
"Apache-2.0",
"MIT"
] | 11
|
2015-05-19T09:17:16.000Z
|
2017-09-14T00:43:13.000Z
|
import os, os.path
import subprocess
import wx
from pwiki.TempFileSet import createTempFile
from pwiki.StringOps import mbcsEnc, mbcsDec, lineendToOs
WIKIDPAD_PLUGIN = (("InsertionByKey", 1), ("Options", 1))
def describeInsertionKeys(ver, app):
"""
API function for "InsertionByKey" plugins
Returns a sequence of tuples describing the supported
insertion keys. Each tuple has the form (insKey, exportTypes, handlerFactory)
where insKey is the insertion key handled, exportTypes is a sequence of
strings describing the supported export types and handlerFactory is
a factory function (normally a class) taking the wxApp object as
parameter and returning a handler object fulfilling the protocol
for "insertion by key" (see EqnHandler as example).
ver -- API version (can only be 1 currently)
app -- wxApp object
"""
return (
(u"gnuplot", ("html_single", "html_previewWX", "html_preview", "html_multi"), GptHandler),
)
class GptHandler:
"""
Base class fulfilling the "insertion by key" protocol.
"""
def __init__(self, app):
self.app = app
self.extAppExe = None
def taskStart(self, exporter, exportType):
"""
This is called before any call to createContent() during an
export task.
An export task can be a single HTML page for
preview or a single page or a set of pages for export.
exporter -- Exporter object calling the handler
exportType -- string describing the export type
Calls to createContent() will only happen after a
call to taskStart() and before the call to taskEnd()
"""
# Find Gnuplot executable by configuration setting
self.extAppExe = self.app.getGlobalConfig().get("main",
"plugin_gnuplot_exePath", "")
if self.extAppExe:
self.extAppExe = os.path.join(self.app.getWikiAppDir(),
self.extAppExe)
def taskEnd(self):
"""
Called after export task ended and after the last call to
createContent().
"""
pass
def createContent(self, exporter, exportType, insToken):
"""
Handle an insertion and create the appropriate content.
exporter -- Exporter object calling the handler
exportType -- string describing the export type
insToken -- insertion token to create content for
An insertion token has the following member variables:
key: insertion key (unistring)
value: value of an insertion (unistring)
appendices: sequence of strings with the appendices
Meaning and type of return value is solely defined by the type
of the calling exporter.
For HtmlExporter a unistring is returned with the HTML code
to insert instead of the insertion.
"""
if not insToken.value:
# Nothing in, nothing out
return u""
if self.extAppExe == "":
# No path to Gnuplot executable -> show message
return u'<pre>' + _(u'[Please set path to Gnuplot executable]') +\
u'</pre>'
# Get exporters temporary file set (manages creation and deletion of
# temporary files)
tfs = exporter.getTempFileSet()
pythonUrl = (exportType != "html_previewWX")
dstFullPath = tfs.createTempFile("", ".png", relativeTo="")
url = tfs.getRelativeUrl(None, dstFullPath, pythonUrl=pythonUrl)
baseDir = os.path.dirname(exporter.getMainControl().getWikiConfigPath())
# Prepend source code with appropriate settings for PNG output
srcCode = ("set terminal png\nset output '%s'\n" % dstFullPath) + \
insToken.value
# Retrieve quoted content of the insertion
bstr = lineendToOs(mbcsEnc(srcCode, "replace")[0])
# Store token content in a temporary file
srcfilepath = createTempFile(bstr, ".gpt")
try:
cmdline = subprocess.list2cmdline((self.extAppExe, srcfilepath))
# Run external application
# childIn, childOut, childErr = os.popen3(cmdline, "b")
popenObject = subprocess.Popen(cmdline, shell=True,
stderr=subprocess.PIPE, stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
childErr = popenObject.stderr
# See http://bytes.com/topic/python/answers/634409-subprocess-handle-invalid-error
# why this is necessary
popenObject.stdin.close()
popenObject.stdout.close()
if u"noerror" in [a.strip() for a in insToken.appendices]:
childErr.read()
errResponse = ""
else:
errResponse = childErr.read()
childErr.close()
finally:
os.unlink(srcfilepath)
if errResponse != "":
errResponse = mbcsDec(errResponse, "replace")[0]
return u'<pre>' + _(u'[Gnuplot error: %s]') % errResponse +\
u'</pre>'
# Return appropriate HTML code for the image
if exportType == "html_previewWX":
# Workaround for internal HTML renderer
return (u'<img src="%s" border="0" align="bottom" alt="gnuplot" />'
u' ') % url
else:
return u'<img src="%s" border="0" align="bottom" alt="gnuplot" />' \
% url
def getExtraFeatures(self):
"""
Returns a list of bytestrings describing additional features supported
by the plugin. Currently not specified further.
"""
return ()
def registerOptions(ver, app):
"""
API function for "Options" plugins
Register configuration options and their GUI presentation
ver -- API version (can only be 1 currently)
app -- wxApp object
"""
# Register option
app.getDefaultGlobalConfigDict()[("main", "plugin_gnuplot_exePath")] = u""
# Register panel in options dialog
app.addOptionsDlgPanel(GnuplotOptionsPanel, u" Gnuplot")
class GnuplotOptionsPanel(wx.Panel):
def __init__(self, parent, optionsDlg, app):
"""
Called when "Options" dialog is opened to show the panel.
Transfer here all options from the configuration file into the
text fields, check boxes, ...
"""
wx.Panel.__init__(self, parent)
self.app = app
pt = self.app.getGlobalConfig().get("main", "plugin_gnuplot_exePath", "")
self.tfPath = wx.TextCtrl(self, -1, pt)
mainsizer = wx.BoxSizer(wx.VERTICAL)
inputsizer = wx.BoxSizer(wx.HORIZONTAL)
inputsizer.Add(wx.StaticText(self, -1, _(u"Path to Gnuplot:")), 0,
wx.ALL | wx.EXPAND, 5)
inputsizer.Add(self.tfPath, 1, wx.ALL | wx.EXPAND, 5)
mainsizer.Add(inputsizer, 0, wx.EXPAND)
self.SetSizer(mainsizer)
self.Fit()
def setVisible(self, vis):
"""
Called when panel is shown or hidden. The actual wxWindow.Show()
function is called automatically.
If a panel is visible and becomes invisible because another panel is
selected, the plugin can veto by returning False.
When becoming visible, the return value is ignored.
"""
return True
def checkOk(self):
"""
Called when "OK" is pressed in dialog. The plugin should check here if
all input values are valid. If not, it should return False, then the
Options dialog automatically shows this panel.
There should be a visual indication about what is wrong (e.g. red
background in text field). Be sure to reset the visual indication
if field is valid again.
"""
return True
def handleOk(self):
"""
This is called if checkOk() returned True for all panels. Transfer here
all values from text fields, checkboxes, ... into the configuration
file.
"""
pt = self.tfPath.GetValue()
self.app.getGlobalConfig().set("main", "plugin_gnuplot_exePath", pt)
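For orientation, a sketch of how this plugin is driven from a wiki page; the exact insertion quoting is an assumption based on WikidPad's generic [:key: value; appendix] syntax and does not appear in this file:

# A page containing an insertion such as
#   [:gnuplot:"set xrange [0:6.3]
#   plot sin(x)"; noerror]
# is routed to GptHandler.createContent() on HTML export, which prepends the
# PNG terminal settings, runs the configured Gnuplot executable on a temp
# file and inserts an <img> tag for the rendered image; the "noerror"
# appendix discards Gnuplot's stderr instead of rendering it.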
| 36.72103
| 103
| 0.591164
|
7055b21aec54373037a015406597451bca7a1790
| 1,200
|
py
|
Python
|
olpxek_bot/eval_py.py
|
kexplo/olpxek-bot
|
d4ec11d97ea906651a2df225081f33a235e9bd65
|
[
"MIT"
] | 7
|
2020-08-18T22:07:24.000Z
|
2022-01-01T07:32:20.000Z
|
olpxek_bot/eval_py.py
|
kexplo/olpxek_bot
|
2fe5352ab1e584f877ba5445ff5af2d179c9b2c8
|
[
"MIT"
] | null | null | null |
olpxek_bot/eval_py.py
|
kexplo/olpxek_bot
|
2fe5352ab1e584f877ba5445ff5af2d179c9b2c8
|
[
"MIT"
] | null | null | null |
from RestrictedPython import compile_restricted_eval
from RestrictedPython import (
limited_builtins,
# PrintCollector,
safe_builtins,
utility_builtins,
)
from RestrictedPython.Eval import default_guarded_getitem
from RestrictedPython.Guards import (
# guarded_setattr,
# guarded_delattr
full_write_guard,
guarded_iter_unpack_sequence,
guarded_unpack_sequence,
safer_getattr,
)
def eval_py(source: str) -> str:
builtins = safe_builtins.copy()
builtins.update(utility_builtins)
builtins.update(limited_builtins)
restricted_globals = {
"__builtins__": builtins,
# '_print_': PrintCollector,
"_getattr_": safer_getattr,
"_write_": full_write_guard,
# "_getiter_": iter,
"_getitem_": default_guarded_getitem,
"_iter_unpack_sequence_": guarded_iter_unpack_sequence,
"_unpack_sequence_": guarded_unpack_sequence,
}
compiled = compile_restricted_eval(source)
if compiled.errors:
return ", ".join(compiled.errors)
try:
ret = eval(compiled.code, restricted_globals) # noqa: S307
return str(ret)
except Exception as e:
return str(e)
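A minimal usage sketch (not part of the original module); the expected outputs in the comments are assumptions about typical RestrictedPython behaviour:

if __name__ == "__main__":
    # Plain arithmetic needs no builtins and evaluates normally.
    print(eval_py("1 + 2 * 3"))            # -> "7"
    # Names outside the restricted globals raise; the error text is returned.
    print(eval_py("open('/etc/passwd')"))  # -> "name 'open' is not defined"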
| 27.906977
| 67
| 0.7
|
184e4b6d15505b299ca3b92611e5a175867a6a3d
| 8,988
|
py
|
Python
|
user_service_sdk/model/topology/container_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
user_service_sdk/model/topology/container_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
user_service_sdk/model/topology/container_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: container.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from user_service_sdk.model.topology import property_pb2 as user__service__sdk_dot_model_dot_topology_dot_property__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='container.proto',
package='topology',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topology'),
serialized_pb=_b('\n\x0f\x63ontainer.proto\x12\x08topology\x1a.user_service_sdk/model/topology/property.proto\"\xad\x02\n\tContainer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ndataSource\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\t\x12$\n\x08property\x18\x04 \x01(\x0b\x32\x12.topology.Property\x12\x10\n\x08\x63ollapse\x18\x05 \x01(\x08\x12\x0f\n\x07\x63reator\x18\x06 \x01(\t\x12\x10\n\x08modifier\x18\x07 \x01(\t\x12\r\n\x05\x63time\x18\x08 \x01(\x05\x12\r\n\x05mtime\x18\t \x01(\x05\x12(\n\x05style\x18\n \x01(\x0b\x32\x19.topology.Container.Style\x1aO\n\x05Style\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\r\n\x05width\x18\x03 \x01(\x02\x12\x0e\n\x06height\x18\x04 \x01(\x02\x12\x11\n\tclassName\x18\x05 \x01(\tBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topologyb\x06proto3')
,
dependencies=[user__service__sdk_dot_model_dot_topology_dot_property__pb2.DESCRIPTOR,])
_CONTAINER_STYLE = _descriptor.Descriptor(
name='Style',
full_name='topology.Container.Style',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='topology.Container.Style.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='y', full_name='topology.Container.Style.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='topology.Container.Style.width', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='topology.Container.Style.height', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='className', full_name='topology.Container.Style.className', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=300,
serialized_end=379,
)
_CONTAINER = _descriptor.Descriptor(
name='Container',
full_name='topology.Container',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='topology.Container.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataSource', full_name='topology.Container.dataSource', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='topology.Container.id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='property', full_name='topology.Container.property', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='collapse', full_name='topology.Container.collapse', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='topology.Container.creator', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modifier', full_name='topology.Container.modifier', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='topology.Container.ctime', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='topology.Container.mtime', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='style', full_name='topology.Container.style', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CONTAINER_STYLE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=78,
serialized_end=379,
)
_CONTAINER_STYLE.containing_type = _CONTAINER
_CONTAINER.fields_by_name['property'].message_type = user__service__sdk_dot_model_dot_topology_dot_property__pb2._PROPERTY
_CONTAINER.fields_by_name['style'].message_type = _CONTAINER_STYLE
DESCRIPTOR.message_types_by_name['Container'] = _CONTAINER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Container = _reflection.GeneratedProtocolMessageType('Container', (_message.Message,), {
'Style' : _reflection.GeneratedProtocolMessageType('Style', (_message.Message,), {
'DESCRIPTOR' : _CONTAINER_STYLE,
'__module__' : 'container_pb2'
# @@protoc_insertion_point(class_scope:topology.Container.Style)
})
,
'DESCRIPTOR' : _CONTAINER,
'__module__' : 'container_pb2'
# @@protoc_insertion_point(class_scope:topology.Container)
})
_sym_db.RegisterMessage(Container)
_sym_db.RegisterMessage(Container.Style)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
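A usage sketch, assuming only the standard API of protobuf generated messages (nothing below is defined in this generated file beyond Container itself):

#   box = Container(name="web", id="c-1")
#   box.style.x, box.style.y = 10.0, 20.0   # nested Style is created on access
#   payload = box.SerializeToString()
#   restored = Container.FromString(payload)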
| 43.631068
| 827
| 0.738763
|
19f11a6fc9d9baf5d5e20b24bc5dd0da749d1d75
| 2,198
|
py
|
Python
|
configure.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
configure.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
configure.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
# [SublimeLinter @python:3]
import sys
import os
from pathlib import Path
import platform
import ctypes
DUFI_VERSION = "0.9.10"
TCL_VERSION = "8.6"
SDK_BASE_DIR = "%ProgramFiles(x86)%\\Windows Kits\\10\\Include"
VC_DIR = "%ProgramFiles(x86)%\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build"
def main(argv=sys.argv):
# Check permissions
if len(argv) > 1 and argv[1] == "--check-admin":
if not ctypes.windll.shell32.IsUserAnAdmin():
raise ValueError("Should be run with admin rights!")
# Check dependencies
if sys.version_info[:2] != (3, 8):
print("Warning: python 3.8 is required to build exe!")
buildenv = platform.uname()
if buildenv.system != "Windows" \
or buildenv.release != "10" \
or buildenv.machine != "AMD64":
raise Exception("Windows 10 x64 required!")
for file in ("vcvars32.bat", "vcvars64.bat"):
if not (Path(os.path.expandvars(VC_DIR)) / file).exists():
raise Exception("Visual Studio 2019 Community required!")
# Create config.bat
python_dir = Path(sys.executable).parent
tcl_dir = python_dir / "tcl" / "tcl{}".format(TCL_VERSION)
tk_dir = python_dir / "tcl" / "tk{}".format(TCL_VERSION)
if not tcl_dir.exists() or not tk_dir.exists():
raise Exception("tcl/tk not found!")
sdk_dir = Path(os.path.expandvars(SDK_BASE_DIR))
sdk_versions = [x.name for x in sdk_dir.iterdir() if x.is_dir()]
sdk_versions.sort(reverse=True)
if not sdk_versions:
raise Exception("Windows Kits not found!")
with open("config.bat", "w", encoding="ascii") as fp:
fp.write('@SET "TCL_LIBRARY={}"\n'.format(tcl_dir))
fp.write('@SET "TK_LIBRARY={}"\n'.format(tk_dir))
fp.write('@SET "VCVARS32={}\\vcvars32.bat"\n'.format(VC_DIR))
fp.write('@SET "VCVARS64={}\\vcvars64.bat"\n'.format(VC_DIR))
fp.write('@SET "SDK_VERSION={}"\n'.format(sdk_versions[0]))
fp.write('@SET "SDK_DIR={}\\%SDK_VERSION%"\n'.format(SDK_BASE_DIR))
fp.write('@SET "VERSION={}"\n'.format(DUFI_VERSION))
print("configure.py: done!")
if __name__ == "__main__":
sys.exit(main())
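For reference, a config.bat produced by a successful run looks roughly like the sketch below; every concrete path and version is an illustrative assumption for a default Python 3.8 install:

# @SET "TCL_LIBRARY=C:\Python38\tcl\tcl8.6"
# @SET "TK_LIBRARY=C:\Python38\tcl\tk8.6"
# @SET "VCVARS32=%ProgramFiles(x86)%\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars32.bat"
# @SET "VCVARS64=%ProgramFiles(x86)%\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat"
# @SET "SDK_VERSION=10.0.19041.0"
# @SET "SDK_DIR=%ProgramFiles(x86)%\Windows Kits\10\Include\%SDK_VERSION%"
# @SET "VERSION=0.9.10"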
| 30.957746
| 94
| 0.635123
|
e3759941f4454821b292af4ebc6bd2708938437a
| 63
|
py
|
Python
|
contentstore/__init__.py
|
praekeltfoundation/seed-stage-based-messaging
|
c1d39601c0d16fb32cebe7c2e288076c1dc4225b
|
[
"BSD-3-Clause"
] | 1
|
2017-08-17T14:17:53.000Z
|
2017-08-17T14:17:53.000Z
|
contentstore/__init__.py
|
praekelt/seed-stage-based-messaging
|
c1d39601c0d16fb32cebe7c2e288076c1dc4225b
|
[
"BSD-3-Clause"
] | 69
|
2016-02-19T06:58:00.000Z
|
2018-11-26T09:43:42.000Z
|
contentstore/__init__.py
|
praekeltfoundation/seed-stage-based-messaging
|
c1d39601c0d16fb32cebe7c2e288076c1dc4225b
|
[
"BSD-3-Clause"
] | 2
|
2016-09-28T09:32:00.000Z
|
2017-08-18T06:18:36.000Z
|
default_app_config = "contentstore.apps.ContentStoreAppConfig"
| 31.5
| 62
| 0.873016
|
3ef63fd8ac926a717c2090468d593b85fa6014cf
| 2,941
|
py
|
Python
|
test_panexport.py
|
shepherdjay/pan-os-scripts
|
03bacaa688ba74146532869e636e7a338c922fef
|
[
"MIT"
] | 2
|
2017-12-10T14:20:15.000Z
|
2018-03-29T21:49:19.000Z
|
test_panexport.py
|
shepherdjay/pan-os-scripts
|
03bacaa688ba74146532869e636e7a338c922fef
|
[
"MIT"
] | 29
|
2017-02-01T18:02:24.000Z
|
2022-03-21T14:32:05.000Z
|
test_panexport.py
|
shepherdjay/pan-os-scripts
|
03bacaa688ba74146532869e636e7a338c922fef
|
[
"MIT"
] | 2
|
2017-10-16T16:28:32.000Z
|
2017-12-10T14:20:16.000Z
|
import datetime
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import xmltodict
from pandas import read_excel
import panexport
TEST_FILE_DIR = "testfiles/"
def get_test_path(file):
path = os.path.join(os.path.dirname(__file__), TEST_FILE_DIR + file)
return path
class TestPanExport(TestCase):
def test_pad_digits(self):
number_to_pad = 5
expected = "05"
padded = panexport.pad_to_two_digits(number_to_pad)
self.assertEqual(padded, expected)
@patch('panexport.datetime')
def test_filename_format(self, mock_now):
firewall = "test_firewall"
expected_filename = "2016-01-01-{}-combined-rules.xlsx".format(firewall)
mock_now.now.return_value = datetime.date(year=2016, month=1, day=1)
filename = panexport.get_filename(firewall)
self.assertEqual(filename, expected_filename)
def test_safe_get_simple(self):
key = "key"
test_dict = {
key: 0
}
output_nokey = panexport.safeget(test_dict, "nokey")
output_key = panexport.safeget(test_dict, "key")
self.assertEqual(output_nokey, [])
self.assertEqual(output_key, [0])
def test_safe_get_nested(self):
key1 = "key1"
key2 = "key2"
nested_dict = {
key1: {
key2: "success"
}
}
output_nokey = panexport.safeget(nested_dict, "key1", "nokey")
output_key = panexport.safeget(nested_dict, "key1", "key2")
self.assertEqual(output_nokey, [])
self.assertEqual(output_key, ["success"])
class FileTests(TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def doCleanups(self):
shutil.rmtree(self.tmp_dir)
def excel_to_dictionary(self, filepath):
"""
Uses pandas to convert an excel sheet to a python dictionary.
:param filepath: Path to excel file
:return: Python Dictionary
"""
data = read_excel(filepath)
return data.to_dict()
def test_write_to_excel(self):
self.maxDiff = None
test_filename = os.path.join(self.tmp_dir, "test_write_to_excel.xlsx")
with open(get_test_path('test_rules.xml'), mode='r') as file:
example_rules = xmltodict.parse(file.read())['rules']['entry']
panexport.write_to_excel(rule_list=example_rules,
filename=test_filename,
headers_to_remove=panexport.HEADERS_REMOVE,
preferred_header_order=panexport.HEADERS_ORDER,
default_map=panexport.HEADERS_DEFAULT_MAP)
golden_file = self.excel_to_dictionary(get_test_path("panexport_golden_output.xlsx"))
test_file = self.excel_to_dictionary(test_filename)
self.assertEqual(golden_file, test_file)
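These tests rely only on stdlib unittest plus the testfiles/ fixtures, so either standard runner should collect them (an assumption about the project's tooling, not stated in this file):

#   $ pytest test_panexport.py
#   $ python -m unittest test_panexport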
| 29.41
| 93
| 0.637538
|
8026336f951e3ec41a982719a5987ad8a6fd0272
| 257
|
py
|
Python
|
polls/urls.py
|
K0Te/django-polls
|
f9743eb6fbd26c2cea860ee3b175f63c5f090076
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
K0Te/django-polls
|
f9743eb6fbd26c2cea860ee3b175f63c5f090076
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
K0Te/django-polls
|
f9743eb6fbd26c2cea860ee3b175f63c5f090076
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from polls.core import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name='index'),
path('polls/<int:question_id>/', views.detail, name='detail'),
]
| 21.416667
| 66
| 0.684825
|
af9df78ab7b34f391e8d69e5ba9c154290119855
| 36,087
|
py
|
Python
|
g3py/processes/stochastic.py
|
griosd/g3py
|
10402f045d10f1df6d3adf5320e9fb9103b5a6b5
|
[
"MIT"
] | 6
|
2016-12-20T19:04:56.000Z
|
2020-07-07T23:09:04.000Z
|
g3py/processes/stochastic.py
|
griosd/g3py
|
10402f045d10f1df6d3adf5320e9fb9103b5a6b5
|
[
"MIT"
] | 49
|
2016-12-20T05:44:12.000Z
|
2017-09-16T04:13:38.000Z
|
g3py/processes/stochastic.py
|
griosd/g3py
|
10402f045d10f1df6d3adf5320e9fb9103b5a6b5
|
[
"MIT"
] | 5
|
2017-02-15T17:06:12.000Z
|
2020-05-23T03:06:40.000Z
|
import os
import types
import matplotlib.pyplot as plt
import numpy as np
import theano as th
import theano.tensor as tt
from ..bayesian.average import mcmc_ensemble, chains_to_datatrace, plot_datatrace
from ..bayesian.models import GraphicalModel, PlotModel
from ..bayesian.selection import optimize
from ..libs import DictObj, save_pkl, load_pkl, load_datatrace, save_datatrace
from ..libs.tensors import tt_to_num, makefn, gradient
from multiprocessing import Pool
# from ..bayesian.models import TheanoBlackBox
zero32 = np.float32(0.0)
class StochasticProcess(PlotModel):#TheanoBlackBox
def __init__(self, space=None, order=None, inputs=None, outputs=None, hidden=None, index=None,
name='SP', distribution=None, active=False, precompile=False, file=None, load=True, compile_logp=True,
*args, **kwargs):
if file is not None and load:
try:
load = load_pkl(file)
self.__dict__.update(load.__dict__)
self._compile_methods(compile_logp)
print('Loaded model ' + file)
self.set_space(space=space, hidden=hidden, order=order, inputs=inputs, outputs=outputs, index=index)
return
except:
print('Model Not Found in '+str(file))
ndim = 1
self.makefn = makefn
if space is not None:
if hasattr(space, 'shape'):
if len(space.shape) > 1:
ndim = space.shape[1]
else:
ndim = int(space)
self.nspace = ndim
self.name = name
self.th_order = th.shared(np.array([0.0, 1.0], dtype=th.config.floatX),
name=self.name + '_order', borrow=False, allow_downcast=True)
self.th_space = th.shared(np.array([[0.0, 1.0]]*self.nspace, dtype=th.config.floatX).T,
name=self.name + '_space', borrow=False, allow_downcast=True)
self.th_index = th.shared(np.array([0.0, 1.0], dtype=th.config.floatX),
name=self.name + '_index', borrow=False, allow_downcast=True)
self.th_inputs = th.shared(np.array([[0.0, 1.0]]*self.nspace, dtype=th.config.floatX).T,
name=self.name + '_inputs', borrow=False, allow_downcast=True)
self.th_outputs = th.shared(np.array([0.0, 1.0], dtype=th.config.floatX),
name=self.name + '_outputs', borrow=False, allow_downcast=True)
self.is_observed = False
self.np_hidden = None
self.th_space_ = tt.matrix(self.name + '_space_th', dtype=th.config.floatX)
self.th_inputs_ = tt.matrix(self.name + '_inputs_th', dtype=th.config.floatX)
self.th_outputs_ = tt.vector(self.name + '_outputs_th', dtype=th.config.floatX)
self.th_space_.tag.test_value = np.array([[0.0, 1.0]]*self.nspace, dtype=th.config.floatX).T
self.th_inputs_.tag.test_value = np.array([[0.0, 1.0]]*self.nspace, dtype=th.config.floatX).T
self.th_outputs_.tag.test_value = np.array([0.0, 1.0], dtype=th.config.floatX)
self.th_scalar = tt.scalar(self.name + '_scalar_th', dtype=th.config.floatX)
self.th_scalar.tag.test_value = np.float32(1)
self.th_vector = tt.vector(self.name + '_vector_th', dtype=th.config.floatX)
self.th_vector.tag.test_value = np.array([0.0, 1.0], dtype=th.config.floatX)
self.th_matrix = tt.matrix(self.name + '_matrix_th', dtype=th.config.floatX)
self.th_matrix.tag.test_value = np.array([[0.0, 1.0]]*self.nspace, dtype=th.config.floatX).T
self.distribution = distribution
if active is True:
if GraphicalModel.active is None:
GraphicalModel.active = GraphicalModel('GM_' + self.name)
self.active = GraphicalModel.active
elif active is False:
self.active = GraphicalModel('GM_' + self.name)
else:
self.active = active
self.active.add_component(self)
self.compiles = DictObj()
self.precompile = precompile
super().__init__(*args, **kwargs)
#print('_define_process')
with self.model:
self._check_hypers()
self.th_define_process()
self.active.compile_components()
#print('set_space')
self.set_space(space=space, hidden=hidden, order=order, inputs=inputs, outputs=outputs, index=index)
#print('_compile_methods')
self._compile_methods(compile_logp)
if hidden is None:
self.hidden = hidden
#print('StochasticProcess__end_')
if file is not None:
self.file = file
try:
self.save()
except:
print('Error in file '+str(file))
def save(self, path=None, params=None):
if path is None:
path = self.file
if params is not None:
self.set_params(params)
try:
if os.path.isfile(path):
os.remove(path)
with self.model:
save_pkl(self, path)
print('Model saved on '+path)
except Exception as details:
print('Error saving model '+path, details)
def set_params(self, *args, **kwargs):
return self.active.set_params(*args, **kwargs)
def params_random(self, *args, **kwargs):
"""
Alias for the method .active.params_random()
"""
return self.active.params_random(*args, **kwargs)
def params_datatrace(self, *args, **kwargs):
return self.active.params_datatrace(*args, **kwargs)
def transform_params(self, *args, **kwargs):
return self.active.transform_params(*args, **kwargs)
def params_process(self, process=None, params=None, current=None, fixed=False):
if process is None:
process = self
if params is None:
params = process.params
if current is None:
current = self.params
params_transform = {k.replace(process.name, self.name, 1): v for k, v in params.items()}
params_return = DictObj({k: v for k, v in params_transform.items() if k in current.keys()})
params_return.update({k: v for k, v in current.items() if k not in params_transform.keys()})
if fixed:
params_return.update(self.params_fixed)
return params_return
def set_space(self, space=None, hidden=None, order=None, inputs=None, outputs=None, index=None):
if space is not None:
if len(space.shape) < 2:
space = space.reshape(len(space), 1)
self.space = space
if hidden is not None:
if len(hidden.shape) > 1:
hidden = hidden.reshape(len(hidden))
self.hidden = hidden
if order is not None:
if len(order.shape) > 1:
order = order.reshape(len(order))
self.order = order
elif self.nspace == 1:
self.order = self.space.reshape(len(self.space))
if inputs is not None:
if len(inputs.shape) < 2:
inputs = inputs.reshape(len(inputs), 1)
self.inputs = inputs
if outputs is not None:
if len(outputs.shape) > 1:
outputs = outputs.reshape(len(outputs))
self.outputs = outputs
if index is not None:
if len(index.shape) > 1:
index = index.reshape(len(index))
self.index = index
elif self.nspace == 1:
self.index = self.inputs.reshape(len(self.inputs))
#check dims
if len(self.order) != len(self.space):
self.order = np.arange(len(self.space))
if len(self.index) != len(self.inputs):
self.index = np.arange(len(self.inputs))
def observed(self, inputs=None, outputs=None, order=None, index=None, hidden=None):
"""
        This function assigns the observations to the GP and calculates the default parameters.
        Args:
            inputs (numpy.ndarray): the inputs of the process
            outputs (numpy.ndarray): the outputs (observations) of the process
            order (numpy.ndarray): for a multidimensional process, the order in which the
                domain (space) is plotted.
            index (numpy.ndarray): the index of the observations
            hidden (numpy.ndarray): the set of values from which the observations are taken
"""
self.set_space(inputs=inputs, outputs=outputs, order=order, index=index, hidden=hidden)
if inputs is None and outputs is None:
self.is_observed = False
else:
self.is_observed = True
@property
def model(self):
return self.active.model
@property
def params(self):
return self.active.params
@property
def params_default(self):
return self.active.params_default
@property
def params_test(self):
return self.active.params_test
@property
def space(self):
return self.th_space.get_value(borrow=False)
@space.setter
def space(self, value):
self.th_space.set_value(value, borrow=False)
@property
def hidden(self):
return self.np_hidden
@hidden.setter
def hidden(self, value):
self.np_hidden = value
@property
def inputs(self):
return self.th_inputs.get_value(borrow=False)
@inputs.setter
def inputs(self, value):
self.th_inputs.set_value(value, borrow=False)
@property
def outputs(self):
return self.th_outputs.get_value(borrow=False)
@outputs.setter
def outputs(self, value):
self.th_outputs.set_value(value, borrow=False)
@property
def order(self):
return self.th_order.get_value(borrow=False)
@order.setter
def order(self, value):
self.th_order.set_value(value, borrow=False)
@property
def index(self):
return self.th_index.get_value(borrow=False)
@index.setter
def index(self, value):
self.th_index.set_value(value, borrow=False)
def default_hypers(self):
pass
def _check_hypers(self):
pass
def th_define_process(self):
pass
def sampler(self, samples=1, prior=False, noise=False):
pass
def quantiler(self, q=0.975, prior=False, noise=False, simulations=None):
pass
def th_median(self, prior=False, noise=False, simulations=None):
pass
def th_mean(self, prior=False, noise=False, simulations=None):
pass
def th_variance(self, prior=False, noise=False, simulations=None):
pass
def th_covariance(self, prior=False, noise=False):
pass
def th_logpredictive(self, prior=False, noise=False):
pass
def th_cross_mean(self, prior=False, noise=False, cross_kernel=None):
pass
def th_std(self, *args, **kwargs):
if self.th_variance(*args, **kwargs) is not None:
return tt.sqrt(self.th_variance(*args, **kwargs))
else:
return None
def th_logp(self, prior=False, noise=False):
if prior:
random_vars = self.model.free_RVs
else:
random_vars = self.model.basic_RVs
factors = [var.logpt for var in random_vars] + self.model.potentials
return tt.add(*map(tt.sum, factors))
def th_dlogp(self, dvars=None, *args, **kwargs):
return tt_to_num(gradient(self.th_logp(*args, **kwargs), dvars))
def th_loglike(self, prior=False, noise=False):
factors = [var.logpt for var in self.model.observed_RVs]
return tt.add(*map(tt.sum, factors))
def th_error_l1(self, prior=False, noise=False):
mean = self.th_mean(prior=prior, noise=noise)
if mean is not None:
return tt.mean(tt.abs_(self.th_vector - mean))
def th_error_l2(self, prior=False, noise=False):
mean = self.th_mean(prior=prior, noise=noise)
if mean is not None:
return tt.mean(tt.pow(self.th_vector - mean, 2))
def th_error_mse(self, prior=False, noise=False):
return tt.mean(tt.abs_(self.th_vector - self.th_outputs))**2 + tt.var(tt.abs_(self.th_vector - self.th_outputs))
def _compile_methods(self, compile_logp=True):
reset_space = self.space
reset_hidden = self.hidden
reset_order = self.order
reset_inputs = self.inputs
reset_outputs = self.outputs
reset_index = self.index
reset_observed = self.is_observed
self.set_space(space=self.th_space_.tag.test_value, hidden=self.th_vector.tag.test_value,
inputs=self.th_inputs_.tag.test_value, outputs=self.th_outputs_.tag.test_value)
if self.compiles is None:
self.compiles = DictObj()
if self.th_mean() is not None:
self.mean = types.MethodType(self._method_name('th_mean'), self)
if self.th_median() is not None:
self.median = types.MethodType(self._method_name('th_median'), self)
if self.th_variance() is not None:
self.variance = types.MethodType(self._method_name('th_variance'), self)
if self.th_std() is not None:
self.std = types.MethodType(self._method_name('th_std'), self)
if self.th_covariance() is not None:
self.covariance = types.MethodType(self._method_name('th_covariance'), self)
if self.th_logpredictive() is not None:
self.logpredictive = types.MethodType(self._method_name('th_logpredictive'), self)
if self.th_error_l1() is not None:
self.error_l1 = types.MethodType(self._method_name('th_error_l1'), self)
if self.th_error_l2() is not None:
self.error_l2 = types.MethodType(self._method_name('th_error_l2'), self)
if self.th_error_mse() is not None:
self.error_mse = types.MethodType(self._method_name('th_error_mse'), self)
# self.density = types.MethodType(self._method_name('th_density'), self)
#self.quantiler = types.MethodType(self._method_name('_quantiler'), self)
#self.sampler = types.MethodType(self._method_name('_sampler'), self)
self.logp = types.MethodType(self._method_name('th_logp'), self)
self.dlogp = types.MethodType(self._method_name('th_dlogp'), self)
self.loglike = types.MethodType(self._method_name('th_loglike'), self)
self.is_observed = True
if compile_logp:
_ = self.logp(array=True)
_ = self.logp(array=True, prior=True)
# _ = self.loglike(array=True)
try:
_ = self.dlogp(array=True)
except Exception as m:
print('Compiling dlogp error:', m)
self.is_observed = reset_observed
self.set_space(space=reset_space, hidden=reset_hidden, order=reset_order,
inputs=reset_inputs, outputs=reset_outputs, index=reset_index)
def lambda_method(self, *args, **kwargs):
pass
@staticmethod
def _method_name(method=None):
def lambda_method(self, params=None, space=None, inputs=None, outputs=None, vector=[], prior=False, noise=False, array=False, *args, **kwargs):
if params is None:
if array:
params = self.active.dict_to_array(self.params)
else:
params = self.params
elif not array:
params = self.filter_params(params)
if inputs is None and not self.is_observed:
prior = True
if space is None:
space = self.space
if inputs is None:
inputs = self.inputs
if outputs is None:
outputs = self.outputs
#return self._jit_compile(method, prior=prior, noise=noise, array=array, *args, **kwargs)(self.space, self.inputs, self.outputs, params)
name = ''
if prior:
name += 'prior'
else:
name += 'posterior'
name += method.replace('th', '') # delete th
if noise:
name += '_noise'
if len(args) > 0:
name += str(args)
if len(kwargs) > 0:
name += str(kwargs)
if not hasattr(self.compiles, name):
#print(method)
#if method in ['th_logpredictive', 'th_error_l1', 'th_error_l2']:
# th_vars = [self.th_space_, self.th_inputs_, self.th_outputs_, self.th_vector] + self.model.vars
#else:
th_vars = [self.th_space_, self.th_inputs_, self.th_outputs_, self.th_vector] + self.model.vars
self.compiles[name] = self.makefn(th_vars, getattr(self, method)(prior=prior, noise=noise, *args, **kwargs),
givens = [(self.th_space, self.th_space_), (self.th_inputs, self.th_inputs_), (self.th_outputs, self.th_outputs_)],
bijection=None, precompile=self.precompile)
if array:
if not hasattr(self.compiles, 'array_' + name):
self.compiles['array_' + name] = self.compiles[name].clone(self.active.bijection.rmap)
name = 'array_' + name
return self.compiles[name](params, space, inputs, outputs, vector)
return lambda_method
@property
def executed(self):
return {k: v.executed for k, v in self.compiles.items()}
@property
def transformations(self):
return self.active.transformations
@property
def potentials(self):
return self.active.potentials
def predict(self, params=None, space=None, inputs=None, outputs=None, mean=True, std=True, var=False, cov=False,
median=False, quantiles=False, quantiles_noise=False, samples=0, distribution=False,
prior=False, noise=False, simulations=None):
"""
        Predict the stochastic process, computing each requested statistic of the process.
Args:
params (g3py.libs.DictObj): Contains the hyperparameters of the stochastic process
space (numpy.ndarray): the domain space of the process
inputs (numpy.ndarray): the inputs of the process
outputs (numpy.ndarray): the outputs (observations) of the process
mean (bool): Determines whether the mean is displayed
std (bool): Determines whether the standard deviation is displayed
var (bool): Determines whether the variance is displayed
cov (bool): Determines whether the covariance is displayed
median (bool): Determines whether the median is displayed
quantiles (bool): Determines whether the quantiles (95% of confidence) are displayed
            quantiles_noise (bool): Determines whether noise is considered when calculating the
                quantiles and whether they are displayed
            samples (int): the number of samples of the stochastic process that are generated
            distribution (bool): whether the log predictive function is returned
            prior (bool): whether the prediction considers the prior
            noise (bool): whether the prediction considers noise
            simulations (int): the number of simulations used to approximate the value of the
                statistics
        Returns:
            A dictionary containing the mean, std, var, cov, median, quantiles,
            quantiles_noise and distribution entries, where requested.
"""
if params is None:
params = self.params
if not self.is_observed:
prior = True
if space is None:
space = self.space
if inputs is None:
inputs = self.inputs
if outputs is None:
outputs = self.outputs
n_simulations = 1
if type(simulations) is int:
n_simulations = simulations
simulations = self.sampler(params, space, inputs, outputs, prior=prior, noise=noise, samples=simulations)
values = DictObj()
if mean:
values['mean'] = self.mean(params, space, inputs, outputs, prior=prior, noise=noise, simulations=simulations)
if var:
values['variance'] = self.variance(params, space, inputs, outputs, prior=prior, noise=noise, simulations=simulations)
if std:
values['std'] = self.std(params, space, inputs, outputs, prior=prior, noise=noise, simulations=simulations)
if cov:
values['covariance'] = self.covariance(params, space, inputs, outputs, prior=prior, noise=noise)
if median:
values['median'] = self.median(params, space, inputs, outputs, prior=prior, noise=noise, simulations=simulations)
if quantiles:
values['quantile_up'] = self.quantiler(params, space, inputs, outputs, q=0.975, prior=prior, noise=noise, simulations=simulations)
values['quantile_down'] = self.quantiler(params, space, inputs, outputs, q=0.025, prior=prior, noise=noise, simulations=simulations)
if quantiles_noise:
simulations_noise = self.sampler(params, space, inputs, outputs, prior=prior, noise=True, samples=n_simulations)
values['noise_std'] = self.std(params, space, inputs, outputs, prior=prior, noise=True, simulations=simulations_noise)
values['noise_up'] = self.quantiler(params, space, inputs, outputs, q=0.975, prior=prior, noise=True, simulations=n_simulations)
values['noise_down'] = self.quantiler(params, space, inputs, outputs, q=0.025, prior=prior, noise=True, simulations=n_simulations)
if samples > 0:
values['samples'] = self.sampler(params, space, inputs, outputs, samples=samples, prior=prior, noise=noise)
if distribution:
#values['logp'] = lambda x: self.compiles['posterior_logp'](x, space, inputs, outputs, **params)
#values['logpred'] = lambda x: self.compiles['posterior_logpred'](x, space, inputs, outputs, **params)
values['logpredictive'] = lambda x: self.logpredictive(params, space, inputs, outputs, vector=x, prior=prior, noise=True)
return values
#TODO: Vectorized
def logp_chain(self, chain, prior=False):
out = np.empty(len(chain))
for i in range(len(out)):
out[i] = self.logp(chain[i], array=True, prior=prior)
return out
#@jit
def fixed_logp(self, sampling_params, return_array=False):
self.active.fixed_chain[:, self.active.sampling_dims] = sampling_params
r = np.zeros(len(self.active.fixed_chain))
for i, p in enumerate(self.active.fixed_chain):
r[i] = self.compiles.array_posterior_logp(p, self.space, self.inputs, self.outputs)
if return_array:
return r
else:
return np.mean(r)
#@jit
def fixed_dlogp(self, sampling_params, return_array=False):
self.active.fixed_chain[:, self.active.sampling_dims] = sampling_params
r = list()
for i, p in enumerate(self.active.fixed_chain):
r.append(self.compiles.array_posterior_dlogp(p, self.space, self.inputs, self.outputs)[self.active.sampling_dims])
if return_array:
return np.array(r)
else:
return np.mean(np.array(r), axis=0)
#@jit
def fixed_loglike(self, sampling_params, return_array=False):
self.active.fixed_chain[:, self.active.sampling_dims] = sampling_params
r = np.zeros(len(self.active.fixed_chain))
for i, p in enumerate(self.active.fixed_chain):
r[i] = self.compiles.array_posterior_loglike(p, self.space, self.inputs, self.outputs)
if return_array:
return r
else:
return np.mean(r)
#@jit
def fixed_logprior(self, sampling_params, return_array=False):
self.active.fixed_chain[:, self.active.sampling_dims] = sampling_params
r = np.zeros(len(self.active.fixed_chain))
for i, p in enumerate(self.active.fixed_chain):
r[i] = self.compiles.array_prior_logp(p, self.space, self.inputs, self.outputs)
if return_array:
return r
else:
return np.mean(r)
def find_MAP(self, start=None, points=1, return_points=False, plot=False, display=True,
powell=True, bfgs=True, init='bfgs', max_time=None):
"""
        This function calculates the Maximum A Posteriori estimate, alternating the BFGS and Powell algorithms.
        Args:
            start (g3py.libs.DictObj): The initial parameters to start the optimization.
                The default value corresponds to the default parameters of the GP. This can also be
                a list of initial points.
            points (int): the number of (meta) iterations of the optimization problem
            return_points (bool): Determines whether the parameter points of the optimization
                are also returned.
            plot (bool): Determines whether the result is plotted.
            display (bool): Determines whether the information of the optimization is displayed.
            powell (bool): Whether the Powell algorithm is used
            bfgs (bool): Whether the BFGS algorithm is used
            init (str): The algorithm used for the first iteration.
            max_time (int): the maximum number of seconds for every step of the optimization
        Returns:
            The parameters that maximize the log posterior.
"""
points_list = list()
if start is None:
start = self.params
if self.active.fixed_datatrace is None:
logp = lambda p: self.compiles.array_posterior_logp(p, self.space, self.inputs, self.outputs)
dlogp = lambda p: self.compiles.array_posterior_dlogp(p, self.space, self.inputs, self.outputs)
else:
logp = self.fixed_logp
dlogp = self.fixed_dlogp
try:
dlogp(self.active.sampling_params(start))
except Exception as m:
print(m)
dlogp = None
if type(start) is list:
i = 0
for s in start:
i += 1
points_list.append(('start' + str(i), logp(self.active.sampling_params(s)), s))
else:
points_list.append(('start', logp(self.active.sampling_params(start)), start))
n_starts = len(points_list)
if self.outputs is None: # .get_value()
print('For find_MAP it is necessary to have observations')
return start
if display:
print('Starting function value (-logp): ' + str(-logp(self.active.sampling_params(points_list[0][2]))))
if plot:
plt.figure(0)
self.plot(params=points_list[0][2], title='start')
plt.show()
        if init == 'bfgs':  # compare by value; 'is' on string literals is unreliable
check = 0
else:
check = 1
with self.model:
i = -1
points -= 1
while i < points:
i += 1
#try:
if powell:
name, _, start = points_list[i // 2]
else:
name, _, start = points_list[i]
if (i % 2 == check or not powell) and bfgs: #
if name.endswith('_bfgs'):
if i > n_starts:
points += 1
continue
name += '_bfgs'
if display:
print(name)
new = optimize(logp=logp, start=self.active.sampling_params(start), dlogp=dlogp, fmin='bfgs',
max_time=max_time, disp=display)
else:
if name.endswith('_powell'):
if i > n_starts:
points += 1
continue
name += '_powell'
if display:
print(name)
new = optimize(logp=logp, start=self.active.sampling_params(start), fmin='powell', max_time=max_time,
disp=display)
points_list.append((name, logp(new), self.active.dict_from_sampling_array(new)))
if plot:
plt.figure(i + 1)
self.plot(params=self.active.dict_from_sampling_array(new), title=name)
plt.show()
#except Exception as error:
# print(error)
# pass
optimal = points_list[0]
for test in points_list:
if test[1] > optimal[1]:
optimal = test
_name, _ll, params = optimal
params = DictObj(params)
if display:
print('find_MAP', params)
if return_points is False:
return params
else:
return params, points_list
def sample_hypers(self, start=None, samples=1000, chains=None, ntemps=None, raw=False, noise_mult=0.1, noise_sum=0.01,
burnin_tol=0.001, burnin_method='multi-sum', outlayer_percentile=0.0005, clusters=None, prior=False, parallel=False, threads=1,
plot=False, file=None, load=True):
"""
        This function finds the optimal hyperparameters of the logpredictive function using the
        'Ensemble MCMC' algorithm.
        Args:
            start (g3py.libs.DictObj): The initial parameters for the optimization. If start is None,
                it starts from the parameters obtained with the find_MAP algorithm.
            samples (int): the number of iterations performed by the algorithm
            chains (int): the number of Markov chains used in the sampling. The number of chains needs
                to be an even number and more than twice the dimension of the parameter space.
            ntemps (int): the number of temperatures used.
            raw (bool): determines whether the result returned is raw or pre-processed
            noise_mult (float): the variance of the multiplicative noise
            noise_sum (float): the variance of the additive noise
            burnin_tol (float): the tolerance for the burn-in.
            burnin_method (str): the algorithm used to calculate the burn-in
            outlayer_percentile (float): a value between 0 and 1, the percentile beyond which
                samples are marked as outliers.
            clusters (int): the number of clusters in which the sample is divided
            prior (bool): whether the prior is considered
            parallel (bool): whether the algorithm runs in parallel or not.
            threads (int): the number of processes used to parallelize the algorithm
            plot (bool): whether the information of the datatrace is plotted or not.
            file (str): a path for saving the datatrace
            load (bool): if load is True, a datatrace will be searched for in the path given by file
        Returns:
            This function returns the information given by the Ensemble Markov Chain Monte Carlo algorithm.
            The information can be returned transformed or raw, depending on the boolean 'raw'.
            In the raw case, it contains the evolution of each chain (the parameters) across the
            iterations and the value of the loglikelihood at each iteration.
            Otherwise, the function returns a datatrace whose columns contain the values of
            every parameter, their transformation (logarithmic transformation), the chain number to
            which each sample belongs, the iteration number, and the 'burnin' and 'outlayer' booleans.
"""
ndim = len(self.active.sampling_dims)
if chains is None:
chains = 2*ndim
if file is not None and load:
try:
datatrace = load_datatrace(file)
if datatrace is not None:
if (datatrace._niter.max() == samples-1) and (datatrace._nchain.max() == chains-1):
if plot:
plot_datatrace(datatrace)
return datatrace
except Exception as m:
pass
if start is None:
start = self.find_MAP(display=False)
if isinstance(start, dict):
start = self.active.dict_to_array(start)
if len(start.shape) == 1:
start = start[self.active.sampling_dims]
elif len(start.shape) == 2:
start = start[:, self.active.sampling_dims]
elif len(start.shape) == 3:
start = start[:, :, self.active.sampling_dims]
if self.active.fixed_datatrace is None:
if ntemps is None:
if prior is False:
logp = lambda p: self.compiles.array_posterior_logp(p, self.space, self.inputs, self.outputs)
else:
logp = lambda p: self.compiles.array_prior_logp(p, self.space, self.inputs, self.outputs)
loglike = None
logprior = None
else:
logp = None
logprior = lambda p: self.compiles.array_prior_logp(p, self.space, self.inputs, self.outputs)
if prior is False:
loglike = lambda p: self.compiles.array_posterior_loglike(p, self.space, self.inputs, self.outputs)
else:
loglike = lambda p: zero32
else:
if ntemps is None:
if prior is False:
logp = self.fixed_logp
else:
logp = self.fixed_logprior
loglike = None
logprior = None
else:
logp = None
if prior is False:
loglike = self.fixed_loglike
else:
loglike = lambda p: zero32
logprior = self.fixed_logprior
def parallel_mcmc(nchains):
return mcmc_ensemble(ndim, samples=samples, chains=nchains, ntemps=ntemps, start=start,
logp=logp, loglike=loglike, logprior=logprior,
noise_mult=noise_mult, noise_sum=noise_sum, threads=threads)
if parallel in [None, 0, 1]:
lnprob, echain = parallel_mcmc(nchains=chains)
else:
import multiprocessing as mp
p = mp.Pool(parallel)
r = p.map(parallel_mcmc, list([chains/parallel]*parallel))
lnprob, echain = [], []
for k in r:
lk, le = k
lnprob = np.concatenate([lnprob, lk])
echain = np.concatenate([echain, le])
complete_chain = np.empty((echain.shape[0], echain.shape[1], self.ndim))
complete_chain[:, :, self.active.sampling_dims] = echain
if self.active.fixed_datatrace is not None:
print("TODO: Check THIS complete_chain with MEAN")
complete_chain[:, :, self.active.fixed_dims] = self.active.fixed_chain[:, self.active.fixed_dims].mean(axis=0)
if raw:
return complete_chain, lnprob
else:
datatrace = chains_to_datatrace(self, complete_chain, ll=lnprob, burnin_tol=burnin_tol,
burnin_method=burnin_method, burnin_dims=self.active.sampling_dims,
outlayer_percentile=outlayer_percentile, clusters=clusters)
if file is not None:
save_datatrace(datatrace, file)
if plot:
plot_datatrace(datatrace)
return datatrace
@property
def ndim(self):
return self.active.ndim
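A typical end-to-end workflow for a concrete subclass, sketched as comments; StochasticProcess itself leaves sampler()/th_mean()/... unimplemented, so SomeConcreteProcess, x_grid, x_obs and y_obs are placeholders:

#   sp = SomeConcreteProcess(space=x_grid)
#   sp.observed(inputs=x_obs, outputs=y_obs)           # attach the data
#   params = sp.find_MAP(display=False)                # MAP point estimate
#   pred = sp.predict(params, mean=True, std=True, quantiles=True)
#   datatrace = sp.sample_hypers(start=params, samples=2000)  # full posterior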
| 44.717472
| 160
| 0.597694
|
b6fc1f2a49e1e2d638f1a30ec506a1e22ed4acc7
| 2,822
|
py
|
Python
|
tests/components/switcher_kis/test_sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 7
|
2019-08-15T13:36:58.000Z
|
2020-03-18T10:46:29.000Z
|
tests/components/switcher_kis/test_sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 100
|
2020-06-17T22:22:41.000Z
|
2022-03-31T06:24:19.000Z
|
tests/components/switcher_kis/test_sensor.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Test the Switcher Sensor Platform."""
import pytest
from homeassistant.components.switcher_kis.const import DATA_DEVICE, DOMAIN
from homeassistant.helpers import entity_registry as er
from homeassistant.util import slugify
from . import init_integration
from .consts import DUMMY_PLUG_DEVICE, DUMMY_SWITCHER_DEVICES, DUMMY_WATER_HEATER_DEVICE
DEVICE_SENSORS_TUPLE = (
(
DUMMY_PLUG_DEVICE,
[
"power_consumption",
"electric_current",
],
),
(
DUMMY_WATER_HEATER_DEVICE,
[
"power_consumption",
"electric_current",
"remaining_time",
],
),
)
@pytest.mark.parametrize("mock_bridge", [DUMMY_SWITCHER_DEVICES], indirect=True)
async def test_sensor_platform(hass, mock_bridge):
"""Test sensor platform."""
await init_integration(hass)
assert mock_bridge
assert mock_bridge.is_running is True
assert len(hass.data[DOMAIN]) == 2
assert len(hass.data[DOMAIN][DATA_DEVICE]) == 2
for device, sensors in DEVICE_SENSORS_TUPLE:
for sensor in sensors:
entity_id = f"sensor.{slugify(device.name)}_{sensor}"
state = hass.states.get(entity_id)
assert state.state == str(getattr(device, sensor))
async def test_sensor_disabled(hass, mock_bridge):
"""Test sensor disabled by default."""
await init_integration(hass)
assert mock_bridge
mock_bridge.mock_callbacks([DUMMY_WATER_HEATER_DEVICE])
await hass.async_block_till_done()
registry = er.async_get(hass)
device = DUMMY_WATER_HEATER_DEVICE
unique_id = f"{device.device_id}-{device.mac_address}-auto_off_set"
entity_id = f"sensor.{slugify(device.name)}_auto_shutdown"
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == unique_id
assert entry.disabled is True
assert entry.disabled_by == er.DISABLED_INTEGRATION
# Test enabling entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
@pytest.mark.parametrize("mock_bridge", [[DUMMY_WATER_HEATER_DEVICE]], indirect=True)
async def test_sensor_update(hass, mock_bridge, monkeypatch):
"""Test sensor update."""
await init_integration(hass)
assert mock_bridge
device = DUMMY_WATER_HEATER_DEVICE
sensor = "power_consumption"
entity_id = f"sensor.{slugify(device.name)}_{sensor}"
state = hass.states.get(entity_id)
assert state.state == str(getattr(device, sensor))
monkeypatch.setattr(device, sensor, 1431)
mock_bridge.mock_callbacks([DUMMY_WATER_HEATER_DEVICE])
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "1431"
| 30.021277
| 88
| 0.705882
|
f5adece609f840f9f179b7430e59c978aac499a6
| 29,364
|
py
|
Python
|
nebula2_fork/fbthrift/protocol/TSimpleJSONProtocol.py
|
linzhiming0826/nebula-python
|
0c508aa9780981c1fef2bf826d624a69ca2c64b0
|
[
"Apache-2.0"
] | null | null | null |
nebula2_fork/fbthrift/protocol/TSimpleJSONProtocol.py
|
linzhiming0826/nebula-python
|
0c508aa9780981c1fef2bf826d624a69ca2c64b0
|
[
"Apache-2.0"
] | null | null | null |
nebula2_fork/fbthrift/protocol/TSimpleJSONProtocol.py
|
linzhiming0826/nebula-python
|
0c508aa9780981c1fef2bf826d624a69ca2c64b0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyre-unsafe
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from base64 import b64encode, b64decode
import json
import sys
from nebula2_fork.fbthrift.protocol.TProtocol import TProtocolBase, TProtocolException
from nebula2_fork.fbthrift.Thrift import TType
JSON_OBJECT_START = b'{'
JSON_OBJECT_END = b'}'
JSON_ARRAY_START = b'['
JSON_ARRAY_END = b']'
JSON_NEW_LINE = b'\n'
JSON_PAIR_SEPARATOR = b':'
JSON_ELEM_SEPARATOR = b','
JSON_BACKSLASH = b'\\'
JSON_BACKSLASH_VALUE = ord(JSON_BACKSLASH)
JSON_STRING_DELIMITER = b'"'
JSON_ZERO_CHAR = b'0'
JSON_TAB = b" "
JSON_CARRIAGE_RETURN = b'\r'
JSON_SPACE = b' '
TAB = b'\t'
JSON_ESCAPE_CHAR = b'u'
JSON_ESCAPE_PREFIX = b"\\u00"
THRIFT_VERSION_1 = 1
THRIFT_NAN = b"NaN"
THRIFT_INFINITY = b"Infinity"
THRIFT_NEGATIVE_INFINITY = b"-Infinity"
JSON_CHAR_TABLE = [ \
# 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 0, 0, 0, 0, 0, 0, 0,b'b',b't',b'n', 0,b'f',b'r', 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
1, 1,b'"', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
]
JSON_CHARS_TO_ESCAPE = set()
for ch_value, mode in enumerate(JSON_CHAR_TABLE):
if mode == 1:
continue
if sys.version_info[0] == 3:
JSON_CHARS_TO_ESCAPE.add(chr(ch_value).encode('ascii'))
JSON_CHARS_TO_ESCAPE.add(chr(ch_value))
else:
JSON_CHARS_TO_ESCAPE.add(chr(ch_value))
JSON_CHARS_TO_ESCAPE.add(chr(ch_value).encode('utf-8'))
JSON_CHARS_TO_ESCAPE.add(JSON_BACKSLASH)
JSON_CHARS_TO_ESCAPE.add(JSON_BACKSLASH.decode('utf-8'))
ESCAPE_CHARS = b"\"\\bfnrt"
ESCAPE_CHAR_VALS = [b'"', b'\\', b'\b', b'\f', b'\n', b'\r', b'\t']
NUMERIC_CHAR = b'+-.0123456789Ee'
WHITESPACE_CHARS = {
JSON_NEW_LINE,
TAB,
JSON_CARRIAGE_RETURN,
JSON_SPACE,
}
def hexChar(x):
    x &= 0x0f
    return hex(x)[2:].encode('ascii')  # bytes, to match the byte-oriented transport writes
def hexVal(ch):
    # ord() rather than int(): int('a') raises ValueError, so the original
    # int-based comparison could never handle the hex digits a-f.
    if ch >= '0' and ch <= '9':
        return ord(ch) - ord('0')
    elif ch >= 'a' and ch <= 'f':
        return ord(ch) - ord('a') + 10
    raise TProtocolException(TProtocolException.INVALID_DATA,
                             "Unexpected hex value")
class TJSONContext:
def __init__(self, protocol, indentLevel=0):
self.indentLevel = indentLevel
self.protocol = protocol
def write(self, trans):
return
def read(self, reader):
return
def escapeNum(self):
return False
def writeNewLine(self, trans):
trans.write(JSON_NEW_LINE)
self.indent(trans)
def indent(self, trans):
trans.write(JSON_TAB * self.indentLevel)
class TJSONPairContext(TJSONContext):
def __init__(self, protocol, indentLevel=0, isMapPair=False):
TJSONContext.__init__(self, protocol, indentLevel)
self.first = True
self.colon = True
self.isMapPair = isMapPair
self.skipColon = False
def write(self, trans):
if self.first:
self.first = False
self.colon = True
else:
if self.colon:
trans.write(JSON_PAIR_SEPARATOR + b" ")
else:
trans.write(JSON_ELEM_SEPARATOR)
if self.isMapPair:
self.writeNewLine(trans)
self.colon = not self.colon
def read(self, reader):
if self.first:
self.first = False
self.colon = True
else:
self.protocol.skipWhitespace()
if self.colon:
if self.skipColon:
self.skipColon = False
else:
self.protocol.readJSONSyntaxChar(JSON_PAIR_SEPARATOR)
else:
self.protocol.readJSONSyntaxChar(JSON_ELEM_SEPARATOR)
self.colon = not self.colon
def escapeNum(self):
return self.colon
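# Illustrative sketch (added for this edit; not part of upstream fbthrift):
# TJSONPairContext alternates the ': ' and ',' separators on successive
# write() calls, which is how key/value pairs get rendered. Any object with
# a write() method works as the transport here.
def _example_pair_context():
    import io
    buf = io.BytesIO()
    ctx = TJSONPairContext(protocol=None)
    for token in (b'"k1"', b'1', b'"k2"', b'2'):
        ctx.write(buf)   # emits nothing, then ': ', ',', ': ' on calls 1-4
        buf.write(token)
    return buf.getvalue()  # b'"k1": 1,"k2": 2'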
class TJSONListContext(TJSONContext):
def __init__(self, protocol, indentLevel=0):
TJSONContext.__init__(self, protocol, indentLevel)
self.first = True
def read(self, reader):
if self.first:
self.first = False
else:
self.protocol.skipWhitespace()
self.protocol.readJSONSyntaxChar(JSON_ELEM_SEPARATOR)
def write(self, trans):
if self.first:
self.first = False
else:
trans.write(JSON_ELEM_SEPARATOR)
self.writeNewLine(trans)
class LookaheadReader():
def __init__(self, protocol):
self.protocol = protocol
self.hasData = False
self.data = b''
def read(self):
if self.hasData is True:
self.hasData = False
else:
self.data = self.protocol.trans.read(1)
return self.data
def peek(self):
if self.hasData is False:
self.data = self.protocol.trans.read(1)
self.hasData = True
return self.data
class ThriftSpec():
def __init__(self, spec):
self.spec = spec
self.nextSpec = None
class StructSpec(ThriftSpec):
'''
Wraps thrift_spec of a thrift struct.
'''
def readFieldBegin(self, fname, guess_func):
field_spec = None
self.nextSpec = None
if sys.version_info[0] >= 3:
fname = fname.decode()
for s in self.spec:
if s is not None and s[2] == fname:
field_spec = s
break
if field_spec is not None:
if field_spec[1] == TType.STRUCT:
self.nextSpec = StructSpec(field_spec[3][1])
elif field_spec[1] in (TType.SET, TType.LIST):
self.nextSpec = ListOrSetSpec(field_spec[3])
elif field_spec[1] == TType.MAP:
self.nextSpec = MapSpec(field_spec[3])
return (fname, field_spec[1], field_spec[0])
else:
return (fname, guess_func(), 0)
def getNextSpec(self):
return self.nextSpec
class ListOrSetSpec(ThriftSpec):
'''Wraps a list or set's 2-tuple nested type spec.
getNextSpec is called in readListBegin to *prepare* the spec of
the list element which may/may not be used depending on whether
the list is empty.
For example, to read list<SomeStruct> the following methods will
be called:
readListBegin()
readStructBegin()
readStructEnd()
...
readListEnd()
After readListBegin is called the current spec is still
ListOrSetSpec and its nextSpec is prepared for its element.
readStructBegin/End will push/pop the element's StructSpec
whenever a SomeStruct is read.
-1 tells the generated code that the size of this list is
undetermined so it needs to use peekList to detect the end of
the list.
'''
def readListBegin(self):
self.getNextSpec()
return (self.spec[0], -1)
readSetBegin = readListBegin
def getNextSpec(self):
if self.nextSpec is None:
if self.spec[0] == TType.STRUCT:
self.nextSpec = StructSpec(self.spec[1][1])
elif self.spec[0] in (TType.LIST, TType.SET):
self.nextSpec = ListOrSetSpec(self.spec[1])
elif self.spec[0] == TType.MAP:
self.nextSpec = MapSpec(self.spec[1])
return self.nextSpec
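# Illustrative sketch (added; not in upstream fbthrift): how generated
# deserialization code is expected to combine the -1 size sentinel from
# readListBegin with peekList when consuming a JSON array of i32 values.
# `prot` is assumed to be a spec-constructed TSimpleJSONProtocol positioned
# at the start of the array.
def _example_read_i32_list(prot):
    _etype, size = prot.readListBegin()  # size == -1: length is unknown
    elems = []
    while prot.peekList():               # False once the closing ']' is next
        elems.append(prot.readI32())
    prot.readListEnd()
    return elems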
class MapSpec(ThriftSpec):
    '''Wraps a map's 4-tuple key/value type spec.
'''
def __init__(self, spec):
ThriftSpec.__init__(self, spec)
self.key = True
self.keySpec = None
if self.spec[1] is not None:
if self.spec[0] == TType.STRUCT:
self.keySpec = StructSpec(self.spec[1][1])
elif self.spec[0] in (TType.LIST, TType.SET):
self.keySpec = ListOrSetSpec(self.spec[1])
elif self.spec[0] == TType.MAP:
self.keySpec = MapSpec(self.spec[1])
self.valueSpec = None
if self.spec[3] is not None:
if self.spec[2] == TType.STRUCT:
self.valueSpec = StructSpec(self.spec[3][1])
elif self.spec[2] in (TType.LIST, TType.SET):
self.valueSpec = ListOrSetSpec(self.spec[3])
elif self.spec[2] == TType.MAP:
self.valueSpec = MapSpec(self.spec[3])
def readMapBegin(self):
self.getNextSpec()
return (self.spec[0], self.spec[2], -1)
def getNextSpec(self):
if self.keySpec is not None and self.valueSpec is not None:
self.nextSpec = self.keySpec if self.key is True else \
self.valueSpec
self.key = not self.key
else:
self.nextSpec = self.keySpec if self.keySpec is not None else \
self.valueSpec
return self.nextSpec
class TSimpleJSONProtocolBase(TProtocolBase, object):
def __init__(self, trans, spec=None):
TProtocolBase.__init__(self, trans)
# Used as stack for contexts.
self.contexts = [TJSONContext(protocol=self)]
self.context = TJSONContext(protocol=self)
self.pair_context_class = TJSONPairContext
self.list_context_class = TJSONListContext
self.reader = LookaheadReader(self)
self.specs = []
self.spec = StructSpec(spec)
def pushContext(self, newContext):
self.contexts.append(self.context)
self.context = newContext
def popContext(self):
if len(self.contexts) > 0:
self.context = self.contexts.pop()
def pushSpec(self, newSpec):
self.specs.append(self.spec)
self.spec = newSpec
def popSpec(self):
if len(self.specs) > 0:
self.spec = self.specs.pop()
def skipWhitespace(self):
skipped = 0
while True:
ch = self.reader.peek()
if ch not in WHITESPACE_CHARS:
break
self.reader.read()
skipped += 1
return skipped
def skip(self, _type):
self.context.read(self.reader)
self.skipWhitespace()
type = self.guessTypeIdFromFirstByte()
# Since self.context.read is called at the beginning of all readJSONxxx
# methods and we have already called it here, push an empty context so that
# it becomes a no-op.
self.pushContext(TJSONContext(protocol=self))
if type == TType.STRUCT:
self.readJSONObjectStart()
while True:
(_, ftype, _) = self.readFieldBegin()
if ftype == TType.STOP:
break
self.skip(TType.VOID)
self.readJSONObjectEnd()
elif type == TType.LIST:
self.readJSONArrayStart()
while self.peekList():
self.skip(TType.VOID)
self.readJSONArrayEnd()
elif type == TType.STRING:
self.readJSONString()
elif type == TType.DOUBLE:
self.readJSONDouble()
elif type == TType.BOOL:
self.readJSONBool()
else:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Unexpected type {} guessed when skipping".format(type)
)
self.popContext()
def guessTypeIdFromFirstByte(self):
self.skipWhitespace()
byte = self.reader.peek()
if byte == JSON_OBJECT_END or byte == JSON_ARRAY_END:
return TType.STOP
elif byte == JSON_STRING_DELIMITER:
return TType.STRING
elif byte == JSON_OBJECT_START:
return TType.STRUCT
elif byte == JSON_ARRAY_START:
return TType.LIST
elif byte == b't' or byte == b'f':
return TType.BOOL
elif byte in (b'+', b'-', b'0', b'1', b'2', b'3', b'4', b'5',
b'6', b'7', b'8', b'9'):
return TType.DOUBLE
else:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unrecognized byte: {}".format(byte))
def writeJSONEscapeChar(self, ch):
self.trans.write(JSON_ESCAPE_PREFIX)
self.trans.write(hexChar(ch >> 4))
self.trans.write(hexChar(ch))
def writeJSONChar(self, ch):
charValue = ord(ch)
if charValue >= 0x30:
# The only special character >= 0x30 is '\'.
if charValue == JSON_BACKSLASH_VALUE:
self.trans.write(JSON_BACKSLASH)
self.trans.write(JSON_BACKSLASH)
else:
self.trans.write(ch)
else:
outCh = JSON_CHAR_TABLE[charValue]
if outCh == 1:
self.trans.write(ch)
elif outCh:
self.trans.write(JSON_BACKSLASH)
self.trans.write(outCh)
else:
self.writeJSONEscapeChar(charValue)
def writeJSONString(self, outStr):
self.context.write(self.trans)
self.trans.write(JSON_STRING_DELIMITER)
outStrLen = len(outStr)
if outStrLen > 0:
is_int = isinstance(outStr[0], int)
pos = 0
for idx, ch in enumerate(outStr):
if is_int:
ch = outStr[idx:idx + 1]
if ch in JSON_CHARS_TO_ESCAPE:
if pos < idx:
# Write previous chunk not requiring escaping
self.trans.write(outStr[pos:idx])
# Write current char with escaping
self.writeJSONChar(ch)
# Advance pos
pos = idx + 1
if pos < outStrLen:
# Write last chunk till outStrLen
self.trans.write(outStr[pos:outStrLen])
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONBase64(self, outStr):
self.context.write(self.trans)
self.trans.write(JSON_STRING_DELIMITER)
b64Str = b64encode(outStr)
self.trans.write(b64Str)
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONInteger(self, num):
self.context.write(self.trans)
escapeNum = self.context.escapeNum()
numStr = str(num)
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
self.trans.write(numStr)
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONBool(self, boolVal):
self.context.write(self.trans)
if self.context.escapeNum():
self.trans.write(JSON_STRING_DELIMITER)
if boolVal:
self.trans.write(b"true")
else:
self.trans.write(b"false")
if self.context.escapeNum():
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONDouble(self, num):
self.context.write(self.trans)
numStr = str(num)
special = False
if numStr == "nan":
numStr = THRIFT_NAN
special = True
elif numStr == "inf":
numStr = THRIFT_INFINITY
special = True
elif numStr == "-inf":
numStr = THRIFT_NEGATIVE_INFINITY
special = True
escapeNum = special or self.context.escapeNum()
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
self.trans.write(numStr)
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONObjectStart(self):
self.context.write(self.trans)
self.trans.write(JSON_OBJECT_START)
self.pushContext(self.pair_context_class(
protocol=self,
indentLevel=len(self.contexts)))
def writeJSONObjectEnd(self):
self.popContext()
self.context.writeNewLine(self.trans)
self.trans.write(JSON_OBJECT_END)
def writeJSONArrayStart(self):
self.context.write(self.trans)
self.trans.write(JSON_ARRAY_START)
self.pushContext(self.list_context_class(
protocol=self,
indentLevel=len(self.contexts)))
def writeJSONArrayEnd(self):
self.popContext()
self.context.writeNewLine(self.trans)
self.trans.write(JSON_ARRAY_END)
def writeJSONMapStart(self):
self.context.write(self.trans)
self.trans.write(JSON_OBJECT_START)
self.pushContext(self.list_context_class(
protocol=self,
indentLevel=len(self.contexts)))
def writeJSONMapEnd(self):
self.popContext()
self.context.writeNewLine(self.trans)
self.trans.write(JSON_OBJECT_END)
def readJSONSyntaxChar(self, char):
ch = self.reader.read()
if ch != char:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unexpected character: %s" % ch)
def readJSONString(self, skipContext=False):
self.skipWhitespace()
if skipContext is False:
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
string = []
while True:
ch = self.reader.read()
if ch == JSON_STRING_DELIMITER:
break
if ch == JSON_BACKSLASH:
ch = self.reader.read()
if ch == b'u':
self.readJSONSyntaxChar(JSON_ZERO_CHAR)
self.readJSONSyntaxChar(JSON_ZERO_CHAR)
data = self.trans.read(2)
if sys.version_info[0] >= 3 and isinstance(data, bytes):
ch = json.JSONDecoder().decode(
'"\\u00%s"' % str(data, 'utf-8')).encode('utf-8')
else:
ch = json.JSONDecoder().decode('"\\u00%s"' % data)
else:
idx = ESCAPE_CHARS.find(ch)
if idx == -1:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Expected control char")
ch = ESCAPE_CHAR_VALS[idx]
string.append(ch)
return b''.join(string)
def isJSONNumeric(self, ch):
return NUMERIC_CHAR.find(ch) >= 0
def readJSONNumericChars(self):
numeric = []
while True:
ch = self.reader.peek()
if self.isJSONNumeric(ch) is False:
break
numeric.append(self.reader.read())
return b''.join(numeric)
def readJSONInteger(self):
self.context.read(self.reader)
self.skipWhitespace()
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
numeric = self.readJSONNumericChars()
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
try:
return int(numeric)
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
def readJSONDouble(self):
self.context.read(self.reader)
self.skipWhitespace()
if self.reader.peek() == JSON_STRING_DELIMITER:
string = self.readJSONString(True)
try:
double = float(string)
                if (self.context.escapeNum() is False and
                        double != float('inf') and
                        double != float('-inf') and
                        double == double  # not NaN: NaN compares unequal to itself
                ):
raise TProtocolException(TProtocolException.INVALID_DATA,
"Numeric data unexpectedly quoted")
return double
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in numeric data")
else:
try:
return float(self.readJSONNumericChars())
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in numeric data")
def readJSONBase64(self):
string = self.readJSONString()
return b64decode(string)
def readJSONBool(self):
self.context.read(self.reader)
self.skipWhitespace()
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
if self.reader.peek() == b't':
true_string = b'true'
for i in range(4):
if self.reader.read() != true_string[i:i+1]:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in bool")
boolVal = True
elif self.reader.peek() == b'f':
false_string = b'false'
for i in range(5):
if self.reader.read() != false_string[i:i+1]:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in bool")
boolVal = False
else:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in bool")
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
return boolVal
def readJSONArrayStart(self):
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_ARRAY_START)
self.pushContext(self.list_context_class(
protocol=self,
indentLevel=len(self.contexts)))
def readJSONArrayEnd(self):
self.popContext()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_ARRAY_END)
def readJSONMapStart(self):
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_START)
self.pushContext(self.list_context_class(
protocol=self,
indentLevel=len(self.contexts)))
def readJSONMapEnd(self):
self.popContext()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_END)
def readJSONObjectStart(self):
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_START)
self.pushContext(self.pair_context_class(
protocol=self,
indentLevel=len(self.contexts)))
def readJSONObjectEnd(self):
self.popContext()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_END)
class TSimpleJSONProtocol(TSimpleJSONProtocolBase):
"""
JSON protocol implementation for Thrift. This protocol is write-only, and
produces a simple output format that conforms to the JSON standard.
"""
def writeMessageBegin(self, name, messageType, seqId):
self.writeJSONArrayStart()
self.context.writeNewLine(self.trans)
self.writeJSONInteger(THRIFT_VERSION_1)
self.writeJSONString(name)
self.writeJSONInteger(messageType)
self.writeJSONInteger(seqId)
def writeMessageEnd(self):
self.writeJSONArrayEnd()
def writeStructBegin(self, name):
self.writeJSONObjectStart()
def writeStructEnd(self):
self.writeJSONObjectEnd()
def writeFieldBegin(self, name, fieldType, fieldId):
self.context.write(self.trans)
self.popContext()
self.pushContext(self.pair_context_class(
protocol=self,
indentLevel=len(self.contexts)))
self.context.writeNewLine(self.trans)
self.writeJSONString(name)
def writeFieldEnd(self):
return
def writeFieldStop(self):
return
def writeMapBegin(self, keyType, valType, size):
self.writeJSONMapStart()
self.context.writeNewLine(self.trans)
self.pushContext(self.pair_context_class(
protocol=self,
indentLevel=len(self.contexts) - 1, isMapPair=True))
def writeMapEnd(self):
self.popContext()
self.writeJSONMapEnd()
def writeListBegin(self, elemType, size):
self.writeJSONArrayStart()
self.context.writeNewLine(self.trans)
def writeListEnd(self):
self.writeJSONArrayEnd()
def writeSetBegin(self, elemType, size):
self.writeJSONArrayStart()
self.context.writeNewLine(self.trans)
def writeSetEnd(self):
self.writeJSONArrayEnd()
def writeBool(self, val):
self.writeJSONBool(val)
def writeByte(self, byte):
self.writeJSONInteger(byte)
def writeI16(self, i16):
self.writeJSONInteger(i16)
def writeI32(self, i32):
self.writeJSONInteger(i32)
def writeI64(self, i64):
self.writeJSONInteger(i64)
def writeDouble(self, d):
self.writeJSONDouble(d)
def writeFloat(self, f):
self.writeJSONDouble(f)
def writeString(self, outStr):
self.writeJSONString(outStr)
def writeBinary(self, outStr):
self.writeJSONBase64(outStr)
def readMessageBegin(self):
self.readJSONArrayStart()
self.skipWhitespace()
if self.readJSONInteger() != THRIFT_VERSION_1:
raise TProtocolException(TProtocolException.BAD_VERSION,
"Message contained bad version.")
name = self.readJSONString()
mtype = self.readJSONInteger()
seqid = self.readJSONInteger()
return (name, mtype, seqid)
def readMessageEnd(self):
self.readJSONArrayEnd()
def readStructBegin(self):
self.readJSONObjectStart()
# This is needed because of the very first call
if self.spec.nextSpec is not None:
self.pushSpec(self.spec.getNextSpec())
def readStructEnd(self):
self.readJSONObjectEnd()
self.popSpec()
def readFieldBegin(self):
self.skipWhitespace()
ch = self.reader.peek()
if ch == JSON_OBJECT_END:
return (None, TType.STOP, 0)
self.context.read(self.reader)
self.popContext()
self.pushContext(self.pair_context_class(
protocol=self,
indentLevel=len(self.contexts)))
self.skipWhitespace()
fname = self.readJSONString()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_PAIR_SEPARATOR)
self.context.skipColon = True
self.skipWhitespace()
if self.reader.peek() == b'n':
for i in range(4):
if self.reader.read() != b'null'[i:i + 1]:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Bad data encountered in null",
)
self.context.read(self.reader) # "consume" the colon we skipped
return self.readFieldBegin()
assert isinstance(self.spec, StructSpec)
return self.spec.readFieldBegin(
fname,
self.guessTypeIdFromFirstByte)
def readFieldEnd(self):
return
def readFieldStop(self):
return
def readNumber(self):
return self.readJSONInteger()
readByte = readNumber
readI16 = readNumber
readI32 = readNumber
readI64 = readNumber
def readDouble(self):
return self.readJSONDouble()
def readFloat(self):
return self.readJSONDouble()
def readString(self):
return self.readJSONString()
def readBinary(self):
return self.readJSONBase64()
def readBool(self):
return self.readJSONBool()
def readMapBegin(self):
self.readJSONMapStart()
self.skipWhitespace()
self.pushContext(self.pair_context_class(
protocol=self,
indentLevel=len(self.contexts) - 1, isMapPair=True))
self.pushSpec(self.spec.getNextSpec())
return self.spec.readMapBegin()
def readMapEnd(self):
self.popContext()
self.readJSONMapEnd()
self.popSpec()
def peekMap(self):
self.skipWhitespace()
return self.reader.peek() != JSON_OBJECT_END
def peekList(self):
self.skipWhitespace()
return self.reader.peek() != JSON_ARRAY_END
peekSet = peekList
def readListBegin(self):
self.skipWhitespace()
self.readJSONArrayStart()
self.pushSpec(self.spec.getNextSpec())
return self.spec.readListBegin()
readSetBegin = readListBegin
def readListEnd(self):
self.skipWhitespace()
self.readJSONArrayEnd()
self.popSpec()
readSetEnd = readListEnd
class TSimpleJSONProtocolFactory:
def getProtocol(self, trans, spec=None):
prot = TSimpleJSONProtocol(trans, spec)
return prot
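# Usage sketch (added for illustration; not part of upstream fbthrift). The
# _Buf transport is a stand-in defined here because the protocol mixes str
# and bytes writes; any real TTransport with a compatible write() would do.
def _example_write_point():
    class _Buf(object):
        """Minimal transport that accepts both str and bytes."""
        def __init__(self):
            self.chunks = []
        def write(self, data):
            self.chunks.append(data if isinstance(data, bytes)
                               else data.encode('utf-8'))
        def getvalue(self):
            return b''.join(self.chunks)
    trans = _Buf()
    prot = TSimpleJSONProtocolFactory().getProtocol(trans)
    prot.writeStructBegin(b'Point')
    prot.writeFieldBegin(b'x', TType.I32, 1)
    prot.writeI32(3)
    prot.writeFieldEnd()
    prot.writeFieldStop()
    prot.writeStructEnd()
    return trans.getvalue()  # JSON bytes, roughly b'{\n "x": 3\n}'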
| avg_line_length: 32.056769 | max_line_length: 86 | alphanum_fraction: 0.592017 |

| hexsha: db5d42b472b560c53d97b07909720521b7db6871 | size: 3,084 | ext: py | lang: Python |
| max_stars_repo_path: source/COMRegistrationFixes/__init__.py | max_stars_repo_name: SWEN-712/screen-reader-brandonp728 | max_stars_repo_head_hexsha: e30c25ad2d10ce632fac0548696a61a872328f59 | max_stars_repo_licenses: ["bzip2-1.0.6"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: source/COMRegistrationFixes/__init__.py | max_issues_repo_name: SWEN-712/screen-reader-brandonp728 | max_issues_repo_head_hexsha: e30c25ad2d10ce632fac0548696a61a872328f59 | max_issues_repo_licenses: ["bzip2-1.0.6"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: source/COMRegistrationFixes/__init__.py | max_forks_repo_name: SWEN-712/screen-reader-brandonp728 | max_forks_repo_head_hexsha: e30c25ad2d10ce632fac0548696a61a872328f59 | max_forks_repo_licenses: ["bzip2-1.0.6"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
# -*- coding: UTF-8 -*-
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2018 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Utilities to re-register particular system COM interfaces needed by NVDA."""
import os
import subprocess
import winVersion
from logHandler import log
# Particular 64 bit / 32 bit system paths
systemRoot=os.path.expandvars('%SYSTEMROOT%')
system32=os.path.join(systemRoot,'system32')
sysWow64=os.path.join(systemRoot,'syswow64')
systemDrive=os.path.expandvars('%SYSTEMDRIVE%\\')
programFiles=os.path.join(systemDrive,'program files')
programFilesX86=os.path.join(systemDrive,'program files (x86)')
def registerServer(fileName,wow64=False):
"""
Registers the COM proxy dll with the given file name
Using regsvr32.
@param fileName: the path to the dll
@type fileName: str
@param wow64: If true then the 32 bit (wow64) version of regsvr32 will be used.
@type wow64: bool
"""
regsvr32=os.path.join(sysWow64 if wow64 else system32,'regsvr32.exe')
try:
subprocess.check_call([regsvr32,'/s',fileName])
except subprocess.CalledProcessError as e:
log.error("Error registering %s, %s"%(fileName,e))
else:
log.debug("Registered %s"%fileName)
def applyRegistryPatch(fileName,wow64=False):
"""
Applies the registry patch with the given file name
using regedit.
	@param fileName: the path to the registry patch (.reg) file
	@type fileName: str
	@param wow64: If true then the 32 bit (wow64) version of regedit will be used.
	@type wow64: bool
	"""
regedit=os.path.join(sysWow64 if wow64 else systemRoot,'regedit.exe')
try:
subprocess.check_call([regedit,'/s',fileName])
except subprocess.CalledProcessError as e:
log.error("Error applying registry patch: %s with %s, %s"%(fileName,regedit,e))
else:
log.debug("Applied registry patch: %s with %s"%(fileName,regedit))
def fixCOMRegistrations():
"""
Registers most common COM proxies, in case they had accidentally been unregistered or overwritten by 3rd party software installs/uninstalls.
"""
is64bit=os.environ.get("PROCESSOR_ARCHITEW6432","").endswith('64')
OSMajorMinor=winVersion.winVersion[:2]
log.debug("Fixing COM registration for Windows %s.%s, %s"%(OSMajorMinor[0],OSMajorMinor[1],"64 bit" if is64bit else "32 bit"))
# Commands taken from NVDA issue #2807 comment https://github.com/nvaccess/nvda/issues/2807#issuecomment-320149243
# OLEACC (MSAA) proxies
applyRegistryPatch(os.path.join('COMRegistryFixes','oleaccProxy.reg'))
if is64bit:
applyRegistryPatch(os.path.join('COMRegistryFixes','oleaccProxy.reg'),wow64=True)
# IDispatch and other common OLE interfaces
registerServer(os.path.join(system32,'oleaut32.dll'))
registerServer(os.path.join(system32,'actxprxy.dll'))
if is64bit:
registerServer(os.path.join(sysWow64,'oleaut32.dll'),wow64=True)
registerServer(os.path.join(sysWow64,'actxprxy.dll'),wow64=True)
# IServiceProvider on windows 7 can become unregistered
if OSMajorMinor==(6,1): # Windows 7
registerServer(os.path.join(programFiles,'Internet Explorer','ieproxy.dll'))
if is64bit:
registerServer(os.path.join(programFilesX86,'Internet Explorer','ieproxy.dll'),wow64=True)
| avg_line_length: 40.051948 | max_line_length: 141 | alphanum_fraction: 0.759728 |

| hexsha: df64e239a2e467c8e4429dbeb7039f1aa9965ecc | size: 1,141 | ext: py | lang: Python |
| max_stars_repo_path: samza-test/src/main/python/integration_tests.py | max_stars_repo_name: xiefan46/samza | max_stars_repo_head_hexsha: daeecf51153c3984da90bfabdba69cd97449c86d | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 860 | max_stars_repo_stars_event_min_datetime: 2015-01-25T17:00:10.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T10:15:32.000Z |
| max_issues_repo_path: samza-test/src/main/python/integration_tests.py | max_issues_repo_name: xiefan46/samza | max_issues_repo_head_hexsha: daeecf51153c3984da90bfabdba69cd97449c86d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 837 | max_issues_repo_issues_event_min_datetime: 2015-07-23T18:48:57.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-14T22:49:57.000Z |
| max_forks_repo_path: samza-test/src/main/python/integration_tests.py | max_forks_repo_name: xiefan46/samza | max_forks_repo_head_hexsha: daeecf51153c3984da90bfabdba69cd97449c86d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 418 | max_forks_repo_forks_event_min_datetime: 2015-01-29T18:21:24.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-26T13:31:00.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
dir = os.path.dirname(os.path.abspath(__file__))
test = {
'deployment_code': os.path.join(dir, 'deployment.py'),
'perf_code': os.path.join(dir, 'perf.py'),
'configs_directory': os.path.join(dir, 'configs'),
'test_code': [
os.path.join(dir, 'tests', 'smoke_tests.py'),
os.path.join(dir, 'tests', 'performance_tests.py'),
],
}
| avg_line_length: 36.806452 | max_line_length: 62 | alphanum_fraction: 0.734443 |

| hexsha: 13802923fe01cec546d2888a939e947709686031 | size: 170,188 | ext: py | lang: Python |
| max_stars_repo_path: server/bottle.py | max_stars_repo_name: Perlatecnica/Blockly4Nucleo | max_stars_repo_head_hexsha: de917fdcd7df0019190ee6ca032d57287c245c12 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 23 | max_stars_repo_stars_event_min_datetime: 2017-01-03T02:35:57.000Z | max_stars_repo_stars_event_max_datetime: 2020-01-24T06:41:57.000Z |
| max_issues_repo_path: server/bottle.py | max_issues_repo_name: Perlatecnica/Blockly4Nucleo | max_issues_repo_head_hexsha: de917fdcd7df0019190ee6ca032d57287c245c12 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 15 | max_issues_repo_issues_event_min_datetime: 2017-02-16T04:31:57.000Z | max_issues_repo_issues_event_max_datetime: 2021-03-25T21:41:08.000Z |
| max_forks_repo_path: server/bottle.py | max_forks_repo_name: Perlatecnica/Blockly4Nucleo | max_forks_repo_head_hexsha: de917fdcd7df0019190ee6ca032d57287c245c12 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2017-02-15T03:31:47.000Z | max_forks_repo_forks_event_max_datetime: 2018-11-12T16:19:03.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2009-2018, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to _main() is at the end of the file.
def _cli_parse(args): # pragma: no coverage
from argparse import ArgumentParser
parser = ArgumentParser(prog=args[0], usage="%(prog)s [options] package.module:app")
opt = parser.add_argument
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opt('app', help='WSGI app entry point.', nargs='?')
cli_args = parser.parse_args(args[1:])
return cli_args, parser
def _cli_patch(cli_args): # pragma: no coverage
parsed_args, _ = _cli_parse(cli_args)
opts = parsed_args
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ##########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings, weakref, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
try:
from ujson import dumps as json_dumps, loads as json_lds
except ImportError:
from json import dumps as json_dumps, loads as json_lds
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
py3k = sys.version_info.major > 2
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie, Morsel, CookieError
    try:
        from collections.abc import MutableMapping as DictMixin  # Python 3.3+
    except ImportError:  # Python < 3.3; 'collections' alias removed in 3.10
        from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie, Morsel, CookieError
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
exec(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
if isinstance(s, unicode):
return s.encode(enc)
return b'' if s is None else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
return unicode("" if s is None else s)
tonat = touni if py3k else tob
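# Examples (added): tob(u'\xe4') == b'\xc3\xa4' and touni(b'\xc3\xa4') == u'\xe4';
# tonat picks whichever of the two matches the native str type.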
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
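# Examples (added): makelist('x') == ['x']; makelist(None) == [];
# makelist(('a', 'b')) == ['a', 'b'] -- strings are wrapped, sequences copied.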
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
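# Minimal sketch (added; not part of bottle): the first access to a
# cached_property computes the value and stores it in the instance __dict__,
# so later accesses never re-enter the descriptor.
def _example_cached_property():
    class Counter(object):
        calls = 0
        @cached_property
        def value(self):
            Counter.calls += 1
            return 42
    c = Counter()
    return c.value, c.value, Counter.calls  # (42, 42, 1)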
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events #######################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
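# Example (added): _re_flatten(r'/(x)/(?P<y>\d+)') == r'/(?:x)/(?:\d+)'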
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
    and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
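    # Usage sketch (added; the filter name 'cc' is hypothetical): a custom
    # filter restricting a wildcard to two lowercase letters, with no
    # to_python/to_url conversion:
    #     router.add_filter('cc', lambda conf: (r'[a-z]{2}', None, None))
    #     router.add('/lang/<code:cc>', 'GET', target='lang-handler')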
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
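# Illustrative sketch (added; not part of bottle): driving Router directly.
# The target may be any object; bottle itself stores Route instances here.
def _example_router_match():
    r = Router()
    r.add('/wiki/<page>', 'GET', target='wiki-handler')
    environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/wiki/Home'}
    return r.match(environ)  # ('wiki-handler', {'page': 'Home'})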
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
    turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = app.config._make_overlay()
self.config.load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
        depr(0, 13, "Route.get_config() is deprecated.",
"The Route.config property already includes values from the"
" application config for missing keys. Access it directly.")
return self.config.get(key, default)
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
@lazy_attribute
def _global_config(cls):
cfg = ConfigDict()
cfg.meta_set('catchall', 'validate', bool)
return cfg
def __init__(self, **kwargs):
#: A :class:`ConfigDict` for app specific configuration.
self.config = self._global_config._make_overlay()
self.config._add_change_listener(
functools.partial(self.trigger_hook, 'config'))
self.config.update({
"catchall": True
})
if kwargs.get('catchall') is False:
depr(0, 13, "Bottle(catchall) keyword argument.",
"The 'catchall' setting is now part of the app "
"configuration. Fix: `app.config['catchall'] = False`")
self.config['catchall'] = False
if kwargs.get('autojson') is False:
depr(0, 13, "Bottle(autojson) keyword argument.",
"The 'autojson' setting is now part of the app "
"configuration. Fix: `app.config['json.enable'] = False`")
self.config['json.disable'] = True
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = {'after_request'}
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
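    # Usage sketch (added; names are illustrative): the decorator form is
    # equivalent to app.add_hook('before_request', func):
    #     app = Bottle()
    #     @app.hook('before_request')
    #     def _open_resources():
    #         ...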
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
(:class:`Bottle` childs only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
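# A short usage sketch for the decorator defined above (illustrative only;
# `app` is an assumed `Bottle()` instance and `my_plugin` is hypothetical):
#
#     app = Bottle()
#
#     @app.route('/hello/<name>', method=['GET', 'POST'], apply=[my_plugin])
#     def hello(name):
#         return 'Hello %s' % name
#
# Extra keyword arguments end up as route-specific configuration and are
# visible to plugins via `route.config`.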
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500, callback=None):
""" Register an output handler for a HTTP error code. Can
be used as a decorator or called directly ::
def error_handler_500(error):
return 'error_handler_500'
app.error(code=500, callback=error_handler_500)
@app.error(404)
def error_handler_404(error):
return 'error_handler_404'
"""
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
self.error_handler[int(code)] = callback
return callback
return decorator(callback) if callback else decorator
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res, template_settings=dict(name='__ERROR_PAGE_TEMPLATE')))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
while True: # Remove in 0.14 together with RouteReset
out = None
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
out = route.call(**args)
break
except HTTPResponse as E:
out = E
break
except RouteReset:
depr(0, 13, "RouteReset exception deprecated",
"Call route.call() after route.reset() and "
"return the result.")
route.reset()
continue
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
try:
self.trigger_hook('after_request')
except HTTPResponse as E:
out = E
out.apply(response)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
environ['wsgi.errors'].flush()
out = HTTPError(500, "Internal Server Error", E, stacktrace)
out.apply(response)
return out
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
environ['wsgi.errors'].flush()
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
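# Because __call__ delegates to wsgi(), any Bottle instance can be handed to
# a standard WSGI server directly. A minimal sketch using only the standard
# library (host and port chosen for illustration):
#
#     from wsgiref.simple_server import make_server
#     make_server('127.0.0.1', 8080, app).serve_forever()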
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None, digestmod=hashlib.sha256):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret:
# See BaseResponse.set_cookie for details on signed cookies.
if value and value.startswith('!') and '?' in value:
sig, msg = map(tob, value[1:].split('?', 1))
hash = hmac.new(tob(secret), msg, digestmod=digestmod).digest()
if _lscmp(sig, base64.b64encode(hash)):
dst = pickle.loads(base64.b64decode(msg))
if dst and dst[0] == key:
return dst[1]
return default
return value or default
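# Signed-cookie round trip as described above (a sketch; cookie name and
# secret are made up):
#
#     response.set_cookie('account', 'alice', secret='s3cret')
#     ...
#     user = request.get_cookie('account', secret='s3cret')  # 'alice' or None
#
# The same `secret` (and `digestmod`) must be used for both calls; otherwise
# the signature check fails and the default value is returned.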
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
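# Consuming the property above inside a handler might look like this
# (illustrative sketch; the route path and keys are made up):
#
#     @app.post('/api/items')
#     def create_item():
#         data = request.json      # parsed dict/list, or None for empty body
#         if data is None:
#             abort(400, 'JSON body required.')
#         return {'received': data}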
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`FileUpload` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
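# Worked example for the shift above (values are illustrative):
#
#     # before: SCRIPT_NAME == '/app',     PATH_INFO == '/api/items'
#     request.path_shift(1)
#     # after:  SCRIPT_NAME == '/app/api', PATH_INFO == '/items'
#
# A negative shift moves segments back from SCRIPT_NAME to PATH_INFO.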
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = tonat(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: frozenset(('Content-Type', 'Content-Length')),
304: frozenset(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
cookies = copy._cookies = SimpleCookie()
for k,v in self._cookies.items():
cookies[k] = v.value
cookies[k].update(v) # also copy cookie attributes
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
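# Both spellings below are accepted by the property above (sketch):
#
#     response.status = 404                     # -> status_line '404 Not Found'
#     response.status = '404 Brain not found'   # custom reason phrase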
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', _hval(c.OutputString())))
if py3k:
out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, digestmod=hashlib.sha256, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param maxage: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this
cookie (default: off, requires Python 2.6 or newer).
:param samesite: disables third-party use for a cookie.
Allowed values: `lax` and `strict`.
In strict mode the cookie is never sent with cross-site requests.
In lax mode the cookie is also sent with top-level cross-site GET requests.
If neither `expires` nor `maxage` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Pickle is a potentially dangerous format. If an attacker
gains access to the secret key, they could forge cookies that execute
code on the server side when unpickled. Using pickle is discouraged and
support for it will be removed in later versions of bottle.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
# Monkey-patch Cookie lib to support 'SameSite' parameter
# https://tools.ietf.org/html/draft-west-first-party-cookies-07#section-4.1
Morsel._reserved.setdefault('samesite', 'SameSite')
if secret:
if not isinstance(value, basestring):
depr(0, 13, "Pickling of arbitrary objects into cookies is "
"deprecated.", "Only store strings in cookies. "
"JSON strings are fine, too.")
encoded = base64.b64encode(pickle.dumps([name, value], -1))
sig = base64.b64encode(hmac.new(tob(secret), encoded,
digestmod=digestmod).digest())
value = touni(tob('!') + sig + tob('?') + encoded)
elif not isinstance(value, basestring):
raise TypeError('Secret key required for non-string cookies.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key in ('max_age', 'maxage'): # 'maxage' variant added in 0.13
key = 'max-age'
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('same_site', 'samesite'): # 'samesite' variant added in 0.13
key = 'samesite'
if value.lower() not in ('lax', 'strict'):
raise CookieError("Invalid value samesite=%r (expected 'lax' or 'strict')" % (value,))
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key] = value
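# An illustrative call combining several documented options (all values are
# placeholders; `token` stands for whatever string the application stores):
#
#     response.set_cookie('session', token, secret='s3cret', max_age=3600,
#                         path='/', httponly=True, samesite='lax')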
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def setup(self, app):
app.config._define('json.enable', default=True, validate=bool,
help="Enable or disable automatic dict->json filter.")
app.config._define('json.ascii', default=False, validate=bool,
help="Use only 7-bit ASCII characters in output.")
app.config._define('json.indent', default=True, validate=bool,
help="Add whitespace to make json more readable.")
app.config._define('json.dump_func', default=None,
help="If defined, use this function to transform"
" dict into json. The other options no longer"
" apply.")
def apply(self, callback, route):
dumps = self.json_dumps
if not self.json_dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPResponse as resp:
rv = resp
if isinstance(rv, dict):
# Attempt to serialize; raises an exception on failure.
json_response = dumps(rv)
# Set the content type only if serialization succeeded.
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
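# With this plugin applied (it is installed by default), returning a dict
# from a route is enough to produce a JSON response -- a minimal sketch:
#
#     @app.get('/status')
#     def status():
#         return {'ok': True}   # serialized via json_dumps, content type set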
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
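# Behavioral sketch of the typed access above:
#
#     md = MultiDict(a='1')
#     md['a'] = '2'                              # appends; newest value wins
#     md['a']                                    # -> '2'
#     md.getall('a')                             # -> ['1', '2']
#     md.get('a', type=int)                      # -> 2
#     md.get('a', default=0, index=0, type=int)  # -> 1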
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
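# Attribute access sketch for the class above: missing keys and decode errors
# fall back to the default (an empty string) instead of raising.
#
#     name = request.forms.name   # '' if the 'name' field is missing
#     name = request.forms.getunicode('name', default='anonymous')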
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
_UNSET = object()
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, overlays and more.
This dict-like class is heavily optimized for read access. All read-only
methods as well as item access should be as fast as the built-in dict.
"""
__slots__ = ('_meta', '_change_listener', '_overlays', '_virtual_keys', '_source', '__weakref__')
def __init__(self):
self._meta = {}
self._change_listener = []
#: Weak references of overlays that need to be kept in sync.
self._overlays = []
#: Config that is the source for this overlay.
self._source = None
#: Keys of values copied from the source (values we do not own)
self._virtual_keys = set()
def load_module(self, path, squash=True):
"""Load values from a Python module.
Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
{'DEBUG': True, 'SQLITE.db': ':memory:'}
>>> c.load_module("config", False)
{'DEBUG': True, 'SQLITE': {'db': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
they are indented deeper than the first line of the value. Comments
are prefixed by ``#`` or ``;`` and may only appear on their own on
an otherwise empty line.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
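# A config file matching the description above might look like this
# (illustrative content; note that all values are read as strings):
#
#     [bottle]
#     debug = True
#
#     [sqlite]
#     db = /tmp/app.db
#
#     >>> c = ConfigDict().load_config('app.ini')
#     >>> c['debug'], c['sqlite.db']
#     ('True', '/tmp/app.db')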
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
self._virtual_keys.discard(key)
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
if key in self._virtual_keys:
raise KeyError("Virtual keys cannot be deleted: %s" % key)
if self._source and key in self._source:
# Not virtual, but present in source -> Restore virtual value
dict.__delitem__(self, key)
self._set_virtual(key, self._source[key])
else: # not virtual, not present in source. This is OUR value
self._on_change(key, None)
dict.__delitem__(self, key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _set_virtual(self, key, value):
""" Recursively set or update virtual keys. Do nothing if non-virtual
value is present. """
if key in self and key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
self._virtual_keys.add(key)
if key in self and self[key] is not value:
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def _delete_virtual(self, key):
""" Recursively delete virtual entry. Do nothing if key is not virtual.
"""
if key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
if key in self:
self._on_change(key, None)
dict.__delitem__(self, key)
self._virtual_keys.discard(key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
def _define(self, key, default=_UNSET, help=_UNSET, validate=_UNSET):
""" (Unstable) Shortcut for plugins to define own config parameters. """
if default is not _UNSET:
self.setdefault(key, default)
if help is not _UNSET:
self.meta_set(key, 'help', help)
if validate is not _UNSET:
self.meta_set(key, 'validate', validate)
def _iter_overlays(self):
for ref in self._overlays:
overlay = ref()
if overlay is not None:
yield overlay
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
cannot delete virtual keys, but you can overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
Unlike collections.ChainMap and most other implementations, this
approach does not resolve missing keys on demand, but instead
actively copies all values from the source to the overlay and keeps
track of virtual and non-virtual keys internally. This removes any
lookup overhead. Read access is as fast as a built-in dict for both
virtual and non-virtual keys.
Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
values and may result in a partly updated tree. Take extra care
here and make sure that on-change handlers never fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay
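# Overlay semantics in a nutshell (sketch):
#
#     root = ConfigDict()
#     root['a'] = 1
#     child = root._make_overlay()
#     child['a']        # -> 1 (virtual, copied from root)
#     child['a'] = 2    # now owned by the overlay; root['a'] stays 1
#     root['b'] = 3     # propagated downwards: child['b'] -> 3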
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
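# Typical use of the manager above (paths are placeholders):
#
#     res = ResourceManager()
#     res.add_path('./data/', base=__file__)
#     with res.open('defaults.json') as fp:
#         defaults = fp.read()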
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
def get_header(self, name, default=None):
""" Return the value of a header within the mulripart part. """
return self.headers.get(name, default)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
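# Usage sketch (illustrative only): saving an upload from a form field named
# 'data' inside a route handler; the field name and the target directory are
# assumptions for the example.
#
#     @route('/upload', method='POST')
#     def do_upload():
#         upload = request.files.get('data')
#         upload.save('/tmp/uploads')  # stored under the sanitized .filename
#         return 'OK'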
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
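# Usage sketch (illustrative only): abort() and redirect() both raise, so no
# code after them runs; the paths shown are made up.
#
#     @route('/restricted')
#     def restricted():
#         abort(401, "Sorry, access denied.")
#
#     @route('/wrong/url')
#     def wrong():
#         redirect('/right/url')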
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024, close=False):
""" Yield chunks from a range in a file, optionally closing it at the end.
No chunk is bigger than maxread. """
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part:
break
bytes -= len(part)
yield part
if close:
fp.close()
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
headers['Last-Modified'] = email.utils.formatdate(stats.st_mtime,
usegmt=True)
headers['Date'] = email.utils.formatdate(time.time(), usegmt=True)
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset, close=True)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
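# Usage sketch (illustrative only): serving files from below a fixed root; the
# '/static' prefix and the root directories are assumptions.
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='/var/www/static')
#
#     @route('/download/<name>')
#     def download(name):
#         return static_file(name, root='/var/www/files', download=True)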
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
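# Example (illustrative only): the classic RFC 1123 date parses to a UTC epoch.
#
#     parse_date('Sun, 06 Nov 1994 08:49:37 GMT')  # -> 784111777.0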
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
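# Example (illustrative only): 'QWxhZGRpbjpvcGVuIHNlc2FtZQ==' is the base64 of
# 'Aladdin:open sesame' (the RFC 2617 example credentials).
#
#     parse_auth('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
#     # -> ('Aladdin', 'open sesame')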
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
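# Example (illustrative only): ranges are clipped to the document length and
# the end index is exclusive.
#
#     list(parse_range_header('bytes=0-99,500-,-100', maxlen=1000))
#     # -> [(0, 100), (500, 1000), (900, 1000)]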
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
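# Example (illustrative only): a typical Accept header splits into
# (value, params) tuples.
#
#     _parse_http_header('text/html,text/plain;q=0.9,*/*;q=0.8')
#     # -> [('text/html', {}), ('text/plain', {'q': '0.9'}), ('*/*', {'q': '0.8'})]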
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
depr(0, 13, "cookie_encode() will be removed soon.",
"Do not use this API directly.")
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
depr(0, 13, "cookie_decode() will be removed soon.",
"Do not use this API directly.")
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
depr(0, 13, "cookie_is_encoded() will be removed soon.",
"Do not use this API directly.")
return bool(data.startswith(tob('!')) and tob('?') in data)
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&').replace('<', '<').replace('>', '>')\
.replace('"', '"').replace("'", ''')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
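# Examples (illustrative only):
#
#     html_escape('<b>&</b>')  # -> '&lt;b&gt;&amp;&lt;/b&gt;'
#     html_quote('a "b"\nc')   # -> '"a &quot;b&quot;&#10;c"'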
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
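# Example (illustrative only): shifting one fragment moves it from PATH_INFO
# onto SCRIPT_NAME (and back with a negative shift).
#
#     path_shift('/app', '/blog/5', shift=1)   # -> ('/app/blog', '/5')
#     path_shift('/app/blog', '/5', shift=-1)  # -> ('/app', '/blog/5')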
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
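# Usage sketch (illustrative only): protecting a route with a hypothetical
# credential check; is_valid_user and its hard-coded values are assumptions.
#
#     def is_valid_user(user, password):
#         return user == 'admin' and password == 'secret'
#
#     @route('/admin')
#     @auth_basic(is_valid_user)
#     def admin_panel():
#         return 'Welcome!'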
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
depr(0, 13, "The wsgi server part of cherrypy was split into a new "
"project called 'cheroot'.", "Use the 'cheroot' server "
"adapter instead of cherrypy.")
from cherrypy import wsgiserver # This will fail for CherryPy >= 9
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class CherootServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cheroot import wsgi
from cheroot.ssl import builtin
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.pop('certfile', None)
keyfile = self.options.pop('keyfile', None)
chainfile = self.options.pop('chainfile', None)
server = wsgi.Server(**self.options)
if certfile and keyfile:
server.ssl_adapter = builtin.BuiltinSSLAdapter(
certfile, keyfile, chainfile)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
CherootServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'cheroot': CherootServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
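# Usage sketches (illustrative only); the host/port values and the waitress
# backend are example choices, not requirements.
#
#     run(host='localhost', port=8080)                   # default wsgiref server
#     run(server='waitress', host='0.0.0.0', port=8080)  # needs waitress installed
#     run(reloader=True, debug=True)                     # development setup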
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(BottleException):
pass
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.name)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
exec(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''(
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
# add the flag in front of the regexp to avoid Deprecation warning (see Issue #949)
# verbose and dot-matches-newline mode
_re_tok = '(?mx)' + _re_tok
_re_inl = '(?mx)' + _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line = _blk1
self.indent += 1
self.indent_mod -= 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line = _blk2
self.indent_mod -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
elif _end:
self.indent -= 1
self.indent_mod += 1
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
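# Usage sketches (illustrative only): the first argument may be a template
# name looked up in TEMPLATE_PATH or an inline template string; 'hello' and
# the variables are made-up names.
#
#     template('hello', name='World')            # renders e.g. ./views/hello.tpl
#     template('Hello {{name}}!', name='World')  # -> 'Hello World!'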
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
        The handler can control its behavior like this:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
          This includes returning an HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
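# Usage sketch (illustrative only): the handler returns a dict of template
# variables; 'list_users' and the data are assumptions.
#
#     @route('/users')
#     @view('list_users')
#     def list_users():
#         return dict(users=['alice', 'bob'])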
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[451] = "Unavailable For Legal Reasons" # RFC 7725
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
%%try:
%%exc = repr(e.exception)
%%except:
%%exc = '<unprintable %%s object>' %% type(e.exception).__name__
%%end
<pre>{{exc}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize the app stack (creation of the first empty Bottle app is deferred until needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
def _main(argv): # pragma: no coverage
args, parser = _cli_parse(argv)
def _cli_error(cli_msg):
parser.print_help()
_stderr('\nError: %s\n' % cli_msg)
sys.exit(1)
if args.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args.app:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (args.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in args.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error as parse_error:
_cli_error(parse_error)
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError) as error:
_cli_error("Unable to parse config file %r: %s" % (cfile, error))
for cval in args.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args.app,
host=host,
port=int(port),
server=args.server,
reloader=args.reload,
plugins=args.plugin,
debug=args.debug,
config=config)
if __name__ == '__main__': # pragma: no coverage
_main(sys.argv)
| 38.538949 | 117 | 0.581598 |
| 55dfc5c46fc0b1c07805669b0bc3eb1224dca2c8 | 6,308 | py | Python | doc/source/conf.py | hugovk/smmap | f0b322afcf6934501bade7776c5331619485a06c | ["BSD-3-Clause"] | 41 | 2015-01-16T12:05:11.000Z | 2021-10-15T05:57:53.000Z | doc/source/conf.py | hugovk/smmap | f0b322afcf6934501bade7776c5331619485a06c | ["BSD-3-Clause"] | 25 | 2015-01-07T12:44:54.000Z | 2022-01-16T17:17:01.000Z | doc/source/conf.py | hugovk/smmap | f0b322afcf6934501bade7776c5331619485a06c | ["BSD-3-Clause"] | 19 | 2015-04-12T17:15:57.000Z | 2022-03-15T21:32:38.000Z |
#
# smmap documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 8 15:14:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../../'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'smmap'
copyright = '2011, Sebastian Thiel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.0'
# The full version, including alpha/beta/rc tags.
release = '0.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'smmapdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'smmap.tex', 'smmap Documentation',
'Sebastian Thiel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| 32.348718
| 80
| 0.722416
|
c80fd2ba15effe3c21ce7d105cda1b815abb4c7b
| 1,829
|
py
|
Python
|
oc_chess_club/models/match.py
|
PabloLec/oc_chess_club
|
69a6ce3111afadce73710d314579af6e6f0cbce6
|
[
"MIT"
] | null | null | null |
oc_chess_club/models/match.py
|
PabloLec/oc_chess_club
|
69a6ce3111afadce73710d314579af6e6f0cbce6
|
[
"MIT"
] | null | null | null |
oc_chess_club/models/match.py
|
PabloLec/oc_chess_club
|
69a6ce3111afadce73710d314579af6e6f0cbce6
|
[
"MIT"
] | 1
|
2021-07-15T06:49:39.000Z
|
2021-07-15T06:49:39.000Z
|
class Match:
"""Model for match. All matches are associated with a round which is also associated with a tournament.
Attributes:
tournament_id (int): Unique id of the parent tournament.
round_id (int): Unique id of the parent round.
winner (int): Winner of the match. 1 for Player 1, 2 for Player 2, 0 for a draw and None if TBD.
id_num (int): Unique id of this match.
player_1 (Player): Arbitrary first player.
player_2 (Player): Arbitrary second player.
"""
def __init__(self, players: tuple, tournament_id: int, round_id: int, winner: int, id_num: int):
"""Constructor for Match.
Args:
players (tuple[Player]): The two participating players.
tournament_id (int): Unique id of the parent tournament.
round_id (int): Unique id of the parent round.
winner (int): Winner of the match. 1 for Player 1, 2 for Player 2, 0 for a draw and None if TBD.
id_num (int): Unique id of this match.
"""
self.tournament_id = tournament_id
self.round_id = round_id
self.winner = winner
self.id_num = id_num
self.player_1 = players[0]
self.player_2 = players[1]
def __str__(self):
stdout_content = " - {f_name_1} {l_name_1} ({elo_1}) vs {f_name_2} {l_name_2} ({elo_2})\n".format(
f_name_1=self.player_1.first_name,
l_name_1=self.player_1.last_name,
elo_1=self.player_1.elo,
f_name_2=self.player_2.first_name,
l_name_2=self.player_2.last_name,
elo_2=self.player_2.elo,
)
stdout_content += " Winner : {winner}\n".format(winner=self.winner)
stdout_content += " id : {id}\n".format(id=self.id_num)
return stdout_content
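# Usage sketch (added; assumes a minimal stand-in for the Player model defined
# elsewhere in oc_chess_club -- only the attributes read by Match.__str__ are stubbed):
if __name__ == "__main__":
    from types import SimpleNamespace
    _p1 = SimpleNamespace(first_name="Alice", last_name="Martin", elo=1850)
    _p2 = SimpleNamespace(first_name="Bruno", last_name="Durand", elo=1790)
    print(Match(players=(_p1, _p2), tournament_id=1, round_id=1, winner=None, id_num=1))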
| 40.644444
| 109
| 0.616184
|
4e2fdbeecc62fbd4931e7d9d722f589af37b6cf9
| 7,834
|
py
|
Python
|
docs/conf.py
|
KevalChotaliya/EDS-Covid-19
|
f7e0bd600e2847a6004a50d81422bf6bc013fe9c
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
KevalChotaliya/EDS-Covid-19
|
f7e0bd600e2847a6004a50d81422bf6bc013fe9c
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
KevalChotaliya/EDS-Covid-19
|
f7e0bd600e2847a6004a50d81422bf6bc013fe9c
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# EDS Covid-19 documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'EDS Covid-19 '
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EDS Covid-19doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'EDS Covid-19.tex',
u'EDS Covid-19 Documentation',
u"Keval Chotaliya", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'EDS Covid-19', u'EDS Covid-19 Documentation',
[u"Keval Chotaliya"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'EDS Covid-19', u'EDS Covid-19 Documentation',
u"Keval Chotaliya", 'EDS Covid-19 ',
'Analysis of Covid-19 Data for different countries', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.97551
| 80
| 0.705514
|
439c78aaa87b39a4eac1f322ab0de26f3d600e3b
| 4,096
|
py
|
Python
|
retriever.py
|
raleighlittles/ArrisCableModemMonitoring
|
3d33f5ac24322a6403fec24be008cc23463d8be1
|
[
"MIT"
] | 15
|
2019-08-19T22:17:07.000Z
|
2021-10-07T06:35:32.000Z
|
retriever.py
|
raleighlittles/ArrisCableModemMonitoring
|
3d33f5ac24322a6403fec24be008cc23463d8be1
|
[
"MIT"
] | null | null | null |
retriever.py
|
raleighlittles/ArrisCableModemMonitoring
|
3d33f5ac24322a6403fec24be008cc23463d8be1
|
[
"MIT"
] | 7
|
2019-10-23T07:31:04.000Z
|
2022-01-17T07:00:39.000Z
|
import datetime
import re
import socket
import typing
import bs4
import requests
# TODO: Extract the IP address of the cable-modem to be a parameter passed through the command line
def make_page_request(url: str = "http://192.168.100.1") -> bytes:
    """
    Makes a GET request to the specified URL and returns the raw HTML of that page as bytes
:param url:
:return:
"""
page = requests.get(url)
return page.content
def extract_table_data(status_html_string: str) -> typing.List:
"""
    Finds all tables with class 'simpleTable' in the given page HTML and returns them as a list of bs4 elements.
:param status_html_string:
:return:
"""
beautifulsoup = bs4.BeautifulSoup(status_html_string, "html.parser")
status_table = beautifulsoup.find_all("table", attrs={'class' : 'simpleTable'})
return status_table
def construct_list_from_table_html(table_html: bs4.element.Tag) -> typing.List:
    """
    Parses a single table element into a list of lists, where each inner list represents a row in that table.
    """
    data = []
rows = table_html.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [el.text.strip() for el in cols]
data.append([element for element in cols if element])
return data
def parse_event_log(event_log_list) -> typing.List:
"""
Parses the event log taken in as a list -- mostly timestamp conversions and removing non-numeric values from the priority list
:param event_log_list:
:return: A list where all timestamps are replaced with UTC timestamps, and priority level is integer-type only
"""
parsed_event_log_list = []
for row_number, row_data in enumerate(event_log_list[1:]): # skip the header
parsed_event_log_element = []
# TODO : Change this to a list comprehension
if len(row_data) != 3:
continue
raw_time, raw_priority, description = row_data
if raw_time == "Time Not Established":
first_time_stamp = datetime.datetime.fromtimestamp(0).strftime('%Y-%m-%d %H:%M:%S')
parsed_event_log_element.append(first_time_stamp)
else:
parsed_timestamp = datetime.datetime.strptime(raw_time, '%a %b %d %H:%M:%S %Y').strftime(
"%Y-%m-%d %H:%M:%S")
parsed_event_log_element.append(parsed_timestamp)
        priority_level = re.sub('[^0-9]', '', raw_priority)
parsed_event_log_element.append(priority_level)
# raw_priority = priority_level
parsed_event_log_element.append(description)
parsed_event_log_list.append(parsed_event_log_element)
return parsed_event_log_list
def create_influx_ready_array(table_data: typing.List, direction: str) -> typing.List:
"""
    Given a list of lists representing the table data, this function converts them to the standard array of JSON objects that InfluxDB requires
:param table_data:
:return:
"""
measurements_array = [] # array of dicts
rows_list = table_data[1:]
column_headers_list = rows_list[0]
number_of_channels = len(rows_list) - 1
for channel_row_number, channel_row_data in enumerate(rows_list[1:]):
# start at column #2, first column only contains ID data
for value_index, value_to_report in enumerate(channel_row_data):
if value_index in range(0, 4):
# The values in the 0th-3rd columns aren't numeric
continue
measurement_dict = {}
# For some reason, Grafana doesn't play nice with using spaces in measurement keys?
measurement_dict["measurement"] = column_headers_list[value_index].replace(" ", "_")
measurement_dict["tags"] = {"host": socket.gethostname(),
"channel_direction": str(direction),
"channel_id": channel_row_data[3]}
measurement_dict["fields"] = {
"value": float(re.sub('[^[0-9]', '', value_to_report))}
measurement_dict["time"] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
measurements_array.append(measurement_dict)
return measurements_array
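# End-to-end sketch (added for illustration; assumes the modem answers at the default
# URL and that `influx_client` is a pre-configured InfluxDB client -- both are
# placeholders, so the calls are left commented):
#   html = make_page_request()
#   tables = extract_table_data(html)
#   downstream_rows = construct_list_from_table_html(tables[0])
#   points = create_influx_ready_array(downstream_rows, direction="downstream")
#   influx_client.write_points(points)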
| 35.617391
| 144
| 0.656982
|
14028e3ca65236be785870f0cca3f66176e846fc
| 693
|
py
|
Python
|
version1/AIbird.py
|
CyrusEmber/FlappyBird
|
a0225de6fc4c152ba4f5e7e46e8f6bc613c065f2
|
[
"MIT"
] | 1
|
2021-11-15T04:58:46.000Z
|
2021-11-15T04:58:46.000Z
|
version1/AIbird.py
|
CyrusEmber/FlappyBird
|
a0225de6fc4c152ba4f5e7e46e8f6bc613c065f2
|
[
"MIT"
] | null | null | null |
version1/AIbird.py
|
CyrusEmber/FlappyBird
|
a0225de6fc4c152ba4f5e7e46e8f6bc613c065f2
|
[
"MIT"
] | null | null | null |
import bird
from settings import SPACE, JUMP_SPEED
from version1.CGP import create_pop
class AIBird(bird.Bird):
def __init__(self, individual=None, *args, **kwargs):
super(AIBird, self).__init__(*args, **kwargs)
self.individual = individual
self.individual.find_active_node()
self.score = 0
def update(self, dt):
self.score += 0.1
super(AIBird, self).update(dt)
if self.dead:
self.individual.score = self.score
def check_flap(self, x, y):
y_input = self.y - y - SPACE / 2 + 50
x_input = x - self.x
if self.individual.eval(x_input, y_input) > 0:
self.velocityY = JUMP_SPEED
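# Decision-rule sketch (added for illustration; mirrors check_flap as a pure function
# so it can be exercised without pyglet or a CGP individual -- eval_fn is a
# hypothetical stand-in for individual.eval):
def should_flap(eval_fn, bird_x, bird_y, pipe_x, pipe_y, space=SPACE, offset=50):
    y_input = bird_y - pipe_y - space / 2 + offset
    x_input = pipe_x - bird_x
    return eval_fn(x_input, y_input) > 0
if __name__ == "__main__":
    # an "individual" that flaps whenever the bird sits above the gap centre
    assert should_flap(lambda x, y: y, bird_x=50, bird_y=300, pipe_x=200, pipe_y=100, space=100)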
| 28.875
| 57
| 0.616162
|
a5f84f80270e445d95b6f7e49f871b6bba9d9451
| 3,984
|
py
|
Python
|
oracle_eval/ideal_mask.py
|
sevagh/mss-oracle-experiments
|
991357fe44f5fcdfad98e187ed11f14e2fd316c5
|
[
"MIT"
] | 1
|
2021-05-10T21:55:26.000Z
|
2021-05-10T21:55:26.000Z
|
oracle_eval/ideal_mask.py
|
sevagh/mss-oracle-experiments
|
991357fe44f5fcdfad98e187ed11f14e2fd316c5
|
[
"MIT"
] | null | null | null |
oracle_eval/ideal_mask.py
|
sevagh/mss-oracle-experiments
|
991357fe44f5fcdfad98e187ed11f14e2fd316c5
|
[
"MIT"
] | null | null | null |
import sys
import os
import musdb
import gc
import itertools
import museval
import numpy as np
import functools
import argparse
import tqdm
import scipy
from scipy.signal import stft, istft
import json
from shared import TFTransform
from oracle import ideal_mask, ideal_mixphase, ideal_mask_fbin
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluate Ideal Ratio Mask'
)
parser.add_argument(
'--audio_dir',
nargs='?',
help='Folder where audio results are saved',
default=None,
)
parser.add_argument(
'--eval_dir',
nargs='?',
help='Folder where evaluation results are saved'
)
parser.add_argument(
'config_file',
help='json file with time-frequency (stft, cqt) evaluation configs',
)
args = parser.parse_args()
max_tracks = int(os.getenv('MUSDB_MAX_TRACKS', sys.maxsize))
# initiate musdb with validation tracks
mus = musdb.DB(subsets='train', split='valid', is_wav=True)
# accumulate all time-frequency configs to compare
tfs = []
with open(args.config_file) as jf:
config = json.load(jf)
tmp = None
for stft_win in config.get('stft_configs', {}).get('window_sizes', []):
tfs.append(
TFTransform(
44100,
transform_type="stft",
window=stft_win
)
)
for nsgt_conf in config.get('nsgt_configs', []):
tfs.append(
TFTransform(
44100,
transform_type="nsgt",
fscale=nsgt_conf['scale'],
fmin=nsgt_conf['fmin'],
fbins=nsgt_conf['bins'],
fgamma=nsgt_conf.get('gamma', 0.0),
sllen=nsgt_conf.get('sllen', 32768),
trlen=nsgt_conf.get('trlen', 8192),
)
)
masks = [
{'power': 1, 'binary': False},
{'power': 1, 'binary': False, 'phasemix': True},
#{'power': 1, 'binary': False, 'fbin': True},
#{'power': 2, 'binary': False}, #-- why test these since no NNs actually use them irl
#{'power': 1, 'binary': True},
#{'power': 2, 'binary': True},
]
mss_evaluations = list(itertools.product(mus.tracks[:max_tracks], tfs, masks))
for (track, tf, mask) in tqdm.tqdm(mss_evaluations):
N = track.audio.shape[0] # remember number of samples for future use
# construct mask name e.g. irm1, ibm2
mask_name = 'i'
if mask['binary']:
mask_name += 'b'
else:
mask_name += 'r'
mask_name += f"m{str(mask['power'])}"
if mask.get('phasemix', False):
mask_name = 'mpi'
if mask.get('fbin', False):
mask_name = 'fbin'
name = mask_name
if tf.name != '':
name += f'-{tf.name}'
est = None
est_path = os.path.join(args.eval_dir, f'{name}') if args.eval_dir else None
aud_path = os.path.join(args.audio_dir, f'{name}') if args.audio_dir else None
if not mask.get('phasemix', False) and not mask.get('fbin', False):
# ideal mask
est, _ = ideal_mask(
track,
tf,
mask['power'],
mask['binary'],
0.5,
eval_dir=est_path)
elif mask.get('phasemix', False):
est, _ = ideal_mixphase(
track,
tf,
eval_dir=est_path,
dur=7.4,
start=46.5
)
elif mask.get('fbin', False):
est, _ = ideal_mask_fbin(
track,
tf,
eval_dir=est_path)
gc.collect()
if args.audio_dir:
mus.save_estimates(est, track, aud_path)
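# Invocation sketch (added for illustration; the file and directory names are
# placeholders, and the config shape below is inferred from the json parsing
# above rather than taken from the repo's own docs):
#   MUSDB_MAX_TRACKS=5 python ideal_mask.py --eval_dir ./eval --audio_dir ./audio config.json
# where config.json might look like:
#   {
#     "stft_configs": {"window_sizes": [2048, 4096]},
#     "nsgt_configs": [{"scale": "cqlog", "fmin": 20.0, "bins": 96}]
#   }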
| 28.457143
| 97
| 0.517068
|
d7c3c854aca855554eb8309cecac9ea125144719
| 196
|
py
|
Python
|
model/__init__.py
|
xiankgx/U-2-Net
|
ab3ce1c44135579c14716fe9594a355547c02182
|
[
"Apache-2.0"
] | null | null | null |
model/__init__.py
|
xiankgx/U-2-Net
|
ab3ce1c44135579c14716fe9594a355547c02182
|
[
"Apache-2.0"
] | null | null | null |
model/__init__.py
|
xiankgx/U-2-Net
|
ab3ce1c44135579c14716fe9594a355547c02182
|
[
"Apache-2.0"
] | null | null | null |
from .custom import CustomNet
from .discriminator import MultiScaleNLayerDiscriminator
from .u2net import U2NET, U2NETP, u2net_heavy, u2net_portable, u2net_standard
from .vgg import VGG19Features
| 39.2
| 77
| 0.857143
|
96e1055f7a91ea733ffdb6e5960812f9f0d56dea
| 288
|
py
|
Python
|
examples/spot/wallet/disable_fast_withdraw.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 512
|
2021-06-15T08:52:44.000Z
|
2022-03-31T09:49:53.000Z
|
examples/spot/wallet/disable_fast_withdraw.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 75
|
2021-06-20T13:49:50.000Z
|
2022-03-30T02:45:31.000Z
|
examples/spot/wallet/disable_fast_withdraw.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 156
|
2021-06-18T11:56:36.000Z
|
2022-03-29T16:34:22.000Z
|
#!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
key = ""
secret = ""
spot_client = Client(key, secret, show_header=True)
logging.info(spot_client.disable_fast_withdraw())
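# Credential-handling sketch (an added assumption about typical usage, not part of
# the original example): load the API key/secret from the environment instead of
# hard-coding them above, e.g.:
#   import os
#   key = os.environ.get("BINANCE_API_KEY", "")
#   secret = os.environ.get("BINANCE_API_SECRET", "")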
| 20.571429
| 51
| 0.784722
|
1ad608d5bb6e871b6b4837d4c611a7283c397b89
| 1,554
|
py
|
Python
|
tests/sentry/incidents/test_receivers.py
|
lauryndbrown/sentry
|
c5304e303966566386f5e61df1b72624a30803b4
|
[
"BSD-3-Clause"
] | 1
|
2019-07-29T16:21:39.000Z
|
2019-07-29T16:21:39.000Z
|
tests/sentry/incidents/test_receivers.py
|
lauryndbrown/sentry
|
c5304e303966566386f5e61df1b72624a30803b4
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T14:54:44.000Z
|
2020-11-19T21:54:19.000Z
|
tests/sentry/incidents/test_receivers.py
|
lauryndbrown/sentry
|
c5304e303966566386f5e61df1b72624a30803b4
|
[
"BSD-3-Clause"
] | 1
|
2017-02-09T06:36:57.000Z
|
2017-02-09T06:36:57.000Z
|
from __future__ import absolute_import
from sentry.incidents.models import IncidentSuspectCommit
from sentry.models.commit import Commit
from sentry.models.release import Release
from sentry.models.releasecommit import ReleaseCommit
from sentry.models.repository import Repository
from sentry.signals import release_commits_updated
from sentry.testutils import TestCase
class HandleReleaseCommitsUpdatedTest(TestCase):
def test(self):
release = self.create_release(project=self.project, version='something')
self.repo = Repository.objects.create(
organization_id=self.organization.id,
name=self.organization.id,
)
release.set_commits([
{
'id': 'a' * 40,
'repository': self.repo.name,
'author_email': 'bob@example.com',
'author_name': 'Bob',
},
])
commit = Commit.objects.get(releasecommit__release=release)
incident = self.create_incident()
ReleaseCommit.objects.filter(release=release).delete()
IncidentSuspectCommit.objects.create(
incident=incident,
commit=commit,
order=1,
)
with self.tasks():
release_commits_updated.send_robust(
release=release,
removed_commit_ids=set([commit.id]),
added_commit_ids=set([]),
sender=Release,
)
assert not IncidentSuspectCommit.objects.filter(incident=incident).exists()
| 34.533333
| 87
| 0.636422
|
5e565568ecbf91ae2f2ea95c59bc54cfe4445ea1
| 4,947
|
py
|
Python
|
tests/test_cli/test_csr.py
|
lewoudar/certipie
|
af586d1f9e376f6192aa5f8c24d33417b1e47273
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli/test_csr.py
|
lewoudar/certipie
|
af586d1f9e376f6192aa5f8c24d33417b1e47273
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli/test_csr.py
|
lewoudar/certipie
|
af586d1f9e376f6192aa5f8c24d33417b1e47273
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from certipie.cli.main import cert
from certipie.core import create_private_key
from tests.helpers import assert_csr, skip_mac_os
@pytest.fixture()
def base_arguments() -> list[str]:
return [
'csr',
'-c',
'FR',
'-s',
'Ile-de-France',
'-C',
'Paris',
'-o',
'hell yeah',
'-n',
'foo.com',
'-a',
'foo.com,*.bar.com',
]
@pytest.mark.parametrize('country', ['c', 'CAM'])
def test_should_print_error_when_country_is_not_correct(runner, base_arguments, country):
base_arguments[2] = country
result = runner.invoke(cert, base_arguments)
assert result.exit_code == 2
assert 'country must be a 2 letters string' in result.output
@pytest.mark.parametrize('domain', ['4', 'foo'])
def test_should_print_error_when_common_name_is_not_correct(runner, base_arguments, domain):
base_arguments[10] = domain
result = runner.invoke(cert, base_arguments)
assert result.exit_code == 2
assert f'{domain} is not a valid domain name' in result.output
@pytest.mark.parametrize('alternative_name', ['4', 'foo'])
def test_should_print_error_when_alternative_name_is_not_correct(runner, base_arguments, alternative_name):
base_arguments[12] = f'bar.com,{alternative_name}'
result = runner.invoke(cert, base_arguments)
assert result.exit_code == 2
assert f'These items are not domain names: {[alternative_name]}' in result.output
def test_should_print_error_if_key_does_not_exist(runner, base_arguments):
base_arguments.extend(['-k', 'foo.key'])
result = runner.invoke(cert, base_arguments)
assert result.exit_code == 2
assert 'key' in result.output
assert 'foo.key' in result.output
def test_should_print_error_when_key_is_not_correct(tmp_path, runner, base_arguments):
key_path = tmp_path / 'key.pem'
key_path.write_text('fake private key')
base_arguments.extend(['-k', f'{key_path}'])
result = runner.invoke(cert, base_arguments, input='\n')
assert result.exit_code == 2
assert 'The key file is not valid or the algorithm used is unsupported.' in result.output
def test_should_print_error_if_directory_does_not_exist(runner, base_arguments):
base_arguments.extend(['-d', 'fake_dir'])
result = runner.invoke(cert, base_arguments)
assert result.exit_code == 2
assert 'directory' in result.output
assert 'fake_dir' in result.output
TO_PARAMETRIZE = (
'country_option',
'state_option',
'city_option',
'organization_option',
'common_name_option',
'alt_name_option',
)
@skip_mac_os
@pytest.mark.parametrize(
TO_PARAMETRIZE,
[
('-c', '-s', '-C', '-o', '-n', '-a'),
('--country', '--state', '--city', '--organization', '--name', '--alt-names'),
],
)
def test_should_create_csr_without_giving_private_key(
runner,
isolated_path,
country_option,
state_option,
city_option,
organization_option,
common_name_option,
alt_name_option,
):
result = runner.invoke(
cert,
[
'csr',
country_option,
'FR',
state_option,
'Ile-de-France',
city_option,
'Paris',
organization_option,
'hell yeah',
common_name_option,
'foo.com',
alt_name_option,
'foo.com,*.bar.com',
],
)
assert result.exit_code == 0
assert result.output == f'The certificate signing request has been successfully created in {isolated_path}\n'
paths = [path for path in isolated_path.iterdir()]
assert len(paths) == 2
assert_csr(paths)
@pytest.mark.parametrize(('filename_option', 'directory_option'), [('-f', '-d'), ('--filename', '--directory')])
def test_should_create_csr_with_given_private_key_and_passphrase(
runner, tmp_path, private_key, base_arguments, filename_option, directory_option
):
base_arguments.extend([filename_option, 'my_csr.pem', directory_option, tmp_path, '-k', private_key])
result = runner.invoke(cert, base_arguments, input='passphrase\n')
assert result.exit_code == 0
assert f'The certificate signing request has been successfully created in {tmp_path}\n' in result.output
paths = [path for path in tmp_path.iterdir()]
assert len(paths) == 2
assert_csr(paths, 'my_csr')
def test_should_create_csr_with_given_private_key_and_no_passphrase(runner, tmp_path, base_arguments):
key_path = tmp_path / 'key.pem'
create_private_key(f'{key_path}')
base_arguments.extend(['-d', tmp_path, '-k', key_path])
result = runner.invoke(cert, base_arguments, input='\n')
assert result.exit_code == 0
assert f'The certificate signing request has been successfully created in {tmp_path}\n' in result.output
paths = [path for path in tmp_path.iterdir()]
assert len(paths) == 2
assert_csr(paths)
| 30.164634
| 113
| 0.671316
|
fe46a29406644d7259140815a638fd67fb26d14c
| 7,949
|
py
|
Python
|
streamselect/adaptive_learning/discrete_segment.py
|
BenHals/streamselect
|
ca5e80f3a8a31a38ac52bccfd92528d73f387a6a
|
[
"BSD-3-Clause"
] | null | null | null |
streamselect/adaptive_learning/discrete_segment.py
|
BenHals/streamselect
|
ca5e80f3a8a31a38ac52bccfd92528d73f387a6a
|
[
"BSD-3-Clause"
] | null | null | null |
streamselect/adaptive_learning/discrete_segment.py
|
BenHals/streamselect
|
ca5e80f3a8a31a38ac52bccfd92528d73f387a6a
|
[
"BSD-3-Clause"
] | null | null | null |
""" A simple discrete segment adaptive learning system. """
from typing import Callable, Optional
from river.base import Classifier, DriftDetector
from streamselect.adaptive_learning import (
BaseAdaptiveLearner,
BaseBufferedAdaptiveLearner,
get_increasing_buffer_scheduler,
)
from streamselect.concept_representations import ConceptRepresentation
from streamselect.repository import RepresentationComparer, ValuationPolicy
from streamselect.states import State
from streamselect.utils import Observation
class DiscreteSegmentAL(BaseAdaptiveLearner):
"""A discrete segment adaptive learning system considers each
    data stream segment with a contiguous concept to be distinct from
    any previous segment. This means previous states are always irrelevant
and adaptation can simply be constructing a new classifier.
As we do not need to consider previous states, we can set the repository size to 1."""
def __init__(
self,
classifier_constructor: Callable[[], Classifier],
representation_constructor: Callable[[int, int, str, int], ConceptRepresentation],
representation_comparer: RepresentationComparer,
drift_detector_constructor: Callable[[], DriftDetector],
representation_update_period: int = 1,
train_representation: bool = True,
representation_window_size: int = 1,
drift_detection_mode: str = "any",
) -> None:
"""
Parameters
----------
classifier_constructor: Callable[[], Classifier]
A function to generate a new classifier.
representation_constructor: Callable[[int, int, str, int], ConceptRepresentation]
A function to generate a new concept representation taking in:
representation_window_size, state_id, mode and update_period.
representation_comparer: RepresentationComparer
An object capable of calculating similarity between two representations.
drift_detector_constructor: Callable[[], DriftDetector]
A function to generate an object capable of detecting drift in a univariate stream.
representation_update_period: int
Default: 1
The number of timesteps between representation updates.
train_representation: bool
Whether or not new states train representations.
Must be set to automatically construct states.
representation_window_size: int
Default: 1
The number of observations to construct a concept representation over.
        drift_detection_mode: str["any", "lower", "higher"]
Default: "any"
How change is interpreted as concept drift.
"any": Any significant change in relevance is detected as drift.
"lower": Significant changes where the new value is lower than the mean is detected.
"higher": Significant changes where the new value is higher than the mean is detected.
"""
super().__init__(
classifier_constructor=classifier_constructor,
representation_constructor=representation_constructor,
representation_comparer=representation_comparer,
drift_detector_constructor=drift_detector_constructor,
representation_update_period=representation_update_period,
max_size=1,
valuation_policy=ValuationPolicy.FIFO,
train_representation=train_representation,
representation_window_size=representation_window_size,
construct_pair_representations=False,
prediction_mode="active",
background_state_mode=None,
drift_detection_mode=drift_detection_mode,
)
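# Construction sketch (added; hypothetical wiring for illustration: ADWIN and
# HoeffdingTreeClassifier exist in river, but `my_representation_factory` and
# `my_comparer` are placeholder names -- the concrete representation and comparer
# classes live elsewhere in streamselect, so the calls are left commented):
#   from river.drift import ADWIN
#   from river.tree import HoeffdingTreeClassifier
#   al_system = DiscreteSegmentAL(
#       classifier_constructor=HoeffdingTreeClassifier,
#       representation_constructor=my_representation_factory,
#       representation_comparer=my_comparer,
#       drift_detector_constructor=ADWIN,
#   )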
class BufferedDiscreteSegmentAL(BaseBufferedAdaptiveLearner):
"""A discrete segment adaptive learning system considers each
    data stream segment with a contiguous concept to be distinct from
    any previous segment. This means previous states are always irrelevant
and adaptation can simply be constructing a new classifier.
As we do not need to consider previous states, we can set the repository size to 1.
Uses a buffer to delay learning from new observations."""
def __init__(
self,
classifier_constructor: Callable[[], Classifier],
representation_constructor: Callable[[int, int, str, int], ConceptRepresentation],
representation_comparer: RepresentationComparer,
drift_detector_constructor: Callable[[], DriftDetector],
representation_update_period: int = 1,
train_representation: bool = True,
representation_window_size: int = 1,
drift_detection_mode: str = "any",
buffer_timeout_max: float = 0.0,
buffer_timeout_scheduler: Callable[
[float, State, Optional[Observation]], float
] = get_increasing_buffer_scheduler(),
) -> None:
"""
Parameters
----------
classifier_constructor: Callable[[], Classifier]
A function to generate a new classifier.
representation_constructor: Callable[[int, int, str, int], ConceptRepresentation]
A function to generate a new concept representation taking in:
representation_window_size, state_id, mode and update_period.
representation_comparer: RepresentationComparer
An object capable of calculating similarity between two representations.
drift_detector_constructor: Callable[[], DriftDetector]
A function to generate an object capable of detecting drift in a univariate stream.
representation_update_period: int
Default: 1
The number of timesteps between representation updates.
train_representation: bool
Whether or not new states train representations.
Must be set to automatically construct states.
representation_window_size: int
Default: 1
The number of observations to construct a concept representation over.
        drift_detection_mode: str["any", "lower", "higher"]
Default: "any"
How change is interpreted as concept drift.
"any": Any significant change in relevance is detected as drift.
"lower": Significant changes where the new value is lower than the mean is detected.
"higher": Significant changes where the new value is higher than the mean is detected.
buffer_timeout_max: float
The max number of timesteps to buffer new data before training.
        buffer_timeout_scheduler: Callable[[float, State, Optional[Observation]], float]
            Default: get_increasing_buffer_scheduler()
            A function to calculate the current buffer_timeout. The function is passed buffer_timeout_max,
            the active state and (optionally) the new observation.
            The default increasing scheduler slowly ramps up the buffer_timeout so that a new
            classifier has time to learn; a constant scheduler that simply returns buffer_timeout_max
            is the simpler alternative.
"""
super().__init__(
classifier_constructor=classifier_constructor,
representation_constructor=representation_constructor,
representation_comparer=representation_comparer,
drift_detector_constructor=drift_detector_constructor,
representation_update_period=representation_update_period,
max_size=1,
valuation_policy=ValuationPolicy.FIFO,
train_representation=train_representation,
representation_window_size=representation_window_size,
construct_pair_representations=False,
prediction_mode="active",
background_state_mode=None,
drift_detection_mode=drift_detection_mode,
buffer_timeout_max=buffer_timeout_max,
buffer_timeout_scheduler=buffer_timeout_scheduler,
)
| 45.422857
| 106
| 0.700969
|
fb2bb5a84babdee3a740ab5da93c6827817ef7cc
| 403
|
py
|
Python
|
Chapter17/example2.py
|
3DAlgoLab/Mastering-Concurrency-in-Python
|
48e4cb4878830b5f3e23346187ba8ada37f1dd2c
|
[
"MIT"
] | 66
|
2018-11-21T02:07:16.000Z
|
2021-11-08T13:13:31.000Z
|
Chapter17/example2.py
|
3DAlgoLab/Mastering-Concurrency-in-Python
|
48e4cb4878830b5f3e23346187ba8ada37f1dd2c
|
[
"MIT"
] | 2
|
2020-03-11T19:56:39.000Z
|
2021-11-15T14:07:05.000Z
|
Chapter17/example2.py
|
3DAlgoLab/Mastering-Concurrency-in-Python
|
48e4cb4878830b5f3e23346187ba8ada37f1dd2c
|
[
"MIT"
] | 58
|
2018-11-03T14:06:10.000Z
|
2022-03-17T14:06:55.000Z
|
# ch17/example2.py
import sys; sys.setswitchinterval(.000001)
import threading
def foo():
global my_list
my_list.append(1)
my_list = []
threads = []
for i in range(1000):
thread = threading.Thread(target=foo)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(f'Final list length: {len(my_list)}.')
print('Finished.')
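# Contrast sketch (added; not from the chapter): list.append is atomic in CPython, so
# the example above always ends with 1000 elements. A read-modify-write on a plain int
# is not atomic, so with the tiny switch interval set above, lost updates become likely:
counter = 0
def bar():
    global counter
    current = counter   # read
    current += 1        # modify -- another thread may run here
    counter = current   # write back, possibly clobbering a concurrent update
counter_threads = [threading.Thread(target=bar) for _ in range(1000)]
for t in counter_threads:
    t.start()
for t in counter_threads:
    t.join()
print(f'Final counter value: {counter} (1000 means no update was lost).')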
| 15.5
| 44
| 0.684864
|
b6e2dc163693584d0a3db76c3c8e6455cc24fe57
| 628
|
py
|
Python
|
plotly/validators/layout/scene/yaxis/_autorange.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/layout/scene/yaxis/_autorange.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/layout/scene/yaxis/_autorange.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class AutorangeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='autorange',
parent_name='layout.scene.yaxis',
**kwargs
):
super(AutorangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
implied_edits=kwargs.pop('implied_edits', {}),
role=kwargs.pop('role', 'info'),
values=kwargs.pop('values', [True, False, 'reversed']),
**kwargs
)
| 29.904762
| 75
| 0.605096
|
f7b41b74e32c6a7be272ca44b5c2037690078c79
| 1,143
|
py
|
Python
|
Python/sqlmlutils/packagemanagement/download_script.py
|
ntakru/sqlmlutils
|
d154f6a6f278510217958a1631578eeca01db6e8
|
[
"MIT"
] | 15
|
2019-07-01T04:56:55.000Z
|
2021-12-14T18:47:21.000Z
|
Python/sqlmlutils/packagemanagement/download_script.py
|
ntakru/sqlmlutils
|
d154f6a6f278510217958a1631578eeca01db6e8
|
[
"MIT"
] | 29
|
2019-05-29T20:59:21.000Z
|
2022-03-01T22:46:11.000Z
|
Python/sqlmlutils/packagemanagement/download_script.py
|
ntakru/sqlmlutils
|
d154f6a6f278510217958a1631578eeca01db6e8
|
[
"MIT"
] | 26
|
2019-08-12T21:15:25.000Z
|
2022-03-11T05:59:00.000Z
|
# Copyright(c) Microsoft Corporation.
# Licensed under the MIT license.
import pip
import sys
import warnings
from distutils.version import LooseVersion
pipversion = LooseVersion(pip.__version__)
if pipversion >= LooseVersion("19.3"):
from wheel import pep425tags
from pip._internal.main import main as pipmain
elif pipversion > LooseVersion("10"):
from pip._internal import pep425tags
from pip._internal import main as pipmain
else:
if pipversion < LooseVersion("8.1.2"):
warnings.warn("Pip version less than 8.1.2 not supported.", Warning)
from pip import pep425tags
from pip import main as pipmain
# Monkey patch the pip version information with server information
pep425tags.is_manylinux2010_compatible = lambda: True
pep425tags.is_manylinux1_compatible = lambda: True
pep425tags.get_impl_version_info = lambda: eval(sys.argv[1])
pep425tags.get_abbr_impl = lambda: sys.argv[2]
pep425tags.get_abi_tag = lambda: sys.argv[3]
pep425tags.get_platform = lambda: sys.argv[4]
# Call pipmain with the download request
pipmain(list(map(str.strip, sys.argv[5].split(","))))
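# Invocation sketch (added; argument order inferred from the sys.argv indexing above,
# and every concrete value here is hypothetical):
#   python download_script.py "(3, 7)" cp cp37m win_amd64 "download, numpy, --dest, ./pkgs"
# i.e. argv[1] = implementation version tuple (eval'd), argv[2] = abbreviated
# implementation, argv[3] = ABI tag, argv[4] = platform tag, and argv[5] = the
# comma-separated pip arguments handed to pipmain.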
| 33.617647
| 77
| 0.745407
|
9dc77fa289e5c91ab4f2426fa4454a872bc5dd10
| 937
|
py
|
Python
|
minst.py
|
Liam-Mackey/MINST-FUN
|
82db33fc04406e6f0b7428313f5922c1d0436828
|
[
"MIT"
] | null | null | null |
minst.py
|
Liam-Mackey/MINST-FUN
|
82db33fc04406e6f0b7428313f5922c1d0436828
|
[
"MIT"
] | null | null | null |
minst.py
|
Liam-Mackey/MINST-FUN
|
82db33fc04406e6f0b7428313f5922c1d0436828
|
[
"MIT"
] | null | null | null |
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
# Softmax regression: one linear layer followed by softmax.
x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 input images
W = tf.Variable(tf.zeros([784, 10]))         # weights
b = tf.Variable(tf.zeros([10]))              # biases
y = tf.nn.softmax(tf.matmul(x, W) + b)       # predicted class probabilities
y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot ground-truth labels
# Cross-entropy loss, minimised with plain gradient descent.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
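# Note (added): with this single-layer softmax model trained for 1000 batches of 100,
# the reported test accuracy typically lands around 0.92, the figure quoted by the
# original TensorFlow MNIST tutorial that this script follows.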
| 28.393939
| 85
| 0.73746
|
3055110d1cb452353fede909d0830b0cdbe5eddd
| 98,786
|
py
|
Python
|
bin/cqlsh.py
|
rishikthr09/cassandra
|
9b770ea0d1cb231de3aed7487258c2ebd48082bb
|
[
"Apache-2.0"
] | 1
|
2020-03-29T01:56:58.000Z
|
2020-03-29T01:56:58.000Z
|
bin/cqlsh.py
|
rishikthr09/cassandra
|
9b770ea0d1cb231de3aed7487258c2ebd48082bb
|
[
"Apache-2.0"
] | null | null | null |
bin/cqlsh.py
|
rishikthr09/cassandra
|
9b770ea0d1cb231de3aed7487258c2ebd48082bb
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import with_statement
import cmd
import codecs
import ConfigParser
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from StringIO import StringIO
from contextlib import contextmanager
from glob import glob
from uuid import UUID
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
sys.exit("\nCQL Shell supports only Python 2.7\n")
# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
sys.exit("\nCQL Shell does not run on Jython\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001' # Win utf-8 variant
description = "CQL Shell for Apache Cassandra"
version = "5.0.1"
readline = None
try:
# check if tty first, cause readline doesn't check, and only cares
# about $TERM. we don't want the funky escape code stuff to be
# output if not a tty.
if sys.stdin.isatty():
import readline
except ImportError:
pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.2.html'
# default location of local CQL.html
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
# default location of local CQL.html
CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
# fallback to package file
CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
# fallback to online version
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
# only on Linux (some OS with xdg-open)
webbrowser._tryorder.remove('xdg-open')
webbrowser._tryorder.append('xdg-open')
# use bundled lib for python-cql if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
if os.environ.get('CQLSH_NO_BUNDLED', ''):
ZIPLIB_DIRS = ()
def find_zip(libprefix):
for ziplibdir in ZIPLIB_DIRS:
zips = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
if zips:
return max(zips) # probably the highest version, if multiple
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError, e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
DEFAULT_COMPLETEKEY = '\t'
else:
DEFAULT_COMPLETEKEY = 'tab'
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option("--encoding", help="Specify a non-default encoding for output." +
" (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=None,
help='Specify a particular CQL version, '
'by default the highest version supported by the server will be used.'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("--protocol-version", type="int", default=None,
                  help='Specify a specific protocol version otherwise the client will default and downgrade as necessary')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
if hasattr(options, 'cqlshrc'):
CONFIG_FILE = options.cqlshrc
if not os.path.exists(CONFIG_FILE):
print '\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR)
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
try:
os.mkdir(HISTORY_DIR)
except OSError:
print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
if os.path.exists(CONFIG_FILE):
print '\nWarning: cqlshrc config files were found at both the old location (%s) and \
the new location (%s), the old config file will not be migrated to the new \
location, and the new location will be used for now. You should manually \
consolidate the config files at the new location and remove the old file.' \
% (OLD_CONFIG_FILE, CONFIG_FILE)
else:
os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
CQL_ERRORS = (
cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
cassandra.cluster.NoHostAvailable,
cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
class NoKeyspaceError(Exception):
pass
class KeyspaceNotFound(Exception):
pass
class ColumnFamilyNotFound(Exception):
pass
class IndexNotFound(Exception):
pass
class MaterializedViewNotFound(Exception):
pass
class ObjectNotFound(Exception):
pass
class VersionNotSupported(Exception):
pass
class UserTypeNotFound(Exception):
pass
class FunctionNotFound(Exception):
pass
class AggregateNotFound(Exception):
pass
class DecodeError(Exception):
verb = 'decode'
def __init__(self, thebytes, err, colname=None):
self.thebytes = thebytes
self.err = err
self.colname = colname
def __str__(self):
return str(self.thebytes)
def message(self):
what = 'value %r' % (self.thebytes,)
if self.colname is not None:
what = 'value %r (for column %r)' % (self.thebytes, self.colname)
return 'Failed to %s %s : %s' \
% (self.verb, what, self.err)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
verb = 'format'
def full_cql_version(ver):
while ver.count('.') < 2:
ver += '.0'
ver_parts = ver.split('-', 1) + ['']
vertuple = tuple(map(int, ver_parts[0].split('.')) + [ver_parts[1]])
return ver, vertuple
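# Worked examples (added):
#   full_cql_version('3.4') == ('3.4.0', (3, 4, 0, ''))
#   full_cql_version('3.4.0-beta1') == ('3.4.0-beta1', (3, 4, 0, 'beta1'))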
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
float_precision=None, colormap=None, nullval=None):
if isinstance(val, DecodeError):
if addcolor:
return colorme(repr(val.thebytes), colormap, 'error')
else:
return FormattedValue(repr(val.thebytes))
return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
if file is None:
file = sys.stderr
try:
file.write(warnings.formatwarning(message, category, filename, lineno, line=''))
except IOError:
pass
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
class DateOverFlowWarning(RuntimeWarning):
pass
# Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = int64_unpack(byts)
try:
return datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
"Timestamps are displayed in milliseconds from epoch."))
return timestamp_ms
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
if hasattr(cassandra, 'deserializers'):
del cassandra.deserializers.DesDateType
# Return cassandra.cqltypes.EMPTY instead of None for empty values
cassandra.cqltypes.CassandraType.support_empty_values = True
class FrozenType(cassandra.cqltypes._ParameterizedType):
"""
Needed until the bundled python driver adds FrozenType.
"""
typename = "frozen"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
class Shell(cmd.Cmd):
custom_prompt = os.getenv('CQLSH_PROMPT', '')
    if custom_prompt != '':
custom_prompt += "\n"
default_prompt = custom_prompt + "cqlsh> "
continue_prompt = " ... "
keyspace_prompt = custom_prompt + "cqlsh:%s> "
keyspace_continue_prompt = "%s ... "
show_line_nums = False
debug = False
stop = False
last_hist = None
shunted_query_out = None
use_paging = True
default_page_size = 100
def __init__(self, hostname, port, color=False,
username=None, password=None, encoding=None, stdin=None, tty=True,
completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
cqlver=None, keyspace=None,
tracing_enabled=False, expand_enabled=False,
display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
display_date_format=DEFAULT_DATE_FORMAT,
display_float_precision=DEFAULT_FLOAT_PRECISION,
display_double_precision=DEFAULT_DOUBLE_PRECISION,
display_timezone=None,
max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
ssl=False,
single_statement=None,
request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
protocol_version=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
allow_server_port_discovery=False):
cmd.Cmd.__init__(self, completekey=completekey)
self.hostname = hostname
self.port = port
self.auth_provider = None
if username:
if not password:
password = getpass.getpass()
self.auth_provider = PlainTextAuthProvider(username=username, password=password)
self.username = username
self.keyspace = keyspace
self.ssl = ssl
self.tracing_enabled = tracing_enabled
self.page_size = self.default_page_size
self.expand_enabled = expand_enabled
self.allow_server_port_discovery = allow_server_port_discovery
if use_conn:
self.conn = use_conn
else:
kwargs = {}
if protocol_version is not None:
kwargs['protocol_version'] = protocol_version
self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
auth_provider=self.auth_provider,
ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=connect_timeout,
connect_timeout=connect_timeout,
allow_server_port_discovery=allow_server_port_discovery,
**kwargs)
self.owns_connection = not use_conn
if keyspace:
self.session = self.conn.connect(keyspace)
else:
self.session = self.conn.connect()
if browser == "":
browser = None
self.browser = browser
self.color = color
self.display_nanotime_format = display_nanotime_format
self.display_timestamp_format = display_timestamp_format
self.display_date_format = display_date_format
self.display_float_precision = display_float_precision
self.display_double_precision = display_double_precision
self.display_timezone = display_timezone
self.session.default_timeout = request_timeout
self.session.row_factory = ordered_dict_factory
self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
self.get_connection_versions()
self.set_expanded_cql_version(self.connection_versions['cql'])
self.current_keyspace = keyspace
self.max_trace_wait = max_trace_wait
self.session.max_trace_wait = max_trace_wait
self.tty = tty
self.encoding = encoding
self.check_windows_encoding()
self.output_codec = codecs.lookup(encoding)
self.statement = StringIO()
self.lineno = 1
self.in_comment = False
self.prompt = ''
if stdin is None:
stdin = sys.stdin
if tty:
self.reset_prompt()
self.report_connection()
print 'Use HELP for help.'
else:
self.show_line_nums = True
self.stdin = stdin
self.query_out = sys.stdout
self.consistency_level = cassandra.ConsistencyLevel.ONE
self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
self.empty_lines = 0
self.statement_error = False
self.single_statement = single_statement
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
def myformat_value(self, val, cqltype=None, **kwargs):
if isinstance(val, DecodeError):
self.decoding_errors.append(val)
try:
dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
timezone=self.display_timezone)
precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' \
else self.display_float_precision
return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
addcolor=self.color, date_time_format=dtformats,
float_precision=precision, **kwargs)
except Exception, e:
err = FormatError(val, e)
self.decoding_errors.append(err)
return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
elif name in table_meta.columns and table_meta.columns[name].is_static:
column_colors.default_factory = lambda: WHITE
return self.myformat_value(name, colormap=column_colors)
def report_connection(self):
self.show_host()
self.show_version()
def show_host(self):
print "Connected to %s at %s:%d." % \
(self.applycolor(self.get_cluster_name(), BLUE),
self.hostname,
self.port)
def show_version(self):
vers = self.connection_versions.copy()
vers['shver'] = version
# system.Versions['cql'] apparently does not reflect changes with
# set_cql_version.
vers['cql'] = self.cql_version
print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers
def show_session(self, sessionid, partial_session=False):
print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
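        # system.local always holds exactly one row (key='local') describing the
        # node we are connected to.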
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': result['native_protocol_version'],
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
return map(str, self.conn.metadata.keyspaces.keys())
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).tables.keys())
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).views.keys())
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).indexes.keys())
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return [unicode(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return self.get_keyspace_meta(ksname).user_types.keys()
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
return zip(user_type.field_names, user_type.field_types)
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values())
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values())
def get_cluster_name(self):
return self.conn.metadata.cluster_name
def get_partitioner(self):
return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname not in self.conn.metadata.keyspaces:
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
return self.conn.metadata.keyspaces[ksname]
def get_keyspaces(self):
return self.conn.metadata.keyspaces.values()
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
                return self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
        # An external auth implementation may be in use, in which case the
        # internal auth tables aren't actually defined in the schema. Fake up
        # their metadata here.
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
        else:
            raise ColumnFamilyNotFound("Column family %r not found" % tablename)
        return table_meta
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index %r not found" % idxname)
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view %r not found" % viewname)
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
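        # Given a single argument, try it first as a keyspace name, then as a
        # table/index/view name within the current keyspace.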
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
                raise ObjectNotFound("%r not found in keyspaces" % (ks,))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in self.get_keyspace_meta(ksname).tables.values()
for trigger in table.triggers.values()]
def reset_statement(self):
self.reset_prompt()
self.statement.truncate(0)
self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt % self.current_keyspace, True)
def set_continue_prompt(self):
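        # After several blank continuation lines, remind the user how to finish
        # or cancel the statement instead of prompting silently forever.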
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt % spaces)
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
@contextmanager
def prepare_loop(self):
readline = None
if self.tty and self.completekey:
try:
import readline
except ImportError:
                if is_win:
                    print "WARNING: pyreadline dependency missing. Install to enable tab completion."
else:
old_completer = readline.get_completer()
readline.set_completer(self.complete)
if readline.__doc__ is not None and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind -e")
readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
readline.parse_and_bind("bind ^R em-inc-search-prev")
else:
readline.parse_and_bind(self.completekey + ": complete")
try:
yield
finally:
if readline is not None:
readline.set_completer(old_completer)
def get_input_line(self, prompt=''):
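        # Interactive input goes through raw_input (so readline editing works);
        # batch input is read from the supplied stdin. An empty read means EOF.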
if self.tty:
try:
self.lastcmd = raw_input(prompt).decode(self.encoding)
except UnicodeDecodeError:
self.lastcmd = ''
traceback.print_exc()
self.check_windows_encoding()
line = self.lastcmd + '\n'
else:
self.lastcmd = self.stdin.readline()
line = self.lastcmd
if not len(line):
raise EOFError
self.lineno += 1
return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
def cmdloop(self):
"""
        Adapted from cmd.Cmd's version, because cmd.Cmd.cmdloop() provides no
        way to tell the difference between the literal string "EOF" showing up
        in input and an actual EOF.
"""
with self.prepare_loop():
while not self.stop:
try:
if self.single_statement:
line = self.single_statement
self.stop = True
else:
line = self.get_input_line(self.prompt)
self.statement.write(line)
if self.onecmd(self.statement.getvalue()):
self.reset_statement()
except EOFError:
self.handle_eof()
except CQL_ERRORS, cqlerr:
self.printerr(cqlerr.message.decode(encoding='utf-8'))
except KeyboardInterrupt:
self.reset_statement()
print
def onecmd(self, statementtext):
"""
Returns true if the statement is complete and was handled (meaning it
can be reset).
"""
try:
statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
except pylexotron.LexingError, e:
if self.show_line_nums:
self.printerr('Invalid syntax at char %d' % (e.charnum,))
else:
self.printerr('Invalid syntax at line %d, char %d'
% (e.linenum, e.charnum))
statementline = statementtext.split('\n')[e.linenum - 1]
self.printerr(' %s' % statementline)
self.printerr(' %s^' % (' ' * e.charnum))
return True
while statements and not statements[-1]:
statements = statements[:-1]
if not statements:
return True
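        # No terminating ';' yet (or the terminator was escaped): the statement
        # is incomplete, so keep buffering and show the continuation prompt.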
if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
self.set_continue_prompt()
return
for st in statements:
try:
self.handle_statement(st, statementtext)
except Exception, e:
if self.debug:
traceback.print_exc()
else:
self.printerr(e)
return True
def handle_eof(self):
if self.tty:
print
statement = self.statement.getvalue()
if statement.strip():
if not self.onecmd(statement):
self.printerr('Incomplete statement at end of file')
self.do_exit()
def handle_statement(self, tokens, srcstr):
# Concat multi-line statements and insert into history
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = srcstr.replace("\n", " ").rstrip()
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist.encode(self.encoding))
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
# successful complete parse
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
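            # Quoted keyspace names keep their case; unquoted names fold to
            # lowercase, matching CQL identifier rules.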
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
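        # Don't trace queries against system_traces itself; doing so would only
        # generate more tracing data while reading it.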
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
def perform_statement(self, statement):
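        # fetch_size=None disables driver-side paging; otherwise each round trip
        # is bounded by the configured page size.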
stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
success, future = self.perform_simple_statement(stmt)
if future:
if future.warnings:
self.print_warnings(future.warnings)
if self.tracing_enabled:
try:
for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
print_trace(self, trace)
except TraceUnavailable:
msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
self.writeresult(msg, color=RED)
for trace_id in future.get_query_trace_ids():
self.show_session(trace_id, partial_session=True)
except Exception, err:
self.printerr("Unable to fetch query trace: %s" % (str(err),))
return success
def parse_for_select_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
try:
return self.get_table_meta(ks, name)
except ColumnFamilyNotFound:
try:
return self.get_view_meta(ks, name)
except MaterializedViewNotFound:
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
def perform_simple_statement(self, statement):
if not statement:
return False, None
future = self.session.execute_async(statement, trace=self.tracing_enabled)
result = None
try:
result = future.result()
except CQL_ERRORS, err:
self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8'))
except Exception:
import traceback
self.printerr(traceback.format_exc())
# Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
if not future.is_schema_agreed:
try:
self.conn.refresh_schema_metadata(5) # will throw exception if there is a schema mismatch
except Exception:
self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
"nodes in system.local and system.peers.")
self.conn.refresh_schema_metadata(-1)
if result is None:
return False, None
if statement.query_string[:6].lower() == 'select':
self.print_result(result, self.parse_for_select_meta(statement.query_string))
elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
self.print_result(result, self.get_table_meta('system_auth', 'roles'))
elif statement.query_string.lower().startswith("list"):
self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
elif result:
# CAS INSERT/UPDATE
self.writeresult("")
self.print_static_result(result, self.parse_for_update_meta(statement.query_string))
self.flush_output()
return True, future
def print_result(self, result, table_meta):
self.decoding_errors = []
self.writeresult("")
if result.has_more_pages and self.tty:
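            # Interactive paging: print one page at a time, pausing between pages
            # unless output is being captured to a file.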
num_rows = 0
while True:
if result.current_rows:
num_rows += len(result.current_rows)
self.print_static_result(result, table_meta)
if result.has_more_pages:
if self.shunted_query_out is None:
# Only pause when not capturing.
raw_input("---MORE---")
result.fetch_next_page()
else:
break
else:
num_rows = len(result.current_rows)
self.print_static_result(result, table_meta)
self.writeresult("(%d rows)" % num_rows)
if self.decoding_errors:
for err in self.decoding_errors[:2]:
self.writeresult(err.message(), color=RED)
if len(self.decoding_errors) > 2:
self.writeresult('%d more decoding errors suppressed.'
% (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta):
if not result.column_names and not table_meta:
return
column_names = result.column_names or table_meta.columns.keys()
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not result.current_rows:
# print header only
self.print_formatted_result(formatted_names, None)
return
cql_types = []
if result.column_types:
ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]
formatted_values = [map(self.myformat_value, [row[c] for c in column_names], cql_types) for row in result.current_rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values)
else:
self.print_formatted_result(formatted_names, formatted_values)
def print_formatted_result(self, formatted_names, formatted_values):
# determine column widths
widths = [n.displaywidth for n in formatted_names]
if formatted_values is not None:
for fmtrow in formatted_values:
for num, col in enumerate(fmtrow):
widths[num] = max(widths[num], col.displaywidth)
# print header
header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
self.writeresult(' ' + header.rstrip())
self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
# stop if there are no rows
if formatted_values is None:
self.writeresult("")
return
# print row data
for row in formatted_values:
line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
self.writeresult(' ' + line)
self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
max_col_width = max([n.displaywidth for n in formatted_names])
max_val_width = max([n.displaywidth for row in formatted_values for n in row])
# for each row returned, list all the column-value pairs
for row_id, row in enumerate(formatted_values):
self.writeresult("@ Row %d" % (row_id + 1))
self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
for field_id, field in enumerate(row):
column = formatted_names[field_id].ljust(max_col_width, color=self.color)
value = field.ljust(field.displaywidth, color=self.color)
self.writeresult(' ' + " | ".join([column, value]))
self.writeresult('')
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
def emptyline(self):
pass
def parseline(self, line):
# this shouldn't be needed
raise NotImplementedError
def complete(self, text, state):
if readline is None:
return
if state == 0:
try:
self.completion_matches = self.find_completions(text)
except Exception:
if debug_completion:
import traceback
traceback.print_exc()
else:
raise
try:
return self.completion_matches[state]
except IndexError:
return None
def find_completions(self, text):
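        # Completion must see the whole statement buffered so far, not just the
        # current readline line, so prepend any previously entered lines.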
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "%s@%s" % (self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
def print_recreate_keyspace(self, ksdef, out):
out.write(ksdef.export_as_string())
out.write("\n")
def print_recreate_columnfamily(self, ksname, cfname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given table.
Writes output to the given out stream.
"""
out.write(self.get_table_meta(ksname, cfname).export_as_string())
out.write("\n")
def print_recreate_index(self, ksname, idxname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given index.
Writes output to the given out stream.
"""
out.write(self.get_index_meta(ksname, idxname).export_as_string())
out.write("\n")
def print_recreate_materialized_view(self, ksname, viewname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given materialized view.
Writes output to the given out stream.
"""
out.write(self.get_view_meta(ksname, viewname).export_as_string())
out.write("\n")
def print_recreate_object(self, ks, name, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given object (ks, table or index).
Writes output to the given out stream.
"""
out.write(self.get_object_meta(ks, name).export_as_string())
out.write("\n")
def describe_keyspaces(self):
print
cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
print
def describe_keyspace(self, ksname):
print
self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
print
def describe_columnfamily(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
print
def describe_index(self, ksname, idxname):
print
self.print_recreate_index(ksname, idxname, sys.stdout)
print
def describe_materialized_view(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
print
def describe_object(self, ks, name):
print
self.print_recreate_object(ks, name, sys.stdout)
print
def describe_columnfamilies(self, ksname):
print
if ksname is None:
for k in self.get_keyspaces():
name = protect_name(k.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
print
else:
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
print
def describe_functions(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.functions.keys())
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.functions.keys())
def describe_function(self, ksname, functionname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
functions = filter(lambda f: f.name == functionname, ksmeta.functions.values())
if len(functions) == 0:
raise FunctionNotFound("User defined function %r not found" % functionname)
print "\n\n".join(func.export_as_string() for func in functions)
print
def describe_aggregates(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.aggregates.keys())
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.aggregates.keys())
def describe_aggregate(self, ksname, aggregatename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values())
if len(aggregates) == 0:
raise FunctionNotFound("User defined aggregate %r not found" % aggregatename)
print "\n\n".join(aggr.export_as_string() for aggr in aggregates)
print
def describe_usertypes(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.user_types.keys(), quote=True)
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.user_types.keys(), quote=True)
def describe_usertype(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
try:
usertype = ksmeta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
print usertype.export_as_string()
def _columnize_unicode(self, name_list, quote=False):
"""
Used when columnizing identifiers that may contain unicode
"""
names = [n.encode('utf-8') for n in name_list]
if quote:
names = protect_names(names)
cmd.Cmd.columnize(self, names)
print
def describe_cluster(self):
print '\nCluster: %s' % self.get_cluster_name()
p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
print 'Partitioner: %s\n' % p
# TODO: snitch?
# snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
# print 'Snitch: %s\n' % snitch
if self.current_keyspace is not None and self.current_keyspace != 'system':
print "Range ownership:"
ring = self.get_ring(self.current_keyspace)
for entry in ring.items():
print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]]))
print
def describe_schema(self, include_system=False):
print
for k in self.get_keyspaces():
if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES:
self.print_recreate_keyspace(k, sys.stdout)
print
def do_describe(self, parsed):
"""
DESCRIBE [cqlsh only]
(DESC may be used as a shorthand.)
Outputs information about the connected Cassandra cluster, or about
the data objects stored in the cluster. Use in one of the following ways:
DESCRIBE KEYSPACES
Output the names of all keyspaces.
DESCRIBE KEYSPACE [<keyspacename>]
Output CQL commands that could be used to recreate the given keyspace,
and the objects in it (such as tables, types, functions, etc.).
In some cases, as the CQL interface matures, there will be some metadata
about a keyspace that is not representable with CQL. That metadata will not be shown.
The '<keyspacename>' argument may be omitted, in which case the current
keyspace will be described.
DESCRIBE TABLES
Output the names of all tables in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TABLE [<keyspace>.]<tablename>
Output CQL commands that could be used to recreate the given table.
In some cases, as above, there may be table metadata which is not
representable and which will not be shown.
DESCRIBE INDEX <indexname>
Output the CQL command that could be used to recreate the given index.
In some cases, there may be index metadata which is not representable
and which will not be shown.
DESCRIBE MATERIALIZED VIEW <viewname>
Output the CQL command that could be used to recreate the given materialized view.
In some cases, there may be materialized view metadata which is not representable
and which will not be shown.
DESCRIBE CLUSTER
Output information about the connected Cassandra cluster, such as the
cluster name, and the partitioner and snitch in use. When you are
connected to a non-system keyspace, also shows endpoint-range
ownership information for the Cassandra ring.
DESCRIBE [FULL] SCHEMA
Output CQL commands that could be used to recreate the entire (non-system) schema.
Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
DESCRIBE TYPES
Output the names of all user-defined-types in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TYPE [<keyspace>.]<type>
Output the CQL command that could be used to recreate the given user-defined-type.
DESCRIBE FUNCTIONS
Output the names of all user-defined-functions in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE FUNCTION [<keyspace>.]<function>
Output the CQL command that could be used to recreate the given user-defined-function.
DESCRIBE AGGREGATES
Output the names of all user-defined-aggregates in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
Output the CQL command that could be used to recreate the given user-defined-aggregate.
DESCRIBE <objname>
Output CQL commands that could be used to recreate the entire object schema,
where object can be either a keyspace or a table or an index or a materialized
view (in this order).
"""
what = parsed.matched[1][1].lower()
if what == 'functions':
self.describe_functions(self.current_keyspace)
elif what == 'function':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
self.describe_function(ksname, functionname)
elif what == 'aggregates':
self.describe_aggregates(self.current_keyspace)
elif what == 'aggregate':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
self.describe_aggregate(ksname, aggregatename)
elif what == 'keyspaces':
self.describe_keyspaces()
elif what == 'keyspace':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
if not ksname:
ksname = self.current_keyspace
if ksname is None:
self.printerr('Not in any keyspace.')
return
self.describe_keyspace(ksname)
elif what in ('columnfamily', 'table'):
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
self.describe_columnfamily(ks, cf)
elif what == 'index':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
self.describe_index(ks, idx)
elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
self.describe_materialized_view(ks, mv)
elif what in ('columnfamilies', 'tables'):
self.describe_columnfamilies(self.current_keyspace)
elif what == 'types':
self.describe_usertypes(self.current_keyspace)
elif what == 'type':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
ut = self.cql_unprotect_name(parsed.get_binding('utname'))
self.describe_usertype(ks, ut)
elif what == 'cluster':
self.describe_cluster()
elif what == 'schema':
self.describe_schema(False)
elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
self.describe_schema(True)
elif what:
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname'))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
self.describe_object(ks, name)
do_desc = do_describe
def do_copy(self, parsed):
r"""
COPY [cqlsh only]
COPY x FROM: Imports CSV data into a Cassandra table
COPY x TO: Exports data from a Cassandra table in CSV format.
COPY <table_name> [ ( column [, ...] ) ]
FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
[ WITH <option>='value' [AND ...] ];
File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
COPY <table_name> [ ( column [, ...] ) ]
TO ( '<filename>' | STDOUT )
[ WITH <option>='value' [AND ...] ];
Available common COPY options and defaults:
DELIMITER=',' - character that appears between records
QUOTE='"' - quoting character to be used to quote fields
ESCAPE='\' - character to appear before the QUOTE char when quoted
HEADER=false - whether to ignore the first line
NULL='' - string that represents a null value
DATETIMEFORMAT= - timestamp strftime format
'%Y-%m-%d %H:%M:%S%z' defaults to time_format value in cqlshrc
MAXATTEMPTS=5 - the maximum number of attempts per batch or range
REPORTFREQUENCY=0.25 - the frequency with which we display status updates in seconds
DECIMALSEP='.' - the separator for decimal values
THOUSANDSSEP='' - the separator for thousands digit groups
BOOLSTYLE='True,False' - the representation for booleans, case insensitive, specify true followed by false,
for example yes,no or 1,0
NUMPROCESSES=n - the number of worker processes, by default the number of cores minus one
capped at 16
CONFIGFILE='' - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
documentation) where you can specify WITH options under the following optional
sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
[copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
name. Options are read from these sections, in the order specified
above, and command line options always override options in configuration files.
Depending on the COPY direction, only the relevant copy-from or copy-to sections
are used. If no configfile is specified then .cqlshrc is searched instead.
RATEFILE='' - an optional file where to print the output statistics
Available COPY FROM options and defaults:
CHUNKSIZE=5000 - the size of chunks passed to worker processes
INGESTRATE=100000 - an approximate ingest rate in rows per second
MINBATCHSIZE=10 - the minimum size of an import batch
MAXBATCHSIZE=20 - the maximum size of an import batch
MAXROWS=-1 - the maximum number of rows, -1 means no maximum
SKIPROWS=0 - the number of rows to skip
SKIPCOLS='' - a comma separated list of column names to skip
MAXPARSEERRORS=-1 - the maximum global number of parsing errors, -1 means no maximum
MAXINSERTERRORS=1000 - the maximum global number of insert errors, -1 means no maximum
ERRFILE='' - a file where to store all rows that could not be imported, by default this is
import_ks_table.err where <ks> is your keyspace and <table> is your table name.
PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
False if you don't mind shifting data parsing to the cluster. The cluster will also
have to compile every batch statement. For large and oversized clusters
this will result in a faster import but for smaller clusters it may generate
timeouts.
TTL=3600 - the time to live in seconds, by default data will not expire
Available COPY TO options and defaults:
ENCODING='utf8' - encoding for CSV output
PAGESIZE='1000' - the page size for fetching results
PAGETIMEOUT=10 - the page timeout in seconds for fetching results
BEGINTOKEN='' - the minimum token string to consider when exporting data
ENDTOKEN='' - the maximum token string to consider when exporting data
MAXREQUESTS=6 - the maximum number of requests each worker process can work on in parallel
MAXOUTPUTSIZE='-1' - the maximum size of the output file measured in number of lines,
beyond this maximum the output file will be split into segments,
-1 means unlimited.
FLOATPRECISION=5 - the number of digits displayed after the decimal point for cql float values
DOUBLEPRECISION=12 - the number of digits displayed after the decimal point for cql double values
When entering CSV data on STDIN, you can use the sequence "\."
on a line by itself to end the data input.
"""
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
if ks is None:
ks = self.current_keyspace
if ks is None:
raise NoKeyspaceError("Not in any keyspace.")
table = self.cql_unprotect_name(parsed.get_binding('cfname'))
columns = parsed.get_binding('colnames', None)
if columns is not None:
columns = map(self.cql_unprotect_name, columns)
else:
# default to all known columns
columns = self.get_column_names(ks, table)
fname = parsed.get_binding('fname', None)
if fname is not None:
fname = self.cql_unprotect_value(fname)
copyoptnames = map(str.lower, parsed.get_binding('optnames', ()))
copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ()))
opts = dict(zip(copyoptnames, copyoptvals))
direction = parsed.get_binding('dir').upper()
if direction == 'FROM':
task = ImportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
elif direction == 'TO':
task = ExportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
else:
raise SyntaxError("Unknown direction %s" % direction)
task.run()
def do_show(self, parsed):
"""
SHOW [cqlsh only]
Displays information about the current cqlsh session. Can be called in
the following ways:
SHOW VERSION
Shows the version and build of the connected Cassandra instance, as
well as the version of the CQL spec that the connected Cassandra
instance understands.
SHOW HOST
Shows where cqlsh is currently connected.
SHOW SESSION <sessionid>
Pretty-prints the requested tracing session.
"""
showwhat = parsed.get_binding('what').lower()
if showwhat == 'version':
self.get_connection_versions()
self.show_version()
elif showwhat == 'host':
self.show_host()
elif showwhat.startswith('session'):
session_id = parsed.get_binding('sessionid').lower()
self.show_session(UUID(session_id))
else:
self.printerr('Wait, how do I show %r?' % (showwhat,))
def do_source(self, parsed):
"""
SOURCE [cqlsh only]
Executes a file containing CQL statements. Gives the output for each
statement in turn, if any, or any errors that occur along the way.
Errors do NOT abort execution of the CQL source file.
Usage:
SOURCE '<file>';
That is, the path to the file to be executed must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
See also the --file option to cqlsh.
"""
fname = parsed.get_binding('fname')
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
encoding, bom_size = get_file_encoding_bomsize(fname)
f = codecs.open(fname, 'r', encoding)
f.seek(bom_size)
except IOError, e:
self.printerr('Could not open %r: %s' % (fname, e))
return
username = self.auth_provider.username if self.auth_provider else None
password = self.auth_provider.password if self.auth_provider else None
subshell = Shell(self.hostname, self.port, color=self.color,
username=username, password=password,
encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
cqlver=self.cql_version, keyspace=self.current_keyspace,
tracing_enabled=self.tracing_enabled,
display_nanotime_format=self.display_nanotime_format,
display_timestamp_format=self.display_timestamp_format,
display_date_format=self.display_date_format,
display_float_precision=self.display_float_precision,
display_double_precision=self.display_double_precision,
display_timezone=self.display_timezone,
max_trace_wait=self.max_trace_wait, ssl=self.ssl,
request_timeout=self.session.default_timeout,
connect_timeout=self.conn.connect_timeout,
allow_server_port_discovery=self.allow_server_port_discovery)
subshell.cmdloop()
f.close()
def do_capture(self, parsed):
"""
CAPTURE [cqlsh only]
Begins capturing command output and appending it to a specified file.
Output will not be shown at the console while it is captured.
Usage:
CAPTURE '<file>';
CAPTURE OFF;
CAPTURE;
That is, the path to the file to be appended to must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
Only query result output is captured. Errors and output from cqlsh-only
commands will still be shown in the cqlsh session.
To stop capturing output and show it in the cqlsh session again, use
CAPTURE OFF.
To inspect the current capture configuration, use CAPTURE with no
arguments.
"""
fname = parsed.get_binding('fname')
if fname is None:
if self.shunted_query_out is not None:
print "Currently capturing query output to %r." % (self.query_out.name,)
else:
print "Currently not capturing query output."
return
if fname.upper() == 'OFF':
if self.shunted_query_out is None:
self.printerr('Not currently capturing output.')
return
self.query_out.close()
self.query_out = self.shunted_query_out
self.color = self.shunted_color
self.shunted_query_out = None
del self.shunted_color
return
if self.shunted_query_out is not None:
self.printerr('Already capturing output to %s. Use CAPTURE OFF'
' to disable.' % (self.query_out.name,))
return
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
f = open(fname, 'a')
except IOError, e:
self.printerr('Could not open %r for append: %s' % (fname, e))
return
self.shunted_query_out = self.query_out
self.shunted_color = self.color
self.query_out = f
self.color = False
print 'Now capturing query output to %r.' % (fname,)
def do_tracing(self, parsed):
"""
TRACING [cqlsh]
Enables or disables request tracing.
TRACING ON
Enables tracing for all further requests.
TRACING OFF
Disables tracing.
TRACING
TRACING with no arguments shows the current tracing status.
"""
self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
"""
EXPAND [cqlsh]
Enables or disables expanded (vertical) output.
EXPAND ON
Enables expanded (vertical) output.
EXPAND OFF
Disables expanded (vertical) output.
EXPAND
EXPAND with no arguments shows the current value of expand setting.
"""
self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
        SERIAL and LOCAL_SERIAL may be used only for SELECTs; they will be
        rejected for updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level])
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Consistency level set to %s.' % (level.upper(),)
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level])
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Serial consistency level set to %s.' % (level.upper(),)
def do_login(self, parsed):
"""
LOGIN [cqlsh only]
Changes login information without requiring restart.
LOGIN <username> (<password>)
        Login using the specified username. If the password is specified, it
        will be used; otherwise, you will be prompted for it.
"""
username = parsed.get_binding('username')
password = parsed.get_binding('password')
if password is None:
password = getpass.getpass()
else:
password = password[1:-1]
auth_provider = PlainTextAuthProvider(username=username, password=password)
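        # Build the replacement connection with the same settings as the current
        # one, swapping it in only after connect() succeeds.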
conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
protocol_version=self.conn.protocol_version,
auth_provider=auth_provider,
ssl_options=self.conn.ssl_options,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=self.conn.connect_timeout,
connect_timeout=self.conn.connect_timeout)
if self.current_keyspace:
session = conn.connect(self.current_keyspace)
else:
session = conn.connect()
# Copy session properties
session.default_timeout = self.session.default_timeout
session.row_factory = self.session.row_factory
session.default_consistency_level = self.session.default_consistency_level
session.max_trace_wait = self.session.max_trace_wait
# Update after we've connected in case we fail to authenticate
self.conn = conn
self.auth_provider = auth_provider
self.username = username
self.session = session
def do_exit(self, parsed=None):
"""
EXIT/QUIT [cqlsh only]
Exits cqlsh.
"""
self.stop = True
if self.owns_connection:
self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
"""
CLEAR/CLS [cqlsh only]
Clears the console.
"""
import subprocess
subprocess.call(['clear', 'cls'][is_win], shell=True)
do_cls = do_clear
def do_debug(self, parsed):
import pdb
pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
def columnize(self, slist, *a, **kw):
return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
def do_help(self, parsed):
"""
HELP [cqlsh only]
Gives information about cqlsh commands. To see available topics,
enter "HELP" without any arguments. To see help on a topic,
use "HELP <topic>".
"""
topics = parsed.get_binding('topic', ())
if not topics:
shell_topics = [t.upper() for t in self.get_help_topics()]
self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
self.print_topics("CQL help topics:", cql_topics, 15, 80)
return
for t in topics:
if t.lower() in self.get_help_topics():
doc = getattr(self, 'do_' + t.lower()).__doc__
self.stdout.write(doc + "\n")
elif t.lower() in cqldocs.get_help_topics():
urlpart = cqldocs.get_help_topic(t)
if urlpart is not None:
url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
if len(webbrowser._tryorder) == 0:
self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
elif self.browser is not None:
webbrowser.get(self.browser).open_new_tab(url)
else:
webbrowser.open_new_tab(url)
else:
self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
"""
Textual input/output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
def do_paging(self, parsed):
"""
PAGING [cqlsh]
Enables or disables query paging.
PAGING ON
Enables query paging for all further queries.
PAGING OFF
Disables paging.
PAGING
PAGING with no arguments shows the current query paging status.
"""
(self.use_paging, requested_page_size) = SwitchCommandWithValue(
"PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
if self.use_paging and requested_page_size is not None:
self.page_size = requested_page_size
if self.use_paging:
print("Page size: {}".format(self.page_size))
else:
self.page_size = self.default_page_size
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
def writeresult(self, text, color=None, newline=True, out=None):
if out is None:
out = self.query_out
# convert Exceptions, etc to text
if not isinstance(text, (unicode, str)):
text = unicode(text)
if isinstance(text, unicode):
text = text.encode(self.encoding)
to_write = self.applycolor(text, color) + ('\n' if newline else '')
out.write(to_write)
def flush_output(self):
self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
class SwitchCommand(object):
command = None
description = None
def __init__(self, command, desc):
self.command = command
self.description = desc
def execute(self, state, parsed, printerr):
switch = parsed.get_binding('switch')
if switch is None:
if state:
print "%s is currently enabled. Use %s OFF to disable" \
% (self.description, self.command)
else:
print "%s is currently disabled. Use %s ON to enable." \
% (self.description, self.command)
return state
if switch.upper() == 'ON':
if state:
printerr('%s is already enabled. Use %s OFF to disable.'
% (self.description, self.command))
return state
print 'Now %s is enabled' % (self.description,)
return True
if switch.upper() == 'OFF':
if not state:
printerr('%s is not enabled.' % (self.description,))
return state
print 'Disabled %s.' % (self.description,)
return False
class SwitchCommandWithValue(SwitchCommand):
"""The same as SwitchCommand except it also accepts a value in place of ON.
This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
eg: PAGING 50 returns (True, 50)
PAGING OFF returns (False, None)
PAGING ON returns (True, None)
The value_type must match for the PASSED_VALUE, otherwise it will return None.
"""
def __init__(self, command, desc, value_type=int):
SwitchCommand.__init__(self, command, desc)
self.value_type = value_type
def execute(self, state, parsed, printerr):
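        # Run the plain ON/OFF handling first; if the argument instead parses as
        # a value_type value (e.g. PAGING 50), treat it as ON with that value.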
binary_switch_value = SwitchCommand.execute(self, state, parsed, printerr)
switch = parsed.get_binding('switch')
try:
value = self.value_type(switch)
binary_switch_value = True
except (ValueError, TypeError):
value = None
return (binary_switch_value, value)
def option_with_default(cparser_getter, section, option, default=None):
try:
return cparser_getter(section, option)
except ConfigParser.Error:
return default
def raw_option_with_default(configs, section, option, default=None):
"""
Same (almost) as option_with_default() but won't do any string interpolation.
Useful for config values that include '%' symbol, e.g. time format string.
"""
try:
return configs.get(section, option, raw=True)
except ConfigParser.Error:
return default
def should_use_color():
if not sys.stdout.isatty():
return False
if os.environ.get('TERM', '') in ('dumb', ''):
return False
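    # Ask the terminal via tput how many colors it supports; fewer than 8 means
    # ANSI color codes would only add noise.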
try:
import subprocess
p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if int(stdout.strip()) < 8:
return False
except (OSError, ImportError, ValueError):
# oh well, we tried. at least we know there's a $TERM and it's
# not "dumb".
pass
return True
def read_options(cmdlineargs, environment):
configs = ConfigParser.SafeConfigParser()
configs.read(CONFIG_FILE)
rawconfigs = ConfigParser.RawConfigParser()
rawconfigs.read(CONFIG_FILE)
optvalues = optparse.Values()
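    # Seed option values from cqlshrc; command-line flags parsed below override
    # these defaults.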
optvalues.username = option_with_default(configs.get, 'authentication', 'username')
optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
DEFAULT_COMPLETEKEY)
optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
DEFAULT_TIMESTAMP_FORMAT)
optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
DEFAULT_NANOTIME_FORMAT)
optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
DEFAULT_DATE_FORMAT)
optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
DEFAULT_FLOAT_PRECISION)
optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
DEFAULT_DOUBLE_PRECISION)
optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
DEFAULT_MAX_TRACE_WAIT)
optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
optvalues.debug = False
optvalues.file = None
optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
optvalues.protocol_version = option_with_default(configs.getint, 'protocol', 'version', None)
optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
optvalues.execute = None
    optvalues.allow_server_port_discovery = option_with_default(configs.getboolean, 'connection', 'allow_server_port_discovery', False)
(options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
try:
options.connect_timeout = int(options.connect_timeout)
except ValueError:
parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
try:
options.request_timeout = int(options.request_timeout)
except ValueError:
parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
hostname = environment.get('CQLSH_HOST', hostname)
port = environment.get('CQLSH_PORT', port)
if len(arguments) > 0:
hostname = arguments[0]
if len(arguments) > 1:
port = arguments[1]
if options.file or options.execute:
options.tty = False
if options.execute and not options.execute.endswith(';'):
options.execute += ';'
if optvalues.color in (True, False):
options.color = optvalues.color
else:
if options.file is not None:
options.color = False
else:
options.color = should_use_color()
if options.cqlversion is not None:
options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
if cqlvertup[0] < 3:
parser.error('%r is not a supported CQL version.' % options.cqlversion)
options.cqlmodule = cql3handling
try:
port = int(port)
except ValueError:
parser.error('%r is not a valid port number.' % port)
return options, hostname, port
def setup_cqlruleset(cqlmodule):
global cqlruleset
cqlruleset = cqlmodule.CqlRuleSet
cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
cqlruleset.completer_for(rulename, termname)(func)
cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
global cqldocs
cqldocs = cqlmodule.cqldocs
def init_history():
if readline is not None:
try:
readline.read_history_file(HISTORY)
except IOError:
pass
delims = readline.get_completer_delims()
delims = delims.replace("'", "")
delims += '.'
readline.set_completer_delims(delims)
def save_history():
if readline is not None:
try:
readline.write_history_file(HISTORY)
except IOError:
pass
def main(options, hostname, port):
setup_cqlruleset(options.cqlmodule)
setup_cqldocs(options.cqlmodule)
init_history()
csv.field_size_limit(options.field_size_limit)
if options.file is None:
stdin = None
else:
try:
encoding, bom_size = get_file_encoding_bomsize(options.file)
stdin = codecs.open(options.file, 'r', encoding)
stdin.seek(bom_size)
except IOError as e:
sys.exit("Can't open %r: %s" % (options.file, e))
if options.debug:
sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
sys.stderr.write("Using ssl: %s\n" % (options.ssl,))
# create timezone based on settings, environment or auto-detection
timezone = None
if options.timezone or 'TZ' in os.environ:
try:
import pytz
if options.timezone:
try:
timezone = pytz.timezone(options.timezone)
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
if 'TZ' in os.environ:
try:
timezone = pytz.timezone(os.environ['TZ'])
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
except ImportError:
sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")
# try auto-detect timezone if tzlocal is installed
if not timezone:
try:
from tzlocal import get_localzone
timezone = get_localzone()
except ImportError:
# we silently ignore and fall back to UTC unless a custom timestamp format (which likely
# does contain a TZ part) was specified
if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
"Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")
try:
shell = Shell(hostname,
port,
color=options.color,
username=options.username,
password=options.password,
stdin=stdin,
tty=options.tty,
completekey=options.completekey,
browser=options.browser,
protocol_version=options.protocol_version,
cqlver=options.cqlversion,
keyspace=options.keyspace,
display_timestamp_format=options.time_format,
display_nanotime_format=options.nanotime_format,
display_date_format=options.date_format,
display_float_precision=options.float_precision,
display_double_precision=options.double_precision,
display_timezone=timezone,
max_trace_wait=options.max_trace_wait,
ssl=options.ssl,
single_statement=options.execute,
request_timeout=options.request_timeout,
connect_timeout=options.connect_timeout,
encoding=options.encoding,
allow_server_port_discovery=options.allow_server_port_discovery)
except KeyboardInterrupt:
sys.exit('Connection aborted.')
except CQL_ERRORS as e:
sys.exit('Connection error: %s' % (e,))
except VersionNotSupported as e:
sys.exit('Unsupported CQL version: %s' % (e,))
if options.debug:
shell.debug = True
shell.cmdloop()
save_history()
batch_mode = options.file or options.execute
if batch_mode and shell.statement_error:
sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()
if __name__ == '__main__':
main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
| 40.337281
| 195
| 0.62607
|
a7a274010441766880a1798a4249ae1e142533ea
| 1,894
|
py
|
Python
|
redash/serializers.py
|
hgs847825/redash
|
11d09b2f0945fb90898b68b730d12e7a52190251
|
[
"BSD-2-Clause"
] | 4
|
2018-07-31T09:39:33.000Z
|
2019-05-22T23:56:18.000Z
|
redash/serializers.py
|
hgs847825/redash
|
11d09b2f0945fb90898b68b730d12e7a52190251
|
[
"BSD-2-Clause"
] | 60
|
2019-05-31T06:11:35.000Z
|
2021-06-25T15:20:33.000Z
|
redash/serializers.py
|
hgs847825/redash
|
11d09b2f0945fb90898b68b730d12e7a52190251
|
[
"BSD-2-Clause"
] | 2
|
2018-07-06T23:48:38.000Z
|
2018-07-11T01:21:05.000Z
|
"""
This will eventually replace all the `to_dict` methods of the different model
classes we have. This will ensure cleaner code and better
separation of concerns.
"""
import json
from funcy import project
from redash import models
def public_widget(widget):
res = {
'id': widget.id,
'width': widget.width,
'options': json.loads(widget.options),
'text': widget.text,
'updated_at': widget.updated_at,
'created_at': widget.created_at
}
if widget.visualization and widget.visualization.id:
query_data = models.QueryResult.query.get(widget.visualization.query_rel.latest_query_data_id).to_dict()
res['visualization'] = {
'type': widget.visualization.type,
'name': widget.visualization.name,
'description': widget.visualization.description,
'options': json.loads(widget.visualization.options),
'updated_at': widget.visualization.updated_at,
'created_at': widget.visualization.created_at,
'query': {
'query': ' ', # workaround, as otherwise the query data won't be loaded.
'name': widget.visualization.query_rel.name,
'description': widget.visualization.query_rel.description,
'options': {},
'latest_query_data': query_data
}
}
return res
def public_dashboard(dashboard):
dashboard_dict = project(dashboard.to_dict(), (
'name', 'layout', 'dashboard_filters_enabled', 'updated_at',
'created_at'
))
widget_list = (models.Widget.query
.filter(models.Widget.dashboard_id == dashboard.id)
.outerjoin(models.Visualization)
.outerjoin(models.Query))
dashboard_dict['widgets'] = [public_widget(w) for w in widget_list]
return dashboard_dict
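# Hypothetical usage sketch (the lookup and response helper names below are
# illustrative, not part of this module):
#     dashboard = models.Dashboard.get_by_slug_and_org(slug, org)
#     return json_response(public_dashboard(dashboard))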
| 33.821429
| 112
| 0.6283
|
4fd3584b6537f2dcd7ce1a69e73dcfff50604d71
| 4,843
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/plugins/lookup/template.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/plugins/lookup/template.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/plugins/lookup/template.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2012-17, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: template
author: Michael DeHaan <michael.dehaan@gmail.com>
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
description:
- Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
options:
_terms:
description: list of files to template
convert_data:
type: bool
description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
variable_start_string:
description: The string marking the beginning of a print statement.
default: '{{'
version_added: '2.8'
type: str
variable_end_string:
description: The string marking the end of a print statement.
default: '}}'
version_added: '2.8'
type: str
"""
EXAMPLES = """
- name: show templating results
debug:
msg: "{{ lookup('template', './some_template.j2') }}"
- name: show templating results with different variable start and end string
debug:
msg: "{{ lookup('template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
"""
RETURN = """
_raw:
description: file(s) content after templating
"""
from copy import deepcopy
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
convert_data_p = kwargs.get('convert_data', True)
lookup_template_vars = kwargs.get('template_vars', {})
ret = []
variable_start_string = kwargs.get('variable_start_string', None)
variable_end_string = kwargs.get('variable_end_string', None)
old_vars = self._templar.available_variables
for term in terms:
display.debug("File lookup term: %s" % term)
lookupfile = self.find_file_in_search_path(variables, 'templates', term)
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile:
b_template_data, show_data = self._loader._get_file_contents(lookupfile)
template_data = to_text(b_template_data, errors='surrogate_or_strict')
# set jinja2 internal search path for includes
searchpath = variables.get('ansible_search_path', [])
if searchpath:
# our search paths aren't actually the proper ones for jinja includes.
# We want to search into the 'templates' subdir of each search path in
# addition to our original search paths.
newsearchpath = []
for p in searchpath:
newsearchpath.append(os.path.join(p, 'templates'))
newsearchpath.append(p)
searchpath = newsearchpath
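# Illustrative example (assumed input): for searchpath == ['/a', '/b'] the
# loop above yields ['/a/templates', '/a', '/b/templates', '/b'], so Jinja2
# resolves includes from each 'templates' subdir before the path itself;
# the template's own directory is then prepended below.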
searchpath.insert(0, os.path.dirname(lookupfile))
self._templar.environment.loader.searchpath = searchpath
if variable_start_string is not None:
self._templar.environment.variable_start_string = variable_start_string
if variable_end_string is not None:
self._templar.environment.variable_end_string = variable_end_string
# The template will have access to all existing variables,
# plus some added by ansible (e.g., template_{path,mtime}),
# plus anything passed to the lookup with the template_vars=
# argument.
vars = deepcopy(variables)
vars.update(generate_ansible_template_vars(lookupfile))
vars.update(lookup_template_vars)
self._templar.available_variables = vars
# do the templating
res = self._templar.template(template_data, preserve_trailing_newlines=True,
convert_data=convert_data_p, escape_backslashes=False)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
# restore old variables
self._templar.available_variables = old_vars
return ret
| 39.696721
| 159
| 0.643816
|
5d69364f9f99317da3af8a634e419ab31435622e
| 9,187
|
py
|
Python
|
tests/test_deposit_withdraw.py
|
SBfin/UniStrategy
|
0380a8f49a0b4eace72a56a2d7831100bdf4d276
|
[
"Unlicense"
] | 2
|
2021-11-10T17:41:11.000Z
|
2022-03-17T08:46:05.000Z
|
tests/test_deposit_withdraw.py
|
i001962/UniStrategy
|
0380a8f49a0b4eace72a56a2d7831100bdf4d276
|
[
"Unlicense"
] | null | null | null |
tests/test_deposit_withdraw.py
|
i001962/UniStrategy
|
0380a8f49a0b4eace72a56a2d7831100bdf4d276
|
[
"Unlicense"
] | 1
|
2021-11-10T17:41:05.000Z
|
2021-11-10T17:41:05.000Z
|
from brownie import chain, reverts, ZERO_ADDRESS
import pytest
from pytest import approx
@pytest.mark.parametrize(
"amount0Desired,amount1Desired",
[[0, 1], [1, 0], [1e18, 0], [0, 1e18], [1e4, 1e18], [1e18, 1e18]],
)
def test_initial_deposit(
vault,
tokens,
gov,
user,
recipient,
amount0Desired,
amount1Desired,
):
# Store balances
balance0 = tokens[0].balanceOf(user)
balance1 = tokens[1].balanceOf(user)
# Deposit
tx = vault.deposit(amount0Desired, amount1Desired, 0, 0, recipient, {"from": user})
shares, amount0, amount1 = tx.return_value
# Check amounts are same as inputs
assert amount0 == amount0Desired
assert amount1 == amount1Desired
# Check received right number of shares
assert shares == vault.balanceOf(recipient) > 0
# Check paid right amount of tokens
assert amount0 == balance0 - tokens[0].balanceOf(user)
assert amount1 == balance1 - tokens[1].balanceOf(user)
# Check event
assert tx.events["Deposit"] == {
"sender": user,
"to": recipient,
"shares": shares,
"amount0": amount0,
"amount1": amount1,
}
@pytest.mark.parametrize(
"amount0Desired,amount1Desired",
[[1, 1e18], [1e18, 1], [1e4, 1e18], [1e18, 1e18]],
)
def test_deposit(
vaultAfterPriceMove,
tokens,
getPositions,
gov,
user,
recipient,
amount0Desired,
amount1Desired,
):
vault = vaultAfterPriceMove
# Store balances, supply and positions
balance0 = tokens[0].balanceOf(user)
balance1 = tokens[1].balanceOf(user)
totalSupply = vault.totalSupply()
total0, total1 = vault.getTotalAmounts()
govShares = vault.balanceOf(gov)
# Deposit
tx = vault.deposit(amount0Desired, amount1Desired, 0, 0, recipient, {"from": user})
shares, amount0, amount1 = tx.return_value
# Check amounts don't exceed desired
assert amount0 <= amount0Desired
assert amount1 <= amount1Desired
# Check received right number of shares
assert shares == vault.balanceOf(recipient) > 0
# Check paid right amount of tokens
assert amount0 == balance0 - tokens[0].balanceOf(user)
assert amount1 == balance1 - tokens[1].balanceOf(user)
# Check one amount is tight
assert approx(amount0) == amount0Desired or approx(amount1) == amount1Desired
# Check total amounts are in proportion
total0After, total1After = vault.getTotalAmounts()
totalSupplyAfter = vault.totalSupply()
assert approx(total0 * total1After) == total1 * total0After
assert approx(total0 * totalSupplyAfter) == total0After * totalSupply
assert approx(total1 * totalSupplyAfter) == total1After * totalSupply
# Check event
assert tx.events["Deposit"] == {
"sender": user,
"to": recipient,
"shares": shares,
"amount0": amount0,
"amount1": amount1,
}
@pytest.mark.parametrize(
"amount0Desired,amount1Desired",
[[1e4, 1e18], [1e18, 1e18]],
)
def test_deposit_when_vault_only_has_token0(
vaultOnlyWithToken0,
pool,
tokens,
getPositions,
gov,
user,
recipient,
amount0Desired,
amount1Desired,
):
vault = vaultOnlyWithToken0
# Poke fees
vault.withdraw(vault.balanceOf(gov) // 2, 0, 0, gov, {"from": gov})
# Store balances, supply and positions
balance0 = tokens[0].balanceOf(user)
balance1 = tokens[1].balanceOf(user)
totalSupply = vault.totalSupply()
total0, total1 = vault.getTotalAmounts()
# Deposit
tx = vault.deposit(amount0Desired, amount1Desired, 0, 0, recipient, {"from": user})
shares, amount0, amount1 = tx.return_value
# Check amounts don't exceed desired
assert amount0 <= amount0Desired
assert amount1 <= amount1Desired
# Check received right number of shares
assert shares == vault.balanceOf(recipient) > 0
# Check paid right amount of tokens
assert amount0 == balance0 - tokens[0].balanceOf(user)
assert amount1 == balance1 - tokens[1].balanceOf(user)
# Check paid mainly token0
assert amount0 > 0
assert approx(amount1 / amount0, abs=1e-3) == 0
# Check amount is tight
assert approx(amount0) == amount0Desired
# Check total amounts are in proportion
total0After, total1After = vault.getTotalAmounts()
totalSupplyAfter = vault.totalSupply()
assert approx(total0 * totalSupplyAfter) == total0After * totalSupply
@pytest.mark.parametrize(
"amount0Desired,amount1Desired",
[[1e4, 1e18], [1e18, 1e18]],
)
def test_deposit_when_vault_only_has_token1(
vaultOnlyWithToken1,
pool,
tokens,
getPositions,
gov,
user,
recipient,
amount0Desired,
amount1Desired,
):
vault = vaultOnlyWithToken1
# Poke fees
vault.withdraw(vault.balanceOf(gov) // 2, 0, 0, gov, {"from": gov})
# Store balances, supply and positions
balance0 = tokens[0].balanceOf(user)
balance1 = tokens[1].balanceOf(user)
totalSupply = vault.totalSupply()
total0, total1 = vault.getTotalAmounts()
# Deposit
tx = vault.deposit(amount0Desired, amount1Desired, 0, 0, recipient, {"from": user})
shares, amount0, amount1 = tx.return_value
# Check amounts don't exceed desired
assert amount0 <= amount0Desired
assert amount1 <= amount1Desired
# Check received right number of shares
assert shares == vault.balanceOf(recipient) > 0
# Check paid right amount of tokens
assert amount0 == balance0 - tokens[0].balanceOf(user)
assert amount1 == balance1 - tokens[1].balanceOf(user)
# Check paid mainly token1
assert amount1 > 0
assert approx(amount0 / amount1, abs=1e-3) == 0
# Check amount is tight
assert approx(amount1) == amount1Desired
# Check total amounts are in proportion
total0After, total1After = vault.getTotalAmounts()
totalSupplyAfter = vault.totalSupply()
assert approx(total1 * totalSupplyAfter) == total1After * totalSupply
def test_deposit_checks(vault, user):
with reverts("amount0Desired or amount1Desired"):
vault.deposit(0, 0, 0, 0, user, {"from": user})
with reverts("to"):
vault.deposit(1e8, 1e8, 0, 0, ZERO_ADDRESS, {"from": user})
with reverts("to"):
vault.deposit(1e8, 1e8, 0, 0, vault, {"from": user})
with reverts("amount0Min"):
vault.deposit(1e8, 0, 2e8, 0, user, {"from": user})
with reverts("amount1Min"):
vault.deposit(0, 1e8, 0, 2e8, user, {"from": user})
with reverts("maxTotalSupply"):
vault.deposit(1e8, 200e18, 0, 0, user, {"from": user})
def test_withdraw(
vaultAfterPriceMove,
strategy,
pool,
tokens,
getPositions,
gov,
user,
recipient,
keeper,
):
vault = vaultAfterPriceMove
# Deposit and rebalance
tx = vault.deposit(1e8, 1e10, 0, 0, user, {"from": user})
shares, _, _ = tx.return_value
strategy.rebalance({"from": keeper})
# Store balances, supply and positions
balance0 = tokens[0].balanceOf(recipient)
balance1 = tokens[1].balanceOf(recipient)
totalSupply = vault.totalSupply()
total0, total1 = vault.getTotalAmounts()
basePos, limitPos = getPositions(vault)
# Withdraw all shares
tx = vault.withdraw(shares, 0, 0, recipient, {"from": user})
amount0, amount1 = tx.return_value
# Check is empty now
assert vault.balanceOf(user) == 0
# Check received right amount of tokens
assert tokens[0].balanceOf(recipient) - balance0 == amount0 > 0
assert tokens[1].balanceOf(recipient) - balance1 == amount1 > 0
# Check total amounts are in proportion
ratio = (totalSupply - shares) / totalSupply
total0After, total1After = vault.getTotalAmounts()
assert approx(total0After / total0) == ratio
assert approx(total1After / total1) == ratio
# Check liquidity in pool decreases proportionally
basePosAfter, limitPosAfter = getPositions(vault)
assert approx(basePosAfter[0] / basePos[0]) == ratio
assert approx(limitPosAfter[0] / limitPos[0]) == ratio
# Check event
assert tx.events["Withdraw"] == {
"sender": user,
"to": recipient,
"shares": shares,
"amount0": amount0,
"amount1": amount1,
}
def test_withdraw_checks(vault, user, recipient):
tx = vault.deposit(1e8, 1e10, 0, 0, user, {"from": user})
shares, _, _ = tx.return_value
with reverts("shares"):
vault.withdraw(0, 0, 0, recipient, {"from": user})
with reverts("to"):
vault.withdraw(shares - 1000, 0, 0, ZERO_ADDRESS, {"from": user})
with reverts("to"):
vault.withdraw(shares - 1000, 0, 0, vault, {"from": user})
with reverts("amount0Min"):
vault.withdraw(shares - 1000, 1e18, 0, recipient, {"from": user})
with reverts("amount1Min"):
vault.withdraw(shares - 1000, 0, 1e18, recipient, {"from": user})
| 30.220395
| 88
| 0.639055
|
17913c28be8fca9e5d721f82b1a37392ccc9dd9e
| 1,388
|
py
|
Python
|
tools/delete_momentum.py
|
sisrfeng/NA-fWebSOD
|
49cb75a9a0d557b05968c6b11b0f17a7043f2077
|
[
"Apache-2.0"
] | 23
|
2020-03-30T11:48:33.000Z
|
2022-03-11T06:34:31.000Z
|
tools/delete_momentum.py
|
sisrfeng/NA-fWebSOD
|
49cb75a9a0d557b05968c6b11b0f17a7043f2077
|
[
"Apache-2.0"
] | 9
|
2020-09-28T07:15:16.000Z
|
2022-03-25T08:11:06.000Z
|
tools/delete_momentum.py
|
sisrfeng/NA-fWebSOD
|
49cb75a9a0d557b05968c6b11b0f17a7043f2077
|
[
"Apache-2.0"
] | 10
|
2020-03-30T11:48:34.000Z
|
2021-06-02T06:12:36.000Z
|
#!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import sys
from collections import OrderedDict
from six.moves import cPickle as pickle
from detectron.utils.io import save_object
from detectron.utils.io import load_object
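# Usage sketch (paths are hypothetical):
#     python tools/delete_momentum.py model_with_momentum.pkl model_clean.pkl
# This strips every momentum blob from a pickled Detectron weights file,
# shrinking it for inference-only use.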
if __name__ == '__main__':
in_path = sys.argv[1]
out_path = sys.argv[2]
pkl_data = load_object(in_path)
pkl_data = pkl_data['blobs']
keys = pkl_data.keys()
for k in list(keys):
if 'momentum' in k:
print('delete ', k)
pkl_data.pop(k, None)
save_object(pkl_data, out_path)
| 28.916667
| 78
| 0.693084
|
2485062c3a96c440ff0b0bac3e5016deaea5893a
| 6,748
|
py
|
Python
|
datasets/bls/unemployment_cps_series/unemployment_cps_series_dag.py
|
nlarge-google/public-datasets-pipelines
|
ba4bff67c1ea334c87b3895fffa2f2402e6e56c6
|
[
"Apache-2.0"
] | 2
|
2022-02-27T02:31:35.000Z
|
2022-02-27T02:32:49.000Z
|
datasets/bls/unemployment_cps_series/unemployment_cps_series_dag.py
|
nlarge-google/public-datasets-pipelines
|
ba4bff67c1ea334c87b3895fffa2f2402e6e56c6
|
[
"Apache-2.0"
] | null | null | null |
datasets/bls/unemployment_cps_series/unemployment_cps_series_dag.py
|
nlarge-google/public-datasets-pipelines
|
ba4bff67c1ea334c87b3895fffa2f2402e6e56c6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="bls.unemployment_cps_series",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
transform_csv = kubernetes_pod_operator.KubernetesPodOperator(
task_id="transform_csv",
startup_timeout_seconds=600,
name="unemployment_cps_series",
namespace="default",
affinity={
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "cloud.google.com/gke-nodepool",
"operator": "In",
"values": ["pool-e2-standard-4"],
}
]
}
]
}
}
},
image_pull_policy="Always",
image="{{ var.json.bls.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URLS": '["gs://pdp-feeds-staging/Bureau/ln.series.tsv"]',
"SOURCE_FILES": '["files/data1.tsv"]',
"TARGET_FILE": "files/data_output.tsv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/bls/unemployment_cps_series/data_output.csv",
"PIPELINE_NAME": "unemployment_cps_series",
"JOINING_KEY": "",
"TRIM_SPACE": '["series_id","footnote_codes","series_title"]',
"CSV_HEADERS": '["series_id","lfst_code","periodicity_code","series_title","absn_code","activity_code","ages_code","class_code","duration_code","education_code","entr_code","expr_code","hheader_code","hour_code","indy_code","jdes_code","look_code","mari_code","mjhs_code","occupation_code","orig_code","pcts_code","race_code","rjnw_code","rnlf_code","rwns_code","seek_code","sexs_code","tdat_code","vets_code","wkst_code","born_code","chld_code","disa_code","seasonal","footnote_codes","begin_year","begin_period","end_year","end_period","cert_code"]',
},
resources={"request_memory": "4G", "request_cpu": "1"},
)
# Task to load CSV data to a BigQuery table
load_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator(
task_id="load_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/bls/unemployment_cps_series/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="bls.unemployment_cps_series",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{"name": "series_id", "type": "STRING", "mode": "required"},
{"name": "lfst_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "periodicity_code", "type": "STRING", "mode": "NULLABLE"},
{"name": "series_title", "type": "STRING", "mode": "NULLABLE"},
{"name": "absn_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "activity_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "ages_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "class_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "duration_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "education_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "entr_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "expr_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "hheader_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "hour_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "indy_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "jdes_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "look_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "mari_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "mjhs_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "occupation_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "orig_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "pcts_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "race_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "rjnw_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "rnlf_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "rwns_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "seek_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "sexs_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "tdat_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "vets_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "wkst_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "born_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "chld_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "disa_code", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "seasonal", "type": "STRING", "mode": "NULLABLE"},
{"name": "footnote_codes", "type": "STRING", "mode": "NULLABLE"},
{"name": "begin_year", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "begin_period", "type": "STRING", "mode": "NULLABLE"},
{"name": "end_year", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "end_period", "type": "STRING", "mode": "NULLABLE"},
{"name": "cert_code", "type": "INTEGER", "mode": "NULLABLE"},
],
)
transform_csv >> load_to_bq
| 51.907692
| 564
| 0.55987
|
2b0a3acb328774988cc09dc999bd7c802fd78892
| 2,290
|
py
|
Python
|
fluent_contents/plugins/markup/content_plugins.py
|
francofuji/django-fluent-contents
|
03da447ef0854b0e6a6f8ff39d9281d11efc8587
|
[
"Apache-2.0"
] | null | null | null |
fluent_contents/plugins/markup/content_plugins.py
|
francofuji/django-fluent-contents
|
03da447ef0854b0e6a6f8ff39d9281d11efc8587
|
[
"Apache-2.0"
] | null | null | null |
fluent_contents/plugins/markup/content_plugins.py
|
francofuji/django-fluent-contents
|
03da447ef0854b0e6a6f8ff39d9281d11efc8587
|
[
"Apache-2.0"
] | null | null | null |
"""
Markup plugin, rendering human readable formatted text to HTML.
This plugin supports several markup languages:
reStructuredText: Used for Python documentation.
Markdown: Used for GitHub and Stackoverflow comments (both have a dialect/extended version)
Textile: An extensive markup format, also used in Redmine and partially in Basecamp.
"""
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.markup.models import MarkupItem, MarkupItemForm, LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup import backend, appsettings
class MarkupPluginBase(ContentPlugin):
"""
Base plugin for markup item models.
The actual plugins are dynamically created.
"""
model = MarkupItem
category = _('Markup')
form = MarkupItemForm
admin_form_template = ContentPlugin.ADMIN_TEMPLATE_WITHOUT_LABELS
search_output = True
class Media:
css = {'screen': ('fluent_contents/plugins/markup/markup_admin.css',)}
def render(self, request, instance, **kwargs):
try:
html = backend.render_text(instance.text, instance.language)
except Exception as e:
html = self.render_error(e)
# Included in a DIV, so the next item will be displayed below.
return mark_safe('<div class="markup">' + html + '</div>\n')
def _create_markup_plugin(language, model):
"""
Create a new MarkupPlugin class that represents the plugin type.
"""
form = type("{0}MarkupItemForm".format(language.capitalize()), (MarkupItemForm,), {
'default_language': language,
})
classname = "{0}MarkupPlugin".format(language.capitalize())
PluginClass = type(classname, (MarkupPluginBase,), {
'model': model,
'form': form,
})
return PluginClass
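# Illustrative result (hedged): for language == 'markdown' the factory above
# builds, via type(), a class roughly equivalent to
#     class MarkdownMarkupPlugin(MarkupPluginBase):
#         model = model  # the markdown MarkupItem model
#         form = MarkdownMarkupItemForm  # a MarkupItemForm with
#                                        # default_language = 'markdown'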
# Dynamically create plugins for every language type.
# Allows adding them separately in the admin, while using the same database table.
for language, model in LANGUAGE_MODEL_CLASSES.items():
if language not in appsettings.FLUENT_MARKUP_LANGUAGES:
continue
#globals()[classname] = PluginClass
plugin_pool.register(_create_markup_plugin(language, model))
| 33.676471
| 100
| 0.724454
|
824a04cf3f8b7e81e6607a121d2beaf535be06d5
| 8,022
|
py
|
Python
|
pkg/tests/install/test.py
|
hborawski/rules_pkg
|
8d542763a3959db79175404758f46c7f3f385fa5
|
[
"Apache-2.0"
] | null | null | null |
pkg/tests/install/test.py
|
hborawski/rules_pkg
|
8d542763a3959db79175404758f46c7f3f385fa5
|
[
"Apache-2.0"
] | null | null | null |
pkg/tests/install/test.py
|
hborawski/rules_pkg
|
8d542763a3959db79175404758f46c7f3f385fa5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import unittest
import stat
import subprocess
from rules_python.python.runfiles import runfiles
import private.manifest as manifest
class PkgInstallTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.runfiles = runfiles.Create()
# Somewhat of an implementation detail, but it works. I think.
manifest_file = cls.runfiles.Rlocation("rules_pkg/tests/install/test_installer_install_script-install-manifest.json")
with open(manifest_file, 'r') as fh:
manifest_data_raw = json.load(fh)
cls.manifest_data = {}
for entry in manifest_data_raw:
entry_struct = manifest.ManifestEntry(*entry)
cls.manifest_data[entry_struct.dest] = entry_struct
cls.installdir = os.path.join(os.getenv("TEST_TMPDIR"), "installdir")
env = {}
env.update(cls.runfiles.EnvVars())
subprocess.check_call([
cls.runfiles.Rlocation("rules_pkg/tests/install/test_installer"),
"--destdir", cls.installdir,
"--verbose",
],
env=env)
def entity_type_at_path(self, path):
if os.path.islink(path):
return manifest.ENTRY_IS_LINK
elif os.path.isfile(path):
return manifest.ENTRY_IS_FILE
elif os.path.isdir(path):
return manifest.ENTRY_IS_DIR
else:
# We can't infer what TreeArtifacts are by looking at them -- the
# build system is not aware of their contents.
raise ValueError("Entity {} is not a link, file, or directory".format(path))
def assertEntryTypeMatches(self, entry, actual_path):
actual_entry_type = self.entity_type_at_path(actual_path)
self.assertEqual(actual_entry_type, entry.entry_type,
"Entity {} should be a {}, but was actually {}".format(
entry.dest,
manifest.entry_type_to_string(entry.entry_type),
manifest.entry_type_to_string(actual_entry_type),
))
def assertEntryModeMatches(self, entry, actual_path):
# TODO: permissions in windows are... tricky. Don't bother
# testing for them if we're in it for the time being
if os.name == 'nt':
return
actual_mode = stat.S_IMODE(os.stat(actual_path).st_mode)
expected_mode = int(entry.mode, 8)
self.assertEqual(actual_mode, expected_mode,
"Entry {} has mode {:04o}, expected {:04o}".format(
entry.dest, actual_mode, expected_mode,
))
def test_manifest_matches(self):
unowned_dirs = set()
owned_dirs = set()
# Figure out what directories we are supposed to own, and which ones we
# aren't.
#
# Unowned directories are created implicitly by requesting other
# elements be created or installed.
#
# Owned directories are created explicitly with the pkg_mkdirs rule.
for dest, data in self.manifest_data.items():
if data.entry_type == manifest.ENTRY_IS_DIR:
owned_dirs.add(dest)
# TODO(nacl): The initial stage of the accumulation returns an empty string,
# which ends up in the set, representing the root of the manifest.
# This may not be the best thing.
unowned_dirs.update([p for p in itertools.accumulate(os.path.dirname(dest).split('/'),
func=lambda accum, new: accum + '/' + new)])
# In the above loop, unowned_dirs contains all possible directories that
# are in the manifest. Prune them here.
unowned_dirs -= owned_dirs
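# Illustrative example (hedged): for dest == 'a/b/file', os.path.dirname(dest)
# is 'a/b' and the accumulate() above yields 'a' then 'a/b' -- every ancestor
# directory. For a top-level dest the dirname is '', so accumulate() yields
# only the empty string noted in the TODO above.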
# TODO: check for ownership (user, group)
found_entries = {dest: False for dest in self.manifest_data.keys()}
for root, dirs, files in os.walk(self.installdir):
rel_root_path = os.path.relpath(root, self.installdir)
# The rest of this uses string comparison. To reduce potential
# confusion, ensure that the "." doesn't show up elsewhere.
#
# TODO(nacl) consider using pathlib here, which will reduce the
# need for path cleverness.
if rel_root_path == '.':
rel_root_path = ''
# TODO(nacl): check for treeartifacts here. If so, prune `dirs`,
# and set the rest aside for future processing.
# Directory ownership tests
if len(files) == 0 and len(dirs) == 0:
# Empty directories must be explicitly requested by something
if rel_root_path not in self.manifest_data:
self.fail("Directory {} not in manifest".format(rel_root_path))
entry = self.manifest_data[rel_root_path]
self.assertEntryTypeMatches(entry, root)
self.assertEntryModeMatches(entry, root)
found_entries[rel_root_path] = True
else:
# There's something in here. Depending on how it was set up, it
# could either be owned or unowned.
if rel_root_path in self.manifest_data:
entry = self.manifest_data[rel_root_path]
self.assertEntryTypeMatches(entry, root)
self.assertEntryModeMatches(entry, root)
found_entries[rel_root_path] = True
else:
# If any unowned directories are here, they must be the
# prefix of some entity in the manifest.
self.assertIn(rel_root_path, unowned_dirs)
for f in files:
# The path on the filesystem in which the file actually exists.
# TODO(#382): This part of the test assumes that the path
# separator is '/', which is not the case in Windows. However,
# paths emitted in the JSON manifests may also be using
# '/'-separated paths.
#
# Confirm the degree to which this is a problem, and remedy as
# needed. It may be worth setting the keys in the manifest_data
# dictionary to pathlib.Path or otherwise converting them to
# native paths.
fpath = os.path.normpath("/".join([root, f]))
# The path inside the manifest (relative to the install
# destdir).
rel_fpath = os.path.normpath("/".join([rel_root_path, f]))
if rel_fpath not in self.manifest_data:
self.fail("Entity {} not in manifest".format(rel_fpath))
entry = self.manifest_data[rel_fpath]
self.assertEntryTypeMatches(entry, fpath)
self.assertEntryModeMatches(entry, fpath)
found_entries[rel_fpath] = True
# TODO(nacl): check for TreeArtifacts
num_missing = 0
for dest, present in found_entries.items():
if present is False:
print("Entity {} is missing from the tree".format(dest))
num_missing += 1
self.assertEqual(num_missing, 0)
if __name__ == "__main__":
unittest.main()
| 42.670213
| 125
| 0.600349
|
3546262ecba6163926126718585908cd3e9022aa
| 1,063
|
py
|
Python
|
misc/ragout_config.py
|
fomightez/LRSDAY
|
5786d7dc10987671283c6c8c4217ec244a8e915f
|
[
"MIT"
] | 24
|
2017-08-31T19:53:42.000Z
|
2022-03-02T09:28:10.000Z
|
misc/ragout_config.py
|
fomightez/LRSDAY
|
5786d7dc10987671283c6c8c4217ec244a8e915f
|
[
"MIT"
] | 2
|
2019-04-04T16:22:33.000Z
|
2019-04-15T12:53:02.000Z
|
misc/ragout_config.py
|
fomightez/LRSDAY
|
5786d7dc10987671283c6c8c4217ec244a8e915f
|
[
"MIT"
] | 5
|
2019-04-01T18:03:48.000Z
|
2021-07-23T12:45:51.000Z
|
#(c) 2013-2014 by Authors
#This file is a part of Ragout program.
#Released under the BSD license (see LICENSE file)
"""
This module stores some configuration parameters
"""
vals = {
"overlap" :
{
"min_overlap" : 33,
"max_overlap" : 200,
"max_path_len" : 30,
"detect_kmer" : True
},
"maf2synteny" :
[
(30, 500),
(100, 5000),
(500, 50000),
(5000, 500000)
],
"sibelia" :
[
(30, 150),
(100, 500),
(500, 1500)
],
"blocks" :
{
"small" : [5000, 500, 100],
"large" : [10000, 500, 100]
},
"big_genome_threshold" : 500 * 1024 * 1024,
"min_synteny_coverage" : 0.6,
"min_overlap_rate" : 0.5,
"min_scaffold_gap": 5000,
"max_scaffold_gap": 10000
}
| 23.108696
| 55
| 0.388523
|
274334b46cb3b7a8408058aa4d4fd685267d7ba0
| 6,588
|
py
|
Python
|
statsmodels/base/_penalized.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 2
|
2021-01-25T15:50:42.000Z
|
2021-05-26T15:54:56.000Z
|
statsmodels/base/_penalized.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/base/_penalized.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 1
|
2019-05-15T12:02:20.000Z
|
2019-05-15T12:02:20.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 08:23:48 2015
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from ._penalties import NonePenalty
from statsmodels.tools.numdiff import approx_fprime_cs, approx_fprime
class PenalizedMixin(object):
"""Mixin class for Maximum Penalized Likelihood
Parameters
----------
args and kwds for the model super class
penal : None or instance of Penalized function class
If penal is None, then NonePenalty is used.
pen_weight : float or None
factor for weighting the penalization term.
If None, then pen_weight is set to nobs.
TODO: missing **kwds or explicit keywords
TODO: do we adjust the inherited docstrings?
We would need templating to add the penalization parameters
"""
def __init__(self, *args, **kwds):
# pop extra kwds before calling super
self.penal = kwds.pop('penal', None)
self.pen_weight = kwds.pop('pen_weight', None)
super(PenalizedMixin, self).__init__(*args, **kwds)
# TODO: define pen_weight as average pen_weight? i.e. per observation
# I would have preferred len(self.endog) * kwds.get('pen_weight', 1)
# or use pen_weight_factor in signature
if self.pen_weight is None:
self.pen_weight = len(self.endog)
if self.penal is None:
# unpenalized by default
self.penal = NonePenalty()
self.pen_weight = 0
self._init_keys.extend(['penal', 'pen_weight'])
self._null_drop_keys = getattr(self, '_null_drop_keys', [])
self._null_drop_keys.extend(['penal'])
def loglike(self, params, pen_weight=None, **kwds):
if pen_weight is None:
pen_weight = self.pen_weight
llf = super(PenalizedMixin, self).loglike(params, **kwds)
if pen_weight != 0:
llf -= pen_weight * self.penal.func(params)
return llf
def loglikeobs(self, params, pen_weight=None, **kwds):
if pen_weight is None:
pen_weight = self.pen_weight
llf = super(PenalizedMixin, self).loglikeobs(params, **kwds)
nobs_llf = float(llf.shape[0])
if pen_weight != 0:
llf -= pen_weight / nobs_llf * self.penal.func(params)
return llf
def score_numdiff(self, params, pen_weight=None, method='fd', **kwds):
"""score based on finite difference derivative
"""
if pen_weight is None:
pen_weight = self.pen_weight
loglike = lambda p: self.loglike(p, pen_weight=pen_weight, **kwds)
if method == 'cs':
return approx_fprime_cs(params, loglike)
elif method == 'fd':
return approx_fprime(params, loglike, centered=True)
else:
raise ValueError('method not recognized, should be "fd" or "cs"')
def score(self, params, pen_weight=None, **kwds):
if pen_weight is None:
pen_weight = self.pen_weight
sc = super(PenalizedMixin, self).score(params, **kwds)
if pen_weight != 0:
sc -= pen_weight * self.penal.grad(params)
return sc
def score_obs(self, params, pen_weight=None, **kwargs):
if pen_weight is None:
pen_weight = self.pen_weight
sc = super(PenalizedMixin, self).score_obs(params, **kwargs)
nobs_sc = float(sc.shape[0])
if pen_weight != 0:
sc -= pen_weight / nobs_sc * self.penal.grad(params)
return sc
def hessian_numdiff(self, params, pen_weight=None, **kwds):
"""hessian based on finite difference derivative
"""
if pen_weight is None:
pen_weight = self.pen_weight
loglike = lambda p: self.loglike(p, pen_weight=pen_weight, **kwds)
from statsmodels.tools.numdiff import approx_hess
return approx_hess(params, loglike)
def hessian(self, params, pen_weight=None, **kwds):
if pen_weight is None:
pen_weight = self.pen_weight
hess = super(PenalizedMixin, self).hessian(params, **kwds)
if pen_weight != 0:
h = self.penal.deriv2(params)
if h.ndim == 1:
hess -= np.diag(pen_weight * h)
else:
hess -= pen_weight * h
return hess
def fit(self, method=None, trim=None, **kwds):
"""minimize negative penalized log-likelihood
Parameters
----------
method : None or str
Method specifies the scipy optimizer as in nonlinear MLE models.
trim : Boolean or float
Default is False or None, which uses no trimming.
If trim is True or a float, then small parameters are set to zero.
If True, then a default threshold is used. If trim is a float, then
it will be used as threshold.
The default threshold is currently 1e-4, but it will change in
future and become penalty function dependent.
kwds : extra keyword arguments
This keyword arguments are treated in the same way as in the
fit method of the underlying model class.
Specifically, additional optimizer keywords and cov_type related
keywords can be added.
"""
# If method is None, then we choose a default method ourselves
# TODO: temporary hack, need extra fit kwds
# we need to rule out fit methods in a model that will not work with
# penalization
if hasattr(self, 'family'): # assume this identifies GLM
kwds.update({'max_start_irls' : 0})
# currently we use `bfgs` by default
if method is None:
method = 'bfgs'
if trim is None:
trim = False
res = super(PenalizedMixin, self).fit(method=method, **kwds)
if trim is False:
# note boolean check for "is False", not "False_like"
return res
else:
if trim is True:
trim = 1e-4 # trim threshold
# TODO: make it penal function dependent
# temporary standin, only checked for Poisson and GLM,
# and is computationally inefficient
drop_index = np.nonzero(np.abs(res.params) < trim)[0]
keep_index = np.nonzero(np.abs(res.params) > trim)[0]
if drop_index.any():
# TODO: do we need to add results attributes?
res_aux = self._fit_zeros(keep_index, **kwds)
return res_aux
else:
return res
| 33.441624
| 79
| 0.604129
|
e18e0cd8f629a27e3ff3a442cea6d5b57bd59852
| 1,478
|
py
|
Python
|
src/sctools/test/test_platform.py
|
amcnicho/sctools
|
9d09540b18abdbebece111d999770baa5837c728
|
[
"BSD-3-Clause"
] | 20
|
2018-07-08T01:52:45.000Z
|
2022-03-23T02:39:15.000Z
|
src/sctools/test/test_platform.py
|
amcnicho/sctools
|
9d09540b18abdbebece111d999770baa5837c728
|
[
"BSD-3-Clause"
] | 68
|
2017-10-31T02:50:27.000Z
|
2022-01-31T18:17:36.000Z
|
src/sctools/test/test_platform.py
|
amcnicho/sctools
|
9d09540b18abdbebece111d999770baa5837c728
|
[
"BSD-3-Clause"
] | 3
|
2017-11-17T20:13:35.000Z
|
2020-09-08T20:39:55.000Z
|
import os
import tempfile
import pysam
from .. import platform
data_dir = os.path.split(__file__)[0] + "/data/"
def test_attach_barcodes():
"""High-level test of the AttachBarcodes command"""
temp_dir_name = tempfile.mkdtemp()
# Construct cli arguments to pass to the command
temp_output_bam = os.path.join(temp_dir_name, "output.bam")
args = [
"--r1",
data_dir + "test_r1.fastq",
"--u2",
data_dir + "test_r2.bam",
"--i1",
data_dir + "test_i1.fastq",
"--o",
temp_output_bam,
"--sample-barcode-start-pos",
"0",
"--sample-barcode-length",
"8",
"--cell-barcode-start-pos",
"0",
"--cell-barcode-length",
"16",
"--molecule-barcode-start-pos",
"16",
"--molecule-barcode-length",
"4",
]
platform.BarcodePlatform.attach_barcodes(args)
with pysam.AlignmentFile(temp_output_bam, "rb", check_sq=False) as samfile:
for read in samfile:
tag_cr = read.get_tag("CR")
tag_cy = read.get_tag("CY")
tag_ur = read.get_tag("UR")
tag_uy = read.get_tag("UY")
tag_sr = read.get_tag("SR")
tag_sy = read.get_tag("SY")
assert len(tag_cr) == 16
assert len(tag_cy) == 16
assert len(tag_ur) == 4
assert len(tag_uy) == 4
assert len(tag_sr) == 8
assert len(tag_sy) == 8
| 25.929825
| 79
| 0.535859
|
d65495ad2f9f2caf004edb898a8bb82ca26dd70b
| 1,396
|
py
|
Python
|
test/D/HSTeoh/sconstest-singleStringCannotBeMultipleOptions_gdc.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1
|
2020-03-21T05:24:47.000Z
|
2020-03-21T05:24:47.000Z
|
test/D/HSTeoh/sconstest-singleStringCannotBeMultipleOptions_gdc.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 4
|
2019-04-11T16:27:45.000Z
|
2019-04-11T23:56:30.000Z
|
test/D/HSTeoh/sconstest-singleStringCannotBeMultipleOptions_gdc.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 2
|
2018-01-16T11:29:16.000Z
|
2020-05-13T16:48:26.000Z
|
"""
Test compiling and executing using the gdc tool.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from Common.singleStringCannotBeMultipleOptions import testForTool
testForTool('gdc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.736842
| 73
| 0.777221
|
e6a25c39da49ffdba90215a590f01308ae477ea8
| 336
|
py
|
Python
|
graphdatascience/query_runner/query_runner.py
|
soerenreichardt/graph-data-science-client
|
845c79cabdb21d57b590d51e4ddad0fa7a1caeab
|
[
"Apache-2.0"
] | null | null | null |
graphdatascience/query_runner/query_runner.py
|
soerenreichardt/graph-data-science-client
|
845c79cabdb21d57b590d51e4ddad0fa7a1caeab
|
[
"Apache-2.0"
] | null | null | null |
graphdatascience/query_runner/query_runner.py
|
soerenreichardt/graph-data-science-client
|
845c79cabdb21d57b590d51e4ddad0fa7a1caeab
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Any, Dict, List
Row = Dict[str, Any]
QueryResult = List[Row]
class QueryRunner(ABC):
@abstractmethod
def run_query(self, query: str, params: Dict[str, Any] = {}) -> QueryResult:
pass
@abstractmethod
def set_database(self, db: str) -> None:
pass
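# Minimal concrete sketch (hedged; illustrative only, not the library's
# Neo4j-backed runner):
#
#     class EchoQueryRunner(QueryRunner):
#         def __init__(self) -> None:
#             self._db = "neo4j"
#
#         def run_query(self, query: str, params: Dict[str, Any] = {}) -> QueryResult:
#             # Echo inputs back as a single row; a real runner executes Cypher.
#             return [{"query": query, "params": params, "db": self._db}]
#
#         def set_database(self, db: str) -> None:
#             self._db = db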
| 21
| 80
| 0.660714
|
6a9164f027f4d48ab2ab525f9141b35aa1a65d95
| 26,892
|
py
|
Python
|
src/translate/model.py
|
risa2000/pyopenvr
|
6c153f986d46b61a3e136c144011695841b56502
|
[
"BSD-3-Clause"
] | null | null | null |
src/translate/model.py
|
risa2000/pyopenvr
|
6c153f986d46b61a3e136c144011695841b56502
|
[
"BSD-3-Clause"
] | null | null | null |
src/translate/model.py
|
risa2000/pyopenvr
|
6c153f986d46b61a3e136c144011695841b56502
|
[
"BSD-3-Clause"
] | null | null | null |
from clang.cindex import TypeKind
import inspect
import re
import textwrap
class Declaration(object):
def __init__(self, name, docstring=None):
self.name = name
self.docstring = docstring
def __str__(self):
return f'{self.name}'
class FunctionBase(Declaration):
def __init__(self, name, type_=None, docstring=None):
super().__init__(name=name, docstring=docstring)
self.type = type_
self.parameters = []
self._count_parameter_names = set()
def __str__(self):
return self.ctypes_string()
def add_parameter(self, parameter):
# Tag count parameters
m = parameter.is_array()
if m:
self._count_parameter_names.add(m.group(1))
if parameter.name in self._count_parameter_names:
parameter.is_count = True
if parameter.name in ('punRequiredBufferSize', ):
parameter.is_required_count = True # getRuntimePath()
self.parameters.append(parameter)
def annotate_parameters(self):
for pix, p in enumerate(self.parameters):
if p.is_output_string():
len_param = self.parameters[pix + 1]
len_param.is_count = True
if p.is_struct_size():
if pix > 0:
sized_param = self.parameters[pix - 1]
t = sized_param.type.get_pointee().spelling
t = translate_type(t)
p.always_value = f'sizeof({t})'
def ctypes_string(self, in_params=()):
in_params = list(in_params)
self.annotate_parameters()
call_params = []
out_params = []
if self.has_return() and not self.raise_error_code():
param = 'result'
if self.returns_const_string():
param = f"{param}.decode('utf-8')"
out_params.append(param)
pre_call_statements = ''
post_call_statements = ''
for p in self.parameters:
if p.input_param_name():
in_params.append(p.input_param_name())
if p.call_param_name():
call_params.append(p.call_param_name())
if p.return_param_name():
out_params.append(p.return_param_name())
pre_call_statements += p.pre_call_block()
post_call_statements += p.post_call_block()
# Handle output strings
for pix, p in enumerate(self.parameters):
if p.is_output_string():
len_param = self.parameters[pix + 1]
if len_param.is_struct_size():
len_param = self.parameters[pix + 2]
len_param.is_count = True
call_params0 = []
# Treat VR_GetRuntimePath specially...
initial_buffer_size = 0
length_is_retval = True
required_len_param = None
if len(self.parameters) >= 3 and self.parameters[2].name == 'punRequiredBufferSize':
initial_buffer_size = 1
length_is_retval = False
required_len_param = self.parameters[2]
if initial_buffer_size > 0:
pre_call_statements += f'{p.py_name} = ctypes.create_string_buffer({initial_buffer_size})\n'
for p2 in self.parameters:
if p2 is p:
if initial_buffer_size == 0:
call_params0.append('None')
else:
call_params0.append(p2.call_param_name())
elif p2 is len_param:
call_params0.append(str(initial_buffer_size))
elif p2.call_param_name():
call_params0.append(p2.call_param_name())
param_list = ', '.join(call_params0)
if length_is_retval:
pre_call_statements += textwrap.dedent(f'''\
{len_param.py_name} = fn({param_list})
''')
error_category = None
error_param_name = 'error'
if not self.raise_error_code():
for p2 in self.parameters:
if p2.is_error():
assert p2.type.kind == TypeKind.POINTER
pt = p2.type.get_pointee()
error_category = translate_error_category(pt)
break
if error_category is not None:
pre_call_statements += textwrap.dedent(f'''\
try:
{error_category}.check_error_value(error.value)
except openvr.error_code.BufferTooSmallError:
pass
''')
pre_call_statements += textwrap.dedent(f'''\
{p.py_name} = ctypes.create_string_buffer({len_param.py_name})
''')
else: # getRuntimePath()
pre_call_statements += textwrap.dedent(f'''\
fn({param_list})
{len_param.py_name} = {required_len_param.py_name}.value
{p.py_name} = ctypes.create_string_buffer({len_param.py_name})
''')
param_list1 = ', '.join(in_params)
# pythonically downcase first letter of method name
result_annotation = ''
if len(out_params) == 0:
result_annotation = ' -> None'
method_string = f'def {self.py_method_name()}({param_list1}){result_annotation}:\n'
body_string = ''
if self.docstring:
body_string += f'"""{self.docstring}"""\n'
body_string += f'fn = {self.inner_function_name()}\n'
body_string += pre_call_statements
param_list2 = ', '.join(call_params)
if self.raise_error_code():
body_string += f'error = fn({param_list2})'
elif self.has_return():
body_string += f'result = fn({param_list2})'
else:
body_string += f'fn({param_list2})'
if self.raise_error_code():
error_category = translate_error_category(self.type)
post_call_statements += f'\n{error_category}.check_error_value(error)'
body_string += post_call_statements
if self.py_method_name() == 'pollNextEvent':
body_string += '\nreturn result != 0' # Custom return statement
elif len(out_params) > 0:
results = ', '.join(out_params)
body_string += f'\nreturn {results}'
body_string = textwrap.indent(body_string, ' '*4)
method_string += body_string
return method_string
def has_return(self):
if self.type.spelling == 'void':
return False
for p in self.parameters:
if p.is_output_string():
return False
return True
def inner_function_name(self):
return f'self.function_table.{self.py_method_name()}'
def py_method_name(self):
n = self.name
if n.startswith('VR_'):
n = n[3:]
return n[0].lower() + n[1:]
def raise_error_code(self):
return re.match(r'(?:vr::)?E\S+Error$', self.type.spelling)
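# Illustrative matches (hedged): 'EVRInitError' and 'vr::ETrackedPropertyError'
# satisfy the pattern above, so such methods raise on a nonzero error code
# instead of returning it.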
def returns_const_string(self):
if not self.type.kind == TypeKind.POINTER:
return False
pt = self.type.get_pointee()
if not pt.is_const_qualified():
return False
return pt.kind == TypeKind.CHAR_S
class COpenVRContext(Declaration):
def __init__(self, name, docstring=None):
super().__init__(name=name, docstring=docstring)
self.vr_member_names = []
self.vr_method_names = []
def __str__(self):
docstring = ''
if self.docstring:
docstring = textwrap.indent(f'\n"""{self.docstring}"""\n', ' '*16)
name = translate_type(self.name)
class_string = textwrap.dedent(f'''\
class {name}(object):{docstring}
def __init__(self):
''')
for m in self.vr_member_names:
class_string += ' '*8 + f'self.{m} = None\n'
class_string += textwrap.indent(textwrap.dedent(f'''\
def checkClear(self):
global _vr_token
if _vr_token != getInitToken():
self.clear()
_vr_token = getInitToken()
def clear(self):
'''), ' '*4)
for m in self.vr_member_names:
class_string += ' '*8 + f'self.{m} = None\n'
class_string += '\n'
for m in self.vr_method_names:
method_string = textwrap.dedent(f'''\
def {m}(self):
self.checkClear()
if self.m_p{m} is None:
self.m_p{m} = I{m}()
return self.m_p{m}
''')
class_string += textwrap.indent(method_string, ' '*4)
class_string += textwrap.dedent(f'''\
# Globals for context management
_vr_token = None
_internal_module_context = COpenVRContext()
''')
for m in self.vr_method_names:
method_string = textwrap.dedent(f'''\
def {m}():
return _internal_module_context.{m}()
''')
class_string += method_string
return class_string
def add_vr_member_name(self, name):
self.vr_member_names.append(name)
def add_vr_method_name(self, name):
self.vr_method_names.append(name)
class IVRClass(Declaration):
def __init__(self, name, docstring=None):
super().__init__(name=name, docstring=docstring)
self.base = 'object'
self.methods = []
def __str__(self):
docstring = ''
if self.docstring:
docstring = textwrap.indent(f'\n"""{self.docstring}"""\n', ' '*16)
name = translate_type(self.name)
methods = 'pass'
fn_table_methods = ''
if len(self.methods) > 0:
methods = '\n'
for method in self.methods:
methods += textwrap.indent(str(method), 16*' ') + '\n\n'
fn_table_methods += '\n' + ' '*20 + f'{method.ctypes_fntable_string()}'
return inspect.cleandoc(f'''
class {name}_FnTable(Structure):
_fields_ = [{fn_table_methods}
]
class {name}({self.base}):{docstring}
def __init__(self):
version_key = {name}_Version
_checkInterfaceVersion(version_key)
fn_key = 'FnTable:' + version_key
fn_type = {name}_FnTable
fn_table_ptr = cast(getGenericInterface(fn_key), POINTER(fn_type))
if fn_table_ptr is None:
raise OpenVRError("Error retrieving VR API for {name}")
self.function_table = fn_table_ptr.contents\n{methods}
''')
def add_method(self, method):
self.methods.append(method)
class ConstantDeclaration(Declaration):
def __init__(self, name, value, docstring=None):
super().__init__(name=name, docstring=docstring)
self.value = value
def __str__(self):
docstring = ''
if self.docstring:
docstring = f' # {self.docstring}'
return f'{self.name} = {self.value}{docstring}'
class EnumDecl(Declaration):
def __init__(self, name, docstring=None):
super().__init__(name=name, docstring=docstring)
self.constants = []
def add_constant(self, constant):
self.constants.append(constant)
def __str__(self):
result = f'{self.name} = ENUM_TYPE'
for c in self.constants:
result += f'\n{c}'
return result
class EnumConstant(Declaration):
def __init__(self, name, value, docstring=None):
super().__init__(name=name, docstring=docstring)
self.value = value
def __str__(self):
return f'{self.name} = ENUM_VALUE_TYPE({self.value})'
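# For reference, a constant rendered by the class above (hypothetical name):
#   str(EnumConstant('Eye_Left', 0)) -> "Eye_Left = ENUM_VALUE_TYPE(0)"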
class Function(FunctionBase):
def inner_function_name(self):
return f'_openvr.{self.name}'
def ctypes_string(self):
restype = translate_type(self.type.spelling)
param_types = []
for p in self.parameters:
param_types.append(translate_type(p.type.spelling))
arg_types = ', '.join(param_types)
result = textwrap.dedent(f'''\
_openvr.{self.name}.restype = {restype}
_openvr.{self.name}.argtypes = [{arg_types}]
''')
result += super().ctypes_string()
return result
class Method(FunctionBase):
def ctypes_fntable_string(self):
method_name = self.name[0].lower() + self.name[1:]
param_list = [translate_type(self.type.spelling), ]
for p in self.parameters:
param_list.append(translate_type(p.type.spelling))
params = ', '.join(param_list)
result = f'("{method_name}", OPENVR_FNTABLE_CALLTYPE({params})),'
return result
def ctypes_string(self):
return super().ctypes_string(in_params=['self', ])
class Parameter(Declaration):
def __init__(self, name, type_, default_value=None, docstring=None, annotation=None):
super().__init__(name=name, docstring=docstring)
self.type = type_
self.always_value = None
self.default_value = default_value
self.annotation = annotation
self.is_count = False
self.is_required_count = False
self.py_name = self.get_py_name(self.name)
@staticmethod
def get_py_name(c_name):
result = c_name
match = re.match(r'^[a-z]{1,5}([A-Z].*)$', result)
if match: # strip initial hungarian prefix
n = match.group(1)
result = n[0].lower() + n[1:] # convert first character to lower case
if result in ('bytes', 'from', 'property', 'type'): # avoid python keywords
result += '_'
return result
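    # e.g. get_py_name('pchRenderModelName') -> 'renderModelName';
    # get_py_name('unFrom') -> 'from_' (underscore avoids the python keyword).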
def is_array(self):
if not self.annotation:
return False
return re.match(r'array_count:(\S+);', self.annotation)
def is_error(self):
if self.type.kind != TypeKind.POINTER:
return False
t = translate_type(self.type.get_pointee().spelling)
if re.match(r'^(vr::)?E\S+Error$', t):
return True
return False
def is_input_string(self):
if not self.type.kind == TypeKind.POINTER:
return False
pt = self.type.get_pointee()
if not pt.is_const_qualified():
return False
return pt.kind == TypeKind.CHAR_S
def is_float(self):
return self.type.kind in (
TypeKind.FLOAT,
TypeKind.DOUBLE,
TypeKind.LONGDOUBLE,
TypeKind.FLOAT128,
)
def is_int(self):
return self.type.kind in (
TypeKind.USHORT,
TypeKind.UINT,
TypeKind.ULONG,
TypeKind.ULONGLONG,
TypeKind.UINT128,
TypeKind.SHORT,
TypeKind.INT,
TypeKind.LONG,
TypeKind.LONGLONG,
TypeKind.INT128,
)
def is_output_string(self):
if not self.annotation:
return False
return str(self.annotation) == 'out_string: ;'
def is_output(self):
if self.is_count:
return False
if self.is_required_count:
return False
if not self.type.kind == TypeKind.POINTER:
return False
pt = self.type.get_pointee()
if pt.is_const_qualified():
return False
if pt.kind == TypeKind.VOID:
return False
if pt.kind == TypeKind.POINTER:
return True # pointer to pointer
if pt.spelling == 'vr::RenderModel_t': # IVRRenderModels.freeRenderModel()
return False
if pt.spelling == 'vr::RenderModel_TextureMap_t': # IVRRenderModels.freeTexture()
return False
return True
def is_input(self):
if self.is_count:
return False
if self.is_required_count:
return False
elif self.is_array():
return True
elif self.name == 'pEvent':
return True
elif self.always_value is not None:
return False
elif not self.is_output():
return True
else:
return False # TODO:
def is_struct_size(self):
if self.is_count:
return False
if self.type.kind not in (TypeKind.TYPEDEF, ):
return False
if self.name.startswith('unSizeOf'):
return True
if self.name in ('uncbVREvent', ):
return True
if not self.name.endswith('Size'):
return False
if self.default_value is not None:
return False
if self.name.endswith('BufferSize'):
return False
if self.name.endswith('CompressedSize'):
return False
if self.name.endswith('ElementSize'):
return False
return True
def pre_call_block(self):
m = self.is_array()
if m:
result = ''
count_param = m.group(1)
count_param = self.get_py_name(count_param)
element_t = translate_type(self.type.get_pointee().spelling)
is_pose_array = False
if re.match(r'^trackedDevice.*Count$', count_param):
is_pose_array = True
if re.match(r'^\S+PoseArrayCount$', count_param):
is_pose_array = True
default_length = 1
if is_pose_array:
default_length = 'k_unMaxTrackedDeviceCount'
result += textwrap.dedent(f'''\
if {self.py_name} is None:
{self.py_name}Arg = None
{count_param} = 0
elif isinstance({self.py_name}, ctypes.Array):
{self.py_name}Arg = byref({self.py_name}[0])
{count_param} = len({self.py_name})
else:
{self.py_name} = ({element_t} * {default_length})()
{self.py_name}Arg = byref({self.py_name}[0])
{count_param} = {default_length}
''')
return result
elif self.is_output_string():
return ''
elif self.is_count:
return ''
elif self.always_value is not None:
return f'{self.py_name} = {self.always_value}\n'
elif not self.is_input():
t = translate_type(self.type.get_pointee().spelling)
return f'{self.py_name} = {t}()\n'
elif self.is_input_string():
result = textwrap.dedent(f'''\
if {self.py_name} is not None:
{self.py_name} = bytes({self.py_name}, encoding='utf-8')
''')
return result
else:
return ''
def post_call_block(self):
result = ''
if self.is_error():
assert self.type.kind == TypeKind.POINTER
pt = self.type.get_pointee()
error_category = translate_error_category(pt)
result += f'\n{error_category}.check_error_value({self.py_name}.value)'
if self.is_output() and self.type.kind == TypeKind.POINTER:
pt = self.type.get_pointee()
if pt.kind == TypeKind.POINTER:
pt2 = pt.get_pointee()
if pt2.spelling.endswith('_t'):
n = self.py_name
result += textwrap.dedent(f'''\
if {n}:
{n} = {n}.contents
else:
{n} = None''')
return result
def input_param_name(self):
if not self.is_input():
return None
n = self.py_name
has_type_annotation = False
if self.is_input_string():
n = f'{n}: str'
has_type_annotation = True
elif self.is_int():
n = f'{n}: int'
has_type_annotation = True
elif self.is_float():
n = f'{n}: float'
has_type_annotation = True
if self.default_value:
if has_type_annotation:
n = f'{n} = {self.default_value}'
else:
n = f'{n}={self.default_value}'
return n
def call_param_name(self):
if self.is_array():
return f'{self.py_name}Arg'
elif self.is_count:
return self.py_name
elif self.is_output_string():
return self.py_name
elif self.is_output():
return f'byref({self.py_name})'
elif self.type.kind == TypeKind.POINTER:
ptk = self.type.get_pointee().kind
if ptk == TypeKind.CHAR_S:
return self.py_name
else:
return f'byref({self.py_name})'
else:
return self.py_name
def return_param_name(self):
if self.is_error():
return None
if self.is_output_string():
return f"bytes({self.py_name}.value).decode('utf-8')"
if not self.is_output():
return None
result = self.py_name
pt0 = self.type.get_pointee()
extract_value = False
if pt0.kind == TypeKind.TYPEDEF and pt0.spelling.endswith('Handle_t'):
extract_value = True
pt = translate_type(pt0.spelling)
if pt.startswith('c_'):
extract_value = True
if extract_value:
result += '.value'
return result
class Struct(Declaration):
def __init__(self, name, docstring=None):
if name == 'vr::VRControllerState001_t':
name = 'VRControllerState_t'
super().__init__(name=name, docstring=docstring)
self.fields = []
self.base = None
if name == 'VRControllerState_t':
self.base = 'PackHackStructure'
if name == 'vr::VREvent_t':
self.base = 'PackHackStructure'
def add_field(self, field):
self.fields.append(field)
def __str__(self):
docstring = ''
if self.docstring:
docstring = textwrap.indent(f'\n"""{self.docstring}"""\n', ' '*16)
fields = ''
for f in self.fields:
fields = fields + f'''
{f}'''
name = translate_type(self.name)
base = 'Structure'
if self.base is not None:
base = translate_type(self.base)
if name.startswith('HmdMatrix'):
base = f'_MatrixMixin, {base}'
if name.startswith('HmdVector'):
base = f'_VectorMixin, {base}'
return inspect.cleandoc(f'''
class {name}({base}):{docstring}
_fields_ = [{fields}
]
''')
class StructureForwardDeclaration(Declaration):
def __str__(self):
return inspect.cleandoc(f'''
class {self.name}(Structure):
pass
''')
class StructField(Declaration):
def __init__(self, name, type_, docstring=None):
super().__init__(name=name, docstring=docstring)
self.type = type_
def __str__(self):
type_name = translate_type(self.type)
return f'("{self.name}", {type_name}),'
class Typedef(Declaration):
def __init__(self, alias, original, docstring=None):
super().__init__(name=alias, docstring=docstring)
self.original = original
def __str__(self):
orig = translate_type(self.original)
if self.name == orig:
return ''
return f'{self.name} = {orig}'
def translate_error_category(type_):
error_category = type_.spelling
assert error_category.endswith('Error')
if error_category.startswith('vr::EVR'):
error_category = error_category[7:]
elif error_category.startswith('vr::E'):
error_category = error_category[5:]
else:
assert False
return f'openvr.error_code.{error_category}'
def translate_type(type_name, bracket=False):
"""
Convert c++ type name to ctypes type name
# TODO: move to ctypes generator
"""
# trim space characters
result = type_name.strip()
result = re.sub(r'\bconst\s+', '', result)
result = re.sub(r'\s+const\b', '', result)
result = re.sub(r'\bstruct\s+', '', result)
result = re.sub(r'\benum\s+', '', result)
result = re.sub(r'\bunion\s+', '', result)
# no implicit int
if result == 'unsigned':
result = 'unsigned int'
# abbreviate type for ctypes
result = re.sub(r'8_t\b', '8', result)
result = re.sub(r'16_t\b', '16', result)
result = re.sub(r'32_t\b', '32', result)
result = re.sub(r'64_t\b', '64', result)
result = re.sub(r'\bunsigned\s+', 'u', result) # unsigned int -> uint
if re.match(r'^\s*(?:const\s+)?char\s*\*\s*$', result):
result = 'c_char_p'
result = re.sub(r'\blong\s+long\b', 'longlong', result)
# prepend 'c_' for ctypes
if re.match(r'^(float|u?int|double|u?char|u?short|u?long)', result):
result = f'c_{result}'
# remove leading "VR_"
result = re.sub(r'\bVR_', '', result)
m = re.match(r'^([^*]+\S)\s*[*&](.*)$', result)
while m: # # HmdStruct* -> POINTER(HmdStruct)
pointee_type = translate_type(m.group(1))
result = f'POINTER({pointee_type}){m.group(2)}'
m = re.match(r'^([^*]+\S)\s*[*&](.*)$', result)
# translate pointer type "ptr"
m = re.match(r'^([^*]+)ptr(?:_t)?(.*)$', result)
while m: # uintptr_t -> POINTER(c_uint)
pointee_type = translate_type(m.group(1))
result = f'POINTER({pointee_type}){m.group(2)}'
m = re.match(r'^([^*]+)ptr(?:_t)?(.*)$', result)
if result == 'void':
result = 'None'
if result == 'POINTER(None)':
result = 'c_void_p'
result = re.sub(r'\bbool\b', 'openvr_bool', result)
# e.g. vr::HmdMatrix34_t -> HmdMatrix34_t
if result.startswith('vr::'):
result = result[4:]
# e.g. float[3] -> c_float * 3
m = re.match(r'^([^\[]+\S)\s*\[(\d+)\](.*)$', result)
if m:
t = f'{m.group(1)}{m.group(3)}' # in case there are more dimensions
t = translate_type(t, bracket=True)
result = f'{t} * {m.group(2)}'
if bracket:
result = f'({result})' # multiple levels of arrays
return result
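# A minimal sanity sketch for translate_type; the inputs are hypothetical
# C/C++ spellings and the expected ctypes forms follow from the rules above.
if __name__ == '__main__':
    for c_type in ('const char *', 'uint32_t', 'vr::HmdMatrix34_t *', 'float[3]'):
        print(f'{c_type!r:24} -> {translate_type(c_type)}')
    # expected: c_char_p, c_uint32, POINTER(HmdMatrix34_t), c_float * 3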
# ---------------------------------------------------------------------------
# sqlglot/generator.py | RobinL/sqlglot @ 7ec1022a | MIT | 30,733 bytes
# ---------------------------------------------------------------------------
import logging
import sqlglot.expressions as exp
from sqlglot.errors import ErrorLevel, UnsupportedError
from sqlglot.helper import apply_index_offset, csv, ensure_list
from sqlglot.time import format_time
from sqlglot.tokens import Tokenizer
logger = logging.getLogger("sqlglot")
class Generator:
"""
Generator interprets the given syntax tree and produces a SQL string as an output.
    Args:
transforms (dict): the dictionary of custom transformations in which key
represents the expression type and the value is a function which defines
how the given expression type should be rendered.
type_mapping (dict): the dictionary of custom type mappings in which the key
represents the data type (:class:`~sqlglot.expressions.DataType.Type`) and
the value is its SQL string representation.
time_mapping (dict): the dictionary of custom time mappings in which the key
represents a python time format and the output the target time format
time_trie (trie): a trie of the time_mapping keys
pretty (bool): if set to True the returned string will be formatted. Default: False.
identifier (str): specifies which character to use to delimit identifiers. Default: ".
identify (bool): if set to True all identifiers will be delimited by the corresponding
character.
quote (str): specifies a character which should be treated as a quote (eg. to delimit
literals). Default: '.
escape (str): specifies an escape character. Default: '.
pad (int): determines padding in a formatted string. Default: 2.
indent (int): determines the size of indentation in a formatted string. Default: 4.
unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
unsupported expressions. Default ErrorLevel.WARN.
"""
TRANSFORMS = {
exp.DateAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e, 'unit')})",
exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
exp.TsOrDsAdd: lambda self, e: f"TS_OR_DS_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e, 'unit')})",
}
__slots__ = (
"transforms",
"type_mapping",
"time_mapping",
"time_trie",
"pretty",
"configured_pretty",
"identifier",
"identify",
"quote",
"escape",
"pad",
"index_offset",
"unsupported_level",
"unsupported_messages",
"_indent",
"_level",
)
def __init__(
self,
transforms=None,
type_mapping=None,
time_mapping=None,
time_trie=None,
pretty=None,
identifier=None,
identify=False,
quote=None,
escape=None,
pad=2,
indent=4,
index_offset=0,
unsupported_level=ErrorLevel.WARN,
):
# pylint: disable=too-many-arguments
import sqlglot
self.transforms = {**self.TRANSFORMS, **(transforms or {})}
self.type_mapping = type_mapping or {}
self.time_mapping = time_mapping or {}
self.time_trie = time_trie
self.pretty = pretty if pretty is not None else sqlglot.pretty
self.configured_pretty = self.pretty
self.identifier = identifier or '"'
self.identify = identify
self.quote = quote or "'"
self.escape = escape or "'"
self.pad = pad
self.index_offset = index_offset
self.unsupported_level = unsupported_level
self.unsupported_messages = []
self._indent = indent
self._level = 0
def generate(self, expression):
"""
Generates a SQL string by interpreting the given syntax tree.
        Args:
            expression (Expression): the syntax tree.

        Returns:
            the SQL string.
"""
self.unsupported_messages = []
sql = self.sql(expression).strip()
if self.unsupported_level == ErrorLevel.IGNORE:
return sql
for msg in self.unsupported_messages:
if self.unsupported_level == ErrorLevel.RAISE:
raise UnsupportedError(msg)
logger.warning(msg)
return sql
def unsupported(self, message):
self.unsupported_messages.append(message)
def indent(self, sql, level=None, pad=0):
level = self._level if level is None else level
if self.pretty:
sql = f"{' ' * (level * self._indent + pad)}{sql}"
return sql
def sep(self, sep=" "):
return f"{sep.strip()}\n" if self.pretty else sep
def seg(self, sql, sep=" ", level=None, pad=0):
return f"{self.sep(sep)}{self.indent(sql, level=level, pad=pad)}"
def properties(self, name, expression):
if expression.args["expressions"]:
return f"{self.seg(name)} ({self.sep('')}{self.expressions(expression)}{self.sep('')})"
return ""
def wrap(self, expression):
self._level += 1
this_sql = self.indent(self.sql(expression, "this"))
self._level -= 1
return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
def no_format(self, func):
original = self.pretty
self.pretty = False
result = func()
self.pretty = original
return result
def no_identify(self, func):
original = self.identify
self.identify = False
result = func()
self.identify = original
return result
def indent_newlines(self, sql, skip_first=False):
if not self.pretty:
return sql
return "\n".join(
line if skip_first and i == 0 else self.indent(line, pad=self.pad)
for i, line in enumerate(sql.split("\n"))
)
def sql(self, expression, key=None):
if not expression:
return ""
if isinstance(expression, str):
return expression
if key:
return self.sql(expression.args.get(key))
transform = self.transforms.get(expression.__class__)
if callable(transform):
return transform(self, expression)
if transform:
return transform
if not isinstance(expression, exp.Expression):
raise ValueError(
f"Expected an Expression. Received {type(expression)}: {expression}"
)
exp_handler_name = f"{expression.key}_sql"
if hasattr(self, exp_handler_name):
return getattr(self, exp_handler_name)(expression)
if isinstance(expression, exp.Func):
return self.function_fallback_sql(expression)
raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
def annotation_sql(self, expression):
return self.sql(expression, "expression")
def uncache_sql(self, expression):
table = self.sql(expression, "this")
exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
return f"UNCACHE TABLE{exists_sql} {table}"
def cache_sql(self, expression):
lazy = " LAZY" if expression.args.get("lazy") else ""
table = self.sql(expression, "this")
options = expression.args.get("options")
options = (
f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})"
if options
else ""
)
sql = self.sql(expression, "expression")
sql = f" AS{self.sep()}{sql}" if sql else ""
sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
return self.prepend_ctes(expression, sql)
def characterset_sql(self, expression):
default = "DEFAULT " if expression.args.get("default") else ""
return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
def column_sql(self, expression):
fields = expression.args.get("fields")
if fields:
return ".".join(self.sql(field) for field in fields)
return ".".join(
part
for part in [
self.sql(expression, "db"),
self.sql(expression, "table"),
self.sql(expression, "this"),
]
if part
)
def columndef_sql(self, expression):
column = self.sql(expression, "this")
kind = self.sql(expression, "kind")
not_null = " NOT NULL" if expression.args.get("not_null") else ""
default = self.sql(expression, "default")
default = f" DEFAULT {default}" if default else ""
auto_increment = (
" AUTO_INCREMENT" if expression.args.get("auto_increment") else ""
)
collate = self.sql(expression, "collate")
collate = f" COLLATE {collate}" if collate else ""
comment = self.sql(expression, "comment")
comment = f" COMMENT {comment}" if comment else ""
primary = " PRIMARY KEY" if expression.args.get("primary") else ""
return f"{column} {kind}{not_null}{default}{collate}{auto_increment}{comment}{primary}"
def create_sql(self, expression):
this = self.sql(expression, "this")
kind = self.sql(expression, "kind").upper()
expression_sql = self.sql(expression, "expression")
expression_sql = f"AS{self.sep()}{expression_sql}" if expression_sql else ""
temporary = " TEMPORARY" if expression.args.get("temporary") else ""
replace = " OR REPLACE" if expression.args.get("replace") else ""
exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
properties = self.sql(expression, "properties")
engine = self.sql(expression, "engine")
engine = f"ENGINE={engine}" if engine else ""
auto_increment = self.sql(expression, "auto_increment")
auto_increment = f"AUTO_INCREMENT={auto_increment}" if auto_increment else ""
character_set = self.sql(expression, "character_set")
collate = self.sql(expression, "collate")
collate = f"COLLATE={collate}" if collate else ""
comment = self.sql(expression, "comment")
comment = f"COMMENT={comment}" if comment else ""
options = " ".join(
option
for option in (
engine,
auto_increment,
character_set,
collate,
comment,
)
if option
)
expression_sql = f"CREATE{replace}{temporary} {kind}{exists_sql} {this}{properties} {expression_sql}{options}"
return self.prepend_ctes(expression, expression_sql)
def prepend_ctes(self, expression, sql):
with_ = self.sql(expression, "with")
if with_:
sql = f"{with_}{self.sep()}{self.indent(sql)}"
return sql
def with_sql(self, expression):
sql = ", ".join(self.sql(e) for e in expression.args["expressions"])
recursive = "RECURSIVE " if expression.args.get("recursive") else ""
return f"WITH {recursive}{sql}"
def cte_sql(self, expression):
alias = self.sql(expression, "alias")
return f"{alias} AS {self.wrap(expression)}"
def tablealias_sql(self, expression):
alias = self.sql(expression, "this")
columns_str = ""
columns = expression.args.get("columns")
if columns:
columns_str = ", ".join(self.sql(e) for e in columns)
columns_str = f"({columns_str})"
return f"{alias}{columns_str}"
def datatype_sql(self, expression):
type_value = expression.this
type_sql = self.type_mapping.get(type_value, type_value.value)
nested = ""
interior = self.expressions(expression, flat=True)
if interior:
nested = f"<{interior}>" if expression.args["nested"] else f"({interior})"
return f"{type_sql}{nested}"
def delete_sql(self, expression):
this = self.sql(expression, "this")
where_sql = self.sql(expression, "where")
sql = f"DELETE FROM {this}{where_sql}"
return self.prepend_ctes(expression, sql)
def drop_sql(self, expression):
this = self.sql(expression, "this")
kind = expression.args["kind"].upper()
exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
return f"DROP {kind}{exists_sql}{this}"
def except_sql(self, expression):
return self.prepend_ctes(
expression,
self.set_operation(
expression,
f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ''}",
),
)
def exists_sql(self, expression):
return f"EXISTS {self.wrap(expression)}"
def filter_sql(self, expression):
this = self.sql(expression, "this")
where = self.sql(expression, "expression")[1:] # where has a leading space
return f"{this} FILTER({where})"
def hint_sql(self, expression):
if self.sql(expression, "this"):
self.unsupported("Hints are not supported")
return ""
def identifier_sql(self, expression):
value = expression.args.get("this") or ""
if expression.args.get("quoted") or self.identify:
return f"{self.identifier}{value}{self.identifier}"
return value
def partition_sql(self, expression):
keys = csv(
*[
f"{k.args['this']}='{v.args['this']}'" if v else k.args["this"]
for k, v in expression.args.get("this")
]
)
return f"PARTITION({keys})"
def properties_sql(self, expression):
return self.properties("WITH", expression)
def property_sql(self, expression):
key = expression.text("this")
value = self.sql(expression, "value")
return f"{key} = {value}"
def insert_sql(self, expression):
kind = "OVERWRITE TABLE" if expression.args.get("overwrite") else "INTO"
this = self.sql(expression, "this")
exists = " IF EXISTS " if expression.args.get("exists") else " "
partition_sql = (
self.sql(expression, "partition")
if expression.args.get("partition")
else ""
)
expression_sql = self.sql(expression, "expression")
sep = self.sep() if partition_sql else ""
sql = f"INSERT {kind} {this}{exists}{partition_sql}{sep}{expression_sql}"
return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression):
return self.prepend_ctes(
expression,
self.set_operation(
expression,
f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ''}",
),
)
def table_sql(self, expression):
return ".".join(
part
for part in [
self.sql(expression, "catalog"),
self.sql(expression, "db"),
self.sql(expression, "this"),
]
if part
)
def tablesample_sql(self, expression):
this = self.sql(expression, "this")
numerator = self.sql(expression, "bucket_numerator")
denominator = self.sql(expression, "bucket_denominator")
field = self.sql(expression, "bucket_field")
field = f" ON {field}" if field else ""
bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
percent = self.sql(expression, "percent")
percent = f"{percent} PERCENT" if percent else ""
rows = self.sql(expression, "rows")
rows = f"{rows} ROWS" if rows else ""
size = self.sql(expression, "size")
return f"{this} TABLESAMPLE({bucket}{percent}{rows}{size})"
def tuple_sql(self, expression):
return f"({self.expressions(expression, flat=True)})"
def update_sql(self, expression):
this = self.sql(expression, "this")
set_sql = self.expressions(expression, flat=True)
from_sql = self.sql(expression, "from")
where_sql = self.sql(expression, "where")
sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}"
return self.prepend_ctes(expression, sql)
def values_sql(self, expression):
return f"VALUES{self.seg('')}{self.expressions(expression)}"
def var_sql(self, expression):
return self.sql(expression, "this")
def from_sql(self, expression):
expressions = ", ".join(self.sql(e) for e in expression.args["expressions"])
return f"{self.seg('FROM')} {expressions}"
def group_sql(self, expression):
return self.op_expressions("GROUP BY", expression)
def having_sql(self, expression):
this = self.indent_newlines(self.sql(expression, "this"))
return f"{self.seg('HAVING')}{self.sep()}{this}"
def join_sql(self, expression):
side = self.sql(expression, "side").upper()
kind = self.sql(expression, "kind").upper()
op_sql = self.seg(" ".join(op for op in [side, kind, "JOIN"] if op))
on_sql = self.sql(expression, "on")
if on_sql:
on_sql = self.indent_newlines(on_sql, skip_first=True)
on_sql = f"{self.seg('ON', pad=self.pad)} {on_sql}"
expression_sql = self.sql(expression, "expression")
this_sql = self.sql(expression, "this")
return f"{expression_sql}{op_sql} {this_sql}{on_sql}"
def lambda_sql(self, expression):
args = self.expressions(expression, flat=True)
args = f"({args})" if len(args) > 1 else args
return self.no_identify(lambda: f"{args} -> {self.sql(expression, 'this')}")
def lateral_sql(self, expression):
this = self.sql(expression, "this")
op_sql = self.seg(
f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}"
)
alias = self.sql(expression, "table")
columns = ", ".join(self.sql(e) for e in expression.args.get("columns") or [])
columns = f" AS {columns}" if columns else ""
return f"{op_sql}{self.sep()}{this} {alias}{columns}"
def limit_sql(self, expression):
return f"{self.seg('LIMIT')} {self.sql(expression, 'this')}"
def offset_sql(self, expression):
return f"{self.seg('OFFSET')} {self.sql(expression, 'this')}"
def literal_sql(self, expression):
text = expression.this or ""
if expression.is_string:
text = text.replace("\\", "\\\\") if self.escape == "\\" else text
text = text.replace(Tokenizer.ESCAPE_CODE, self.escape)
return f"{self.quote}{text}{self.quote}"
return text
def null_sql(self, expression):
# pylint: disable=unused-argument
return "NULL"
def boolean_sql(self, expression):
return "TRUE" if expression.this else "FALSE"
def order_sql(self, expression, flat=False):
return self.op_expressions("ORDER BY", expression, flat=flat)
def ordered_sql(self, expression):
desc = expression.args.get("desc")
desc = " DESC" if desc else ""
return f"{self.sql(expression, 'this')}{desc}"
def select_sql(self, expression):
hint = self.sql(expression, "hint")
distinct = " DISTINCT" if expression.args.get("distinct") else ""
expressions = self.expressions(expression)
select = "SELECT" if expressions else ""
sep = self.sep() if expressions else ""
sql = csv(
f"{select}{hint}{distinct}{sep}{expressions}",
self.sql(expression, "from"),
*[self.sql(sql) for sql in expression.args.get("laterals", [])],
*[self.sql(sql) for sql in expression.args.get("joins", [])],
self.sql(expression, "where"),
self.sql(expression, "group"),
self.sql(expression, "having"),
self.sql(expression, "qualify"),
self.sql(expression, "order"),
self.sql(expression, "limit"),
self.sql(expression, "offset"),
sep="",
)
return self.prepend_ctes(expression, sql)
def schema_sql(self, expression):
this = self.sql(expression, "this")
this = f"{this} " if this else ""
sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
return f"{this}{sql}"
def star_sql(self, expression):
# pylint: disable=unused-argument
return "*"
def subquery_sql(self, expression):
alias = self.sql(expression, "alias")
alias = f" AS {alias}" if alias else ""
if self.pretty:
return f"{self.wrap(expression)}{alias}"
return f"({self.sql(expression, 'this')}){alias}"
def qualify_sql(self, expression):
this = self.indent_newlines(self.sql(expression, "this"))
return f"{self.seg('QUALIFY')}{self.sep()}{this}"
def union_sql(self, expression):
return self.prepend_ctes(
expression,
self.set_operation(
expression, f"UNION{'' if expression.args.get('distinct') else ' ALL'}"
),
)
def unnest_sql(self, expression):
args = self.expressions(expression, flat=True)
table = self.sql(expression, "table")
ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
columns = ", ".join(self.sql(e) for e in expression.args.get("columns", []))
alias = f" AS {table}" if table else ""
alias = f"{alias} ({columns})" if columns else alias
return f"UNNEST({args}){ordinality}{alias}"
def where_sql(self, expression):
this = self.indent_newlines(self.sql(expression, "this"))
return f"{self.seg('WHERE')}{self.sep()}{this}"
def window_sql(self, expression):
this_sql = self.sql(expression, "this")
partition = expression.args.get("partition_by")
partition = (
"PARTITION BY " + ", ".join(self.sql(by) for by in partition)
if partition
else ""
)
order = expression.args.get("order")
order_sql = self.order_sql(order, flat=True) if order else ""
partition_sql = partition + " " if partition and order else partition
spec = expression.args.get("spec")
spec_sql = " " + self.window_spec_sql(spec) if spec else ""
return f"{this_sql} OVER({partition_sql}{order_sql}{spec_sql})"
def window_spec_sql(self, expression):
kind = self.sql(expression, "kind")
start = csv(
self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" "
)
end = csv(
self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" "
)
return f"{kind} BETWEEN {start} AND {end}"
def withingroup_sql(self, expression):
this = self.sql(expression, "this")
expression = self.sql(expression, "expression")[1:] # order has a leading space
return f"{this} WITHIN GROUP ({expression})"
def between_sql(self, expression):
this = self.sql(expression, "this")
low = self.sql(expression, "low")
high = self.sql(expression, "high")
return f"{this} BETWEEN {low} AND {high}"
def bracket_sql(self, expression):
expressions = apply_index_offset(
expression.args["expressions"], self.index_offset
)
expressions = ", ".join(self.sql(e) for e in expressions)
return f"{self.sql(expression, 'this')}[{expressions}]"
def case_sql(self, expression):
pad = self.pad + 2
this = self.sql(expression, "this")
this = f" {this}" if this else ""
ifs = [
f"WHEN {self.sql(e, 'this')} THEN {self.sql(e, 'true')}"
for e in expression.args["ifs"]
]
if expression.args.get("default") is not None:
ifs.append(f"ELSE {self.sql(expression, 'default')}")
original = self.pretty
self.pretty = self.configured_pretty
ifs = "".join(self.seg(e, pad=pad) for e in ifs)
case = f"CASE{this}{ifs}{self.seg('END', pad=self.pad)}"
self.pretty = original
return case
def decimal_sql(self, expression):
args = ", ".join(
arg.args.get("this")
for arg in [expression.args.get("precision"), expression.args.get("scale")]
if arg
)
return f"DECIMAL({args})"
def extract_sql(self, expression):
this = self.sql(expression, "this")
expression_sql = self.sql(expression, "expression")
return f"EXTRACT({this} FROM {expression_sql})"
def if_sql(self, expression):
return self.case_sql(
exp.Case(ifs=[expression], default=expression.args.get("false"))
)
def in_sql(self, expression):
in_sql = self.no_format(
lambda: self.sql(expression, "query")
) or self.expressions(expression, flat=True)
return f"{self.sql(expression, 'this')} IN ({in_sql})"
def interval_sql(self, expression):
return f"INTERVAL {self.sql(expression, 'this')} {self.sql(expression, 'unit')}"
def anonymous_sql(self, expression):
return f"{self.sql(expression, 'this').upper()}({self.expressions(expression, flat=True)})"
def paren_sql(self, expression):
return self.no_format(lambda: f"({self.sql(expression, 'this')})")
def neg_sql(self, expression):
return f"-{self.sql(expression, 'this')}"
def not_sql(self, expression):
return f"NOT {self.sql(expression, 'this')}"
def alias_sql(self, expression):
to_sql = self.sql(expression, "alias")
to_sql = f" AS {to_sql}" if to_sql else ""
return f"{self.sql(expression, 'this')}{to_sql}"
def aliases_sql(self, expression):
return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
def add_sql(self, expression):
return self.binary(expression, "+")
def and_sql(self, expression):
return self.binary(expression, "AND", newline=self.pretty)
def bitwiseand_sql(self, expression):
return self.binary(expression, "&")
def bitwiseleftshift_sql(self, expression):
return self.binary(expression, "<<")
def bitwisenot_sql(self, expression):
return f"~{self.sql(expression, 'this')}"
def bitwiseor_sql(self, expression):
return self.binary(expression, "|")
def bitwiserightshift_sql(self, expression):
return self.binary(expression, ">>")
def bitwisexor_sql(self, expression):
return self.binary(expression, "^")
def cast_sql(self, expression):
return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
def command_sql(self, expression):
return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
def count_sql(self, expression):
distinct = "DISTINCT " if expression.args["distinct"] else ""
return f"COUNT({distinct}{self.sql(expression, 'this')})"
def intdiv_sql(self, expression):
return self.sql(
exp.Cast(
this=exp.Div(
this=expression.args["this"],
expression=expression.args["expression"],
),
to=exp.DataType(this=exp.DataType.Type.INT),
)
)
def dpipe_sql(self, expression):
return self.binary(expression, "||")
def div_sql(self, expression):
return self.binary(expression, "/")
def dot_sql(self, expression):
return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
def eq_sql(self, expression):
return self.binary(expression, "=")
def escape_sql(self, expression):
return self.binary(expression, "ESCAPE")
def gt_sql(self, expression):
return self.binary(expression, ">")
def gte_sql(self, expression):
return self.binary(expression, ">=")
def ilike_sql(self, expression):
return self.binary(expression, "ILIKE")
def is_sql(self, expression):
return self.binary(expression, "IS")
def like_sql(self, expression):
return self.binary(expression, "LIKE")
def lt_sql(self, expression):
return self.binary(expression, "<")
def lte_sql(self, expression):
return self.binary(expression, "<=")
def mod_sql(self, expression):
return self.binary(expression, "%")
def mul_sql(self, expression):
return self.binary(expression, "*")
def neq_sql(self, expression):
return self.binary(expression, "<>")
def or_sql(self, expression):
return self.binary(expression, "OR", newline=self.pretty)
def sub_sql(self, expression):
return self.binary(expression, "-")
def trycast_sql(self, expression):
return (
f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
)
def binary(self, expression, op, newline=False):
sep = "\n" if newline else " "
return f"{self.sql(expression, 'this')}{sep}{op} {self.sql(expression, 'expression')}"
def function_fallback_sql(self, expression):
args = []
for arg_key in expression.arg_types:
arg_value = ensure_list(expression.args.get(arg_key) or [])
for a in arg_value:
args.append(self.sql(a))
args_str = ", ".join(args)
return f"{expression.sql_name()}({args_str})"
def format_time(self, expression):
return format_time(
self.sql(expression, "format"), self.time_mapping, self.time_trie
)
def expressions(self, expression, flat=False, pad=0):
# pylint: disable=cell-var-from-loop
expressions = expression.args.get("expressions") or []
if flat:
return ", ".join(self.sql(e) for e in expressions)
return self.sep(", ").join(
self.indent(
f"{' ' if self.pretty else ''}{self.no_format(lambda: self.sql(e))}",
pad=pad,
)
for e in expressions
)
def op_expressions(self, op, expression, flat=False):
expressions_sql = self.expressions(expression, flat=flat)
if flat:
return f"{op} {expressions_sql}"
return f"{self.seg(op)}{self.sep()}{expressions_sql}"
def set_operation(self, expression, op):
this = self.sql(expression, "this")
op = self.seg(op)
expression = self.indent(self.sql(expression, "expression"), pad=0)
return f"{this}{op}{self.sep()}{expression}"
# ---------------------------------------------------------------------------
# tests/test_document.py | yschindel/revitron @ f373a038 | MIT | 1,546 bytes | 32 stars
# ---------------------------------------------------------------------------
import revitron
from revitron import _
import utils
class DocumentTests(utils.RevitronTestCase):
def testIsFamily(self):
self.assertFalse(revitron.Document().isFamily())
def testConfigStorage(self):
config = revitron.DocumentConfigStorage()
config.set('test.1', {'key': 'value'})
config.set('test.2', 'string')
raw = revitron._(revitron.DOC.ProjectInformation).get(config.storageName)
self.assertEquals(raw, '{"test.1": {"key": "value"}, "test.2": "string"}')
self.assertEquals(config.get('test.2'), 'string')
def testGetDuplicateInstances(self):
if revitron.REVIT_VERSION > '2018':
family = self.fixture.createGenericModelFamily()
p1 = revitron.DB.XYZ(0, 0, 0)
p2 = revitron.DB.XYZ(0, 0, 0)
p3 = revitron.DB.XYZ(10, 0, 0)
instance1 = self.fixture.createGenericModelInstance(family, p1)
instance2 = self.fixture.createGenericModelInstance(family, p2)
instance3 = self.fixture.createGenericModelInstance(family, p3)
duplicatesOld = revitron.Document().getDuplicateInstances(True)
duplicatesYoung = revitron.Document().getDuplicateInstances()
toStr = utils.idsToStr
self.assertEquals(str(instance1.Id.IntegerValue), toStr(duplicatesOld))
self.assertEquals(str(instance2.Id.IntegerValue), toStr(duplicatesYoung))
self.assertFalse(
str(instance3.Id.IntegerValue) in toStr(duplicatesOld) +
toStr(duplicatesYoung)
)
else:
revitron.Log().warning(
'Method revitron.Document().getDuplicateInstances() requires Revit 2018 or newer!'
)
utils.run(DocumentTests)
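# For reference, DocumentConfigStorage as exercised above round-trips any
# JSON-serializable value (illustrative):
#   config.set('ns.key', {'a': 1}); config.get('ns.key') -> {'a': 1}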
# ---------------------------------------------------------------------------
# setup.py | davehowell/sqlfluff @ 9666ffd2 | MIT | 3,669 bytes
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""The script for setting up sqlfluff."""
from __future__ import absolute_import
from __future__ import print_function
import io
import sys
if sys.version_info[0] < 3:
raise Exception("SQLFluff does not support Python 2. Please upgrade to Python 3.")
import configparser
from os.path import dirname
from os.path import join
from setuptools import find_packages, setup
# Get the global config info as currently stated
# (we use the config file to avoid actually loading any python here)
config = configparser.ConfigParser()
config.read(["src/sqlfluff/config.ini"])
version = config.get("sqlfluff", "version")
def read(*names, **kwargs):
"""Read a file and return the contents as a string."""
return io.open(
join(dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")
).read()
setup(
name="sqlfluff",
version=version,
license="MIT License",
description="Modular SQL Linting for Humans",
long_description=read("README.md") + "\n\n---\n\n" + read("CHANGELOG.md"),
# Make sure pypi is expecting markdown!
long_description_content_type="text/markdown",
author="Alan Cruickshank",
author_email="alan@designingoverload.com",
url="https://github.com/sqlfluff/sqlfluff",
python_requires=">=3.6",
keywords=["sqlfluff", "sql", "linter"],
project_urls={
# Homepage not ready yet.
# 'Homepage': 'https://www.sqlfluff.com'
"Documentation": "https://docs.sqlfluff.com",
"Source": "https://github.com/sqlfluff/sqlfluff",
},
packages=find_packages(where="src"),
package_dir={"": "src"},
include_package_data=True,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 3 - Alpha",
# 'Development Status :: 5 - Production/Stable',
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Utilities",
"Topic :: Software Development :: Quality Assurance",
],
install_requires=[
# Core
"click>=7.1",
"colorama>=0.3",
"configparser",
"oyaml",
"Jinja2",
# Used for diffcover plugin
"diff-cover>=2.5.0",
# Used for performance profiling
"bench-it",
# Used for .sqlfluffignore
"pathspec",
# Used for finding os-specific application config dirs
"appdirs",
# Cached property for performance gains
"cached-property",
# dataclasses backport for python 3.6
"dataclasses",
# better type hints for older python versions
"typing_extensions",
],
extras_require={
"dbt": ["dbt>=0.17"],
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
entry_points={
"console_scripts": [
"sqlfluff = sqlfluff.cli.commands:cli",
],
"diff_cover": ["sqlfluff = sqlfluff.diff_quality_plugin"],
},
)
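# For reference, the minimal shape of src/sqlfluff/config.ini that the version
# lookup above expects (version value is hypothetical):
#
#   [sqlfluff]
#   version = 0.4.0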
# ---------------------------------------------------------------------------
# jaraco/desktop/wallpaper.py | jaraco/jaraco.desktop @ 5109a394 | MIT | 5,222 bytes
# ---------------------------------------------------------------------------
"""
`jaraco.desktop.wallpaper`
Based on nat-geo_background-setter.py by Samuel Huckins
This module contains routines to pull the latest National Geographic
"Picture of the Day" and set it as your desktop background. This module
may be executed directly.
The routine won't run if you are low on disk space; the threshold is
configurable below.
Assumes Gnome or Windows.
"""
import os
import sys
import ctypes
import subprocess
import collections
import urllib.request
import urllib.error
from bs4 import BeautifulSoup
picture_dir = os.path.expanduser("~/Pictures/NatGeoPics")
# percentage of free space required on picture dir for the photo to be
# downloaded.
free_space_minimum = 25
base_url = "http://photography.nationalgeographic.com/photography/photo-of-the-day"
# ------------------------------------------------------------------------------
def _get_free_bytes_win32(dir):
"""
Return folder/drive free space and total space (in bytes)
"""
free_bytes = ctypes.c_ulonglong()
total_bytes = ctypes.c_ulonglong()
GetDiskFreeSpaceEx = ctypes.windll.kernel32.GetDiskFreeSpaceExW
res = GetDiskFreeSpaceEx(
str(dir), None, ctypes.byref(total_bytes), ctypes.byref(free_bytes)
)
if not res:
raise WindowsError("GetDiskFreeSpace failed")
free_bytes = free_bytes.value
total_bytes = total_bytes.value
return free_bytes, total_bytes
def _get_free_bytes_default(dir):
"""
Return folder/drive free space and total space (in bytes)
"""
stat = os.statvfs(dir)
free_bytes = stat.f_bsize * stat.f_bfree
total_bytes = stat.f_bsize * stat.f_blocks
return free_bytes, total_bytes
_get_free_bytes = globals().get(
'_get_free_bytes_' + sys.platform, _get_free_bytes_default
)
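# The lookup above resolves to _get_free_bytes_win32 when sys.platform is
# 'win32' and falls back to the statvfs-based default elsewhere; the
# set_wallpaper assignment below uses the same name-dispatch idiom.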
def free_space(dir):
"""
Returns percentage of free space.
"""
try:
free, total = _get_free_bytes(dir)
except OSError:
return False
    percent_free = free / total * 100
    return int(round(percent_free))
URLDetail = collections.namedtuple('URLDetail', 'url title')
def get_wallpaper_details(base_url):
"""
Finds the URL to download the wallpaper version of the image as well
as the title shown on the page.
Return URLDetail.
>>> detail = get_wallpaper_details(base_url)
>>> assert detail.url.startswith('http')
"""
try:
res = urllib.request.urlopen(base_url)
    except urllib.error.URLError:
# Their server isn't responding, or in time, or the page is unavailable
return False
# Their pages write some script tags through document.write, which was
# causing BeautifulSoup to choke
content = b''.join(line for line in res if b'document.write' not in line)
    # html.parser.HTMLParseError was removed in Python 3.5 and BeautifulSoup's
    # built-in parser does not raise it, so parse directly.
    soup = BeautifulSoup(content, 'html.parser')
# Find wallpaper image URL
url = soup.find("meta", {"name": "twitter:image:src"})['content']
title = soup.find("meta", {"name": "twitter:title"})['content']
return URLDetail(url, title)
def download_wallpaper(url, picture_dir, filename):
"""
Downloads URL passed, saves in specified location, cleans filename.
"""
filename = filename + "." + url.split(".")[-1]
outpath = os.path.join(picture_dir, filename)
try:
f = urllib.request.urlopen(url)
print(f"Downloading {url}")
with open(outpath, "wb") as local_file:
local_file.write(f.read())
    except urllib.error.HTTPError as e:
        print(f"HTTP Error: {e.code} {url}")
    except urllib.error.URLError as e:
        print(f"URL Error: {e.reason} {url}")
return outpath
def _set_wallpaper_linux(filename):
"""
Sets the passed file as wallpaper.
"""
cmd = [
'gconftool-2',
'-t',
'str',
'--set',
'/desktop/gnome/background/picture_filename',
filename,
]
subprocess.Popen(cmd)
def _set_wallpaper_win32(filename):
SPI_SETDESKWALLPAPER = 0x14
SPIF_UPDATEINIFILE = 0x1
SPIF_SENDWININICHANGE = 0x2
SystemParametersInfo = ctypes.windll.user32.SystemParametersInfoW
SystemParametersInfo(
SPI_SETDESKWALLPAPER,
0,
str(filename),
SPIF_UPDATEINIFILE | SPIF_SENDWININICHANGE,
)
def _set_wallpaper_darwin(filename):
cmd = [
'defaults',
'write',
'com.apple.desktop',
'Background',
f'{{default = {{ImageFilePath = "{filename}"; }};}}',
]
subprocess.check_call(cmd)
set_wallpaper = globals()['_set_wallpaper_' + sys.platform]
def set_random_wallpaper():
fs = free_space(picture_dir)
if not fs:
print(f"{picture_dir} does not exist, please create.")
raise SystemExit(1)
if fs <= free_space_minimum:
print(f"Not enough free space in {picture_dir}! ({fs}% free)")
raise SystemExit(2)
    detail = get_wallpaper_details(base_url)
    if not detail:
        print("No wallpaper URL found.")
        raise SystemExit(3)
    filename = download_wallpaper(detail.url, picture_dir, detail.title)
set_wallpaper(filename)
if __name__ == '__main__':
set_random_wallpaper()
# ---------------------------------------------------------------------------
# ws4py/framing.py | soulgalore/wptagent @ a26b2b11 | Apache-2.0 | 10,161 bytes | 1,016 stars
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from struct import pack, unpack
from ws4py.exc import FrameTooLargeException, ProtocolException
from ws4py.compat import py3k, ord, range
# Frame opcodes defined in the spec.
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
__all__ = ['Frame']
class Frame(object):
def __init__(self, opcode=None, body=b'', masking_key=None, fin=0, rsv1=0, rsv2=0, rsv3=0):
"""
Implements the framing protocol as defined by RFC 6455.
.. code-block:: python
:linenos:
        >>> test_mask = b'XXXX' # a 4-byte masking key, e.g. from os.urandom(4)
        >>> f = Frame(OPCODE_TEXT, b'hello world', masking_key=test_mask, fin=1)
>>> bytes = f.build()
>>> bytes.encode('hex')
'818bbe04e66ad6618a06d1249105cc6882'
>>> f = Frame()
>>> f.parser.send(bytes[0])
1
>>> f.parser.send(bytes[1])
4
.. seealso:: Data Framing http://tools.ietf.org/html/rfc6455#section-5.2
"""
if not isinstance(body, bytes):
raise TypeError("The body must be properly encoded")
self.opcode = opcode
self.body = body
self.masking_key = masking_key
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.payload_length = len(body)
self._parser = None
@property
def parser(self):
if self._parser is None:
self._parser = self._parsing()
# Python generators must be initialized once.
next(self.parser)
return self._parser
def _cleanup(self):
if self._parser:
self._parser.close()
self._parser = None
def build(self):
"""
Builds a frame from the instance's attributes and returns
its bytes representation.
"""
header = b''
if self.fin > 0x1:
raise ValueError('FIN bit parameter must be 0 or 1')
if 0x3 <= self.opcode <= 0x7 or 0xB <= self.opcode:
raise ValueError('Opcode cannot be a reserved opcode')
## +-+-+-+-+-------+
## |F|R|R|R| opcode|
## |I|S|S|S| (4) |
## |N|V|V|V| |
## | |1|2|3| |
## +-+-+-+-+-------+
header = pack('!B', ((self.fin << 7)
| (self.rsv1 << 6)
| (self.rsv2 << 5)
| (self.rsv3 << 4)
| self.opcode))
## +-+-------------+-------------------------------+
## |M| Payload len | Extended payload length |
## |A| (7) | (16/63) |
## |S| | (if payload len==126/127) |
## |K| | |
## +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
## | Extended payload length continued, if payload len == 127 |
## + - - - - - - - - - - - - - - - +-------------------------------+
if self.masking_key: mask_bit = 1 << 7
else: mask_bit = 0
length = self.payload_length
if length < 126:
header += pack('!B', (mask_bit | length))
elif length < (1 << 16):
header += pack('!B', (mask_bit | 126)) + pack('!H', length)
elif length < (1 << 63):
header += pack('!B', (mask_bit | 127)) + pack('!Q', length)
else:
raise FrameTooLargeException()
## + - - - - - - - - - - - - - - - +-------------------------------+
## | |Masking-key, if MASK set to 1 |
## +-------------------------------+-------------------------------+
## | Masking-key (continued) | Payload Data |
## +-------------------------------- - - - - - - - - - - - - - - - +
## : Payload Data continued ... :
## + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
## | Payload Data continued ... |
## +---------------------------------------------------------------+
body = self.body
if not self.masking_key:
return bytes(header + body)
return bytes(header + self.masking_key + self.mask(body))
def _parsing(self):
"""
Generator to parse bytes into a frame. Yields until
enough bytes have been read or an error is met.
"""
buf = b''
some_bytes = b''
# yield until we get the first header's byte
while not some_bytes:
some_bytes = (yield 1)
first_byte = some_bytes[0] if isinstance(some_bytes, bytearray) else ord(some_bytes[0])
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
self.fin = (first_byte >> 7) & 1
self.rsv1 = (first_byte >> 6) & 1
self.rsv2 = (first_byte >> 5) & 1
self.rsv3 = (first_byte >> 4) & 1
self.opcode = first_byte & 0xf
# frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
if self.rsv1 or self.rsv2 or self.rsv3:
raise ProtocolException()
# control frames between 3 and 7 as well as above 0xA are currently reserved
if 2 < self.opcode < 8 or self.opcode > 0xA:
raise ProtocolException()
# control frames cannot be fragmented
if self.opcode > 0x7 and self.fin == 0:
raise ProtocolException()
# do we already have enough some_bytes to continue?
some_bytes = some_bytes[1:] if some_bytes and len(some_bytes) > 1 else b''
# Yield until we get the second header's byte
while not some_bytes:
some_bytes = (yield 1)
second_byte = some_bytes[0] if isinstance(some_bytes, bytearray) else ord(some_bytes[0])
mask = (second_byte >> 7) & 1
self.payload_length = second_byte & 0x7f
# All control frames MUST have a payload length of 125 some_bytes or less
if self.opcode > 0x7 and self.payload_length > 125:
raise FrameTooLargeException()
if some_bytes and len(some_bytes) > 1:
buf = some_bytes[1:]
some_bytes = buf
else:
buf = b''
some_bytes = b''
if self.payload_length == 127:
# This will compute the actual application data size
if len(buf) < 8:
nxt_buf_size = 8 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < 8:
b = (yield 8 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 8:
buf = some_bytes[8:]
some_bytes = some_bytes[:8]
else:
some_bytes = buf[:8]
buf = buf[8:]
extended_payload_length = some_bytes
self.payload_length = unpack(
'!Q', extended_payload_length)[0]
if self.payload_length > 0x7FFFFFFFFFFFFFFF:
raise FrameTooLargeException()
elif self.payload_length == 126:
if len(buf) < 2:
nxt_buf_size = 2 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < 2:
b = (yield 2 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 2:
buf = some_bytes[2:]
some_bytes = some_bytes[:2]
else:
some_bytes = buf[:2]
buf = buf[2:]
extended_payload_length = some_bytes
self.payload_length = unpack(
'!H', extended_payload_length)[0]
if mask:
if len(buf) < 4:
nxt_buf_size = 4 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while not some_bytes or len(some_bytes) < 4:
b = (yield 4 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 4:
buf = some_bytes[4:]
else:
some_bytes = buf[:4]
buf = buf[4:]
self.masking_key = some_bytes
if len(buf) < self.payload_length:
nxt_buf_size = self.payload_length - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < self.payload_length:
l = self.payload_length - len(some_bytes)
b = (yield l)
if b is not None:
some_bytes = some_bytes + b
else:
if self.payload_length == len(buf):
some_bytes = buf
else:
some_bytes = buf[:self.payload_length]
self.body = some_bytes
yield
def mask(self, data):
"""
Performs the masking or unmasking operation on data
using the simple masking algorithm:
..
j = i MOD 4
transformed-octet-i = original-octet-i XOR masking-key-octet-j
"""
masked = bytearray(data)
        if py3k:
            key = self.masking_key
        else:
            key = map(ord, self.masking_key)
for i in range(len(data)):
masked[i] = masked[i] ^ key[i%4]
return masked
unmask = mask
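# --- Illustration (not part of the original module) ---
# A minimal, self-contained sketch (assuming Python 3 bytes semantics) of the
# two framing details handled above: the XOR masking from the mask() docstring
# and the 7/16/64-bit payload length encoding behind the parser's 126/127
# branches. The helper names (xor_mask, encode_payload_length) are
# illustrative, not ws4py API.
from struct import pack as _pack, unpack as _unpack
def xor_mask(data, masking_key):
    # transformed-octet-i = original-octet-i XOR masking-key-octet-(i MOD 4)
    out = bytearray(data)
    for i in range(len(out)):
        out[i] ^= masking_key[i % 4]
    return bytes(out)
def encode_payload_length(n):
    # 0..125 fit the 7-bit field; 126 flags a 16-bit length, 127 a 64-bit one
    if n <= 125:
        return _pack('!B', n)
    elif n < (1 << 16):
        return _pack('!B', 126) + _pack('!H', n)
    else:
        return _pack('!B', 127) + _pack('!Q', n)
if __name__ == '__main__':
    key = b'\x37\xfa\x21\x3d'  # worked example masking key from RFC 6455
    payload = b'Hello'
    assert xor_mask(payload, key) == b'\x7f\x9f\x4d\x51\x58'  # RFC 6455 example
    assert xor_mask(xor_mask(payload, key), key) == payload  # masking is an involution
    header = encode_payload_length(300)
    assert _unpack('!H', header[1:])[0] == 300  # same '!H' unpack as the 126 branch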
| 37.083942
| 96
| 0.469639
|
a9ba93b379501dc320fc8cd0c9d05b8aaad04da7
| 463
|
py
|
Python
|
leap/modules/components/__init__.py
|
weirayao/leap
|
8d10b8413d02d3be49d5c02a13a0aa60a741d8da
|
[
"MIT"
] | 7
|
2022-01-06T18:37:57.000Z
|
2022-03-20T17:11:30.000Z
|
leap/modules/components/__init__.py
|
weirayao/leap
|
8d10b8413d02d3be49d5c02a13a0aa60a741d8da
|
[
"MIT"
] | null | null | null |
leap/modules/components/__init__.py
|
weirayao/leap
|
8d10b8413d02d3be49d5c02a13a0aa60a741d8da
|
[
"MIT"
] | null | null | null |
"""Public API for `leap.modules.components`."""
from leap.modules.components.base import (InverseNotAvailable,
InputOutsideDomain,
Transform,
CompositeTransform,
MultiscaleCompositeTransform,
InverseTransform)
| 51.444444
| 71
| 0.38013
|
14ee132b50f211d3d7d86fec3abbea89e668d824
| 849
|
py
|
Python
|
tests/test_indieweb/conftest.py
|
rmdes/tanzawa
|
d53baa10bd6c217cd18628437a88a43e3bd02b70
|
[
"Apache-2.0"
] | 25
|
2021-06-13T03:38:44.000Z
|
2022-03-15T15:53:31.000Z
|
tests/test_indieweb/conftest.py
|
rmdes/tanzawa
|
d53baa10bd6c217cd18628437a88a43e3bd02b70
|
[
"Apache-2.0"
] | 59
|
2021-06-12T23:35:06.000Z
|
2022-03-24T21:40:24.000Z
|
tests/test_indieweb/conftest.py
|
rmdes/tanzawa
|
d53baa10bd6c217cd18628437a88a43e3bd02b70
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from model_bakery import baker
@pytest.fixture
def auth_token():
return "58a51838067faa000320f5266238d673c5897f1d"
@pytest.fixture
def client_id():
return "https://ownyourswarm.p3k.io"
@pytest.fixture
def m_micropub_scope():
from indieweb.models import MMicropubScope
return MMicropubScope.objects.all()
@pytest.fixture
def t_token(auth_token, client_id, m_micropub_scope):
t_token = baker.make("indieweb.TToken", auth_token=auth_token, client_id=client_id)
t_token.micropub_scope.set([m_micropub_scope[0], m_micropub_scope[1]])
return t_token
@pytest.fixture
def t_token_access(auth_token, client_id, m_micropub_scope):
t_token = baker.make("indieweb.TToken", key=auth_token, client_id=client_id)
t_token.micropub_scope.set([m_micropub_scope[0], m_micropub_scope[1]])
return t_token
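# --- Illustration (not part of the original conftest) ---
# A hedged sketch of how these fixtures chain: requesting t_token transitively
# resolves auth_token, client_id and m_micropub_scope, and model_bakery
# persists rows, so DB access is needed. This would normally live in a test
# module rather than conftest; the django_db marker assumes pytest-django is
# in use, which is consistent with the model_bakery usage above.
@pytest.mark.django_db
def test_t_token_scopes(t_token, auth_token):
    # t_token was created with auth_token=auth_token and exactly two scopes
    assert t_token.auth_token == auth_token
    assert t_token.micropub_scope.count() == 2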
| 24.970588
| 87
| 0.775029
|
c7494c73ee07f5fdc34b711239e7455fbd57d777
| 35,682
|
py
|
Python
|
selfdrive/controls/controlsd.py
|
Hikari1023/Openpilot
|
b2105d76c797982975706147b2fe42ce940341bc
|
[
"MIT"
] | null | null | null |
selfdrive/controls/controlsd.py
|
Hikari1023/Openpilot
|
b2105d76c797982975706147b2fe42ce940341bc
|
[
"MIT"
] | null | null | null |
selfdrive/controls/controlsd.py
|
Hikari1023/Openpilot
|
b2105d76c797982975706147b2fe42ce940341bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import math
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from common.conversions import Conversions as CV
from panda import ALTERNATIVE_EXPERIENCE
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.latcontrol_torque import LatControlTorque
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
SOFT_DISABLE_TIME = 3 # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad",
"statsd", "shutdownd"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel
IGNORED_SAFETY_MODES = (SafetyModel.silent, SafetyModel.noOutput)
CSID_MAP = {"1": EventName.roadCameraError, "2": EventName.wideRoadCameraError, "0": EventName.driverCameraError}
ACTUATOR_FIELDS = tuple(car.CarControl.Actuators.schema.fields.keys())
ACTIVE_STATES = (State.enabled, State.softDisabling, State.overriding)
ENABLED_STATES = (State.preEnabled, *ACTIVE_STATES)
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None, CI=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# dp
self.dp_jetson = Params().get_bool('dp_jetson')
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
if self.dp_jetson:
self.camera_packets = ["roadCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
if CI is None:
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
else:
self.CI, self.CP = CI, CI.CP
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode") or (self.CP.notCar and sm is None)
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = []
ignore += ['driverCameraState', 'driverMonitoringState'] if self.dp_jetson else []
ignore += ['driverCameraState', 'managerState'] if SIMULATION else []
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
# set alternative experiences from parameters
self.disengage_on_accelerator = params.get_bool("DisengageOnAccelerator")
self.CP.alternativeExperience = 0
if not self.disengage_on_accelerator:
self.CP.alternativeExperience |= ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
self.read_only = not car_recognized or not controller_available or self.CP.dashcamOnly
if self.read_only:
safety_config = car.CarParams.SafetyConfig.new_message()
safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
self.CP.safetyConfigs = [safety_config]
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.CS_prev = car.CarState.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'torque':
self.LaC = LatControlTorque(self.CP, self.CI)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.cruise_mismatch_counter = 0
self.can_rcv_error_counter = 0
self.last_blinker_frame = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = None
self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
self.last_actuators = car.CarControl.Actuators.new_message()
self.desired_curvature = 0.0
self.desired_curvature_rate = 0.0
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0)
if TICI and not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
if len(self.CP.carFw) > 0:
set_offroad_alert("Offroad_CarUnrecognized", True)
else:
set_offroad_alert("Offroad_NoFirmware", True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
# Add startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Disable on rising edge of accelerator or brake. Also disable on brake when speed > 0
if (CS.gasPressed and not self.CS_prev.gasPressed and self.disengage_on_accelerator) or \
(CS.brakePressed and (not self.CS_prev.brakePressed or not CS.standstill)):
self.events.add(EventName.pedalPressed)
if CS.gasPressed:
self.events.add(EventName.pedalPressedPreEnable if self.disengage_on_accelerator else
EventName.gasPressedOverride)
if not self.CP.notCar and not self.dp_jetson:
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle car events. Ignore when CAN is invalid
if CS.canTimeout:
self.events.add(EventName.canBusMissing)
elif not CS.canValid:
self.events.add(EventName.canError)
else:
self.events.add_from_msg(CS.events)
# Create events for battery, temperature, disk space, and memory
if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
      # at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
      # with under 7% of space free, no enable allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > 90 and not SIMULATION:
self.events.add(EventName.lowMemory)
# TODO: enable this once loggerd CPU usage is more reasonable
#cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
#if max(cpus, default=0) > 95 and not SIMULATION:
# self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['peripheralState'].pandaType in (PandaType.uno, PandaType.dos):
if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in (LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing):
self.events.add(EventName.laneChange)
for i, pandaState in enumerate(self.sm['pandaStates']):
# All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
if i < len(self.CP.safetyConfigs):
safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or \
pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam or \
pandaState.alternativeExperience != self.CP.alternativeExperience
else:
safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
# Handle HW and system malfunctions
# Order is very intentional here. Be careful when modifying this.
num_events = len(self.events)
not_running = {p.name for p in self.sm['managerState'].processes if not p.running and p.shouldBeRunning}
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
else:
if not SIMULATION and not self.rk.lagging:
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
elif not self.sm.all_freq_ok(self.camera_packets):
self.events.add(EventName.cameraFrameRate)
if self.rk.lagging:
self.events.add(EventName.controlsdLagging)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
if not self.sm.valid['pandaStates']:
self.events.add(EventName.usbError)
# generic catch-all. ideally, a more specific event should be added above instead
no_system_errors = len(self.events) != num_events
if (not self.sm.all_checks() or self.can_rcv_error) and no_system_errors and CS.canValid and not CS.canTimeout:
if not self.sm.all_alive():
self.events.add(EventName.commIssue)
elif not self.sm.all_freq_ok():
self.events.add(EventName.commIssueAvgFreq)
else: # invalid or can_rcv_error.
self.events.add(EventName.commIssue)
logs = {
'invalid': [s for s, valid in self.sm.valid.items() if not valid],
'not_alive': [s for s, alive in self.sm.alive.items() if not alive],
'not_freq_ok': [s for s, freq_ok in self.sm.freq_ok.items() if not freq_ok],
'can_error': self.can_rcv_error,
}
if logs != self.logged_comm_issue:
cloudlog.event("commIssue", error=True, **logs)
self.logged_comm_issue = logs
else:
self.logged_comm_issue = None
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(6. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)
# Check for FCW
stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.25
model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
if planner_fcw or model_fcw:
self.events.add(EventName.fcw)
if TICI:
for m in messaging.drain_sock(self.log_sock, wait_for_one=False):
try:
msg = m.androidLog.message
if any(err in msg for err in ("ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED")):
csid = msg.split("CSID:")[-1].split(" ")[0]
evt = CSID_MAP.get(csid, None)
if evt is not None:
self.events.add(evt)
except UnicodeDecodeError:
pass
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
          # Not shown in the first 1 km to allow for driving out of a garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
if CS.brakePressed and v_future >= self.CP.vEgoStarting \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
if not self.initialized:
all_valid = CS.canValid and self.sm.all_checks()
if all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION:
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
if REPLAY and self.sm['pandaStates'][0].controlsAllowed:
self.state = State.enabled
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_rcv_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
    # When the panda and controlsd do not agree on controls_allowed,
    # we want to disengage openpilot. However, the panda's status arrives over a
    # different socket than the CAN messages, and one can arrive earlier than the other.
    # Therefore we allow a mismatch for two samples before triggering the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if self.enabled and any(not ps.controlsAllowed for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.pcmCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.vEgo, CS.gasPressed, CS.buttonEvents,
self.button_timers, self.enabled, self.is_metric)
else:
if CS.cruiseState.available:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
else:
self.v_cruise_kph = 0
    # decrement the soft disable timer at every step, as it's reset on
    # entry into the SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, SOFT DISABLING, PRE ENABLING, OVERRIDING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.events.any(ET.OVERRIDE):
self.state = State.overriding
self.current_alert_types.append(ET.OVERRIDE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if self.events.any(ET.NO_ENTRY):
self.state = State.disabled
self.current_alert_types.append(ET.NO_ENTRY)
elif not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# OVERRIDING
elif self.state == State.overriding:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
elif not self.events.any(ET.OVERRIDE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.OVERRIDE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
elif self.events.any(ET.OVERRIDE):
self.state = State.overriding
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
if not self.CP.pcmCruise:
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if openpilot is engaged and actuators are enabled
self.enabled = self.state in ENABLED_STATES
self.active = self.state in ACTIVE_STATES
if self.active:
self.current_alert_types.append(ET.WARNING)
def state_control(self, CS):
"""Given the state, this function returns a CarControl packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
sr = max(params.steerRatio, 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
CC = car.CarControl.new_message()
CC.enabled = self.enabled
# Check which actuators can be enabled
CC.latActive = self.active and not CS.steerFaultTemporary and not CS.steerFaultPermanent and \
CS.vEgo > self.CP.minSteerSpeed and not CS.standstill
CC.longActive = self.active and not self.events.any(ET.OVERRIDE) and self.CP.openpilotLongitudinalControl
actuators = CC.actuators
actuators.longControlState = self.LoC.long_control_state
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not CC.latActive:
self.LaC.reset()
if not CC.longActive:
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
# accel PID loop
pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
t_since_plan = (self.sm.frame - self.sm.rcv_frame['longitudinalPlan']) * DT_CTRL
actuators.accel = self.LoC.update(CC.longActive, CS, long_plan, pid_accel_limits, t_since_plan)
# Steering PID loop and lateral MPC
self.desired_curvature, self.desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(CC.latActive, CS, self.VM, params,
self.last_actuators, self.desired_curvature,
self.desired_curvature_rate, self.sm['liveLocationKalman'])
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0:
if CC.longActive:
actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)
if CC.latActive:
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = self.active
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = actuators.steer
lac_log.saturated = abs(actuators.steer) >= 0.9
# Send a "steering required alert" if saturation count has reached the limit
if lac_log.active and lac_log.saturated and not CS.steeringPressed:
dpath_points = lat_plan.dPathPoints
if len(dpath_points):
# Check if we deviated from the path
# TODO use desired vs actual curvature
left_deviation = actuators.steer > 0 and dpath_points[0] < -0.20
right_deviation = actuators.steer < 0 and dpath_points[0] > 0.20
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)
if not isinstance(attr, Number):
continue
if not math.isfinite(attr):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return CC, lac_log
def update_button_timers(self, buttonEvents):
# increment timer for buttons still pressed
for k in self.button_timers:
if self.button_timers[k] > 0:
self.button_timers[k] += 1
for b in buttonEvents:
if b.type.raw in self.button_timers:
self.button_timers[b.type.raw] = 1 if b.pressed else 0
def publish_logs(self, CS, start_time, CC, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
# Orientation and angle rates can be useful for carcontroller
# Only calibrated (car) frame is relevant for the carcontroller
orientation_value = list(self.sm['liveLocationKalman'].calibratedOrientationNED.value)
if len(orientation_value) > 2:
CC.orientationNED = orientation_value
angular_rate_value = list(self.sm['liveLocationKalman'].angularVelocityCalibrated.value)
if len(angular_rate_value) > 2:
CC.angularVelocity = angular_rate_value
CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
hudControl = CC.hudControl
hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
hudControl.rightLaneVisible = True
hudControl.leftLaneVisible = True
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not CC.latActive and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
model_v2 = self.sm['modelV2']
desire_prediction = model_v2.meta.desirePrediction
if len(desire_prediction) and ldw_allowed:
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]
lane_lines = model_v2.laneLines
l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))
hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event_types = set()
if ET.WARNING not in self.current_alert_types:
clear_event_types.add(ET.WARNING)
if self.enabled:
clear_event_types.add(ET.NO_ENTRY)
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
self.AM.add_many(self.sm.frame, alerts)
current_alert = self.AM.process_alerts(self.sm.frame, clear_event_types)
if current_alert:
hudControl.visualAlert = current_alert.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
self.last_actuators, can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
CC.actuatorsOutput = self.last_actuators
force_decel = False if self.dp_jetson else (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
if current_alert:
controlsState.alertText1 = current_alert.alert_text_1
controlsState.alertText2 = current_alert.alert_text_2
controlsState.alertSize = current_alert.alert_size
controlsState.alertStatus = current_alert.alert_status
controlsState.alertBlinkingRate = current_alert.alert_rate
controlsState.alertType = current_alert.alert_type
controlsState.alertSound = current_alert.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.desiredCurvature = self.desired_curvature
controlsState.desiredCurvatureRate = self.desired_curvature_rate
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_rcv_error_counter
lat_tuning = self.CP.lateralTuning.which()
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif lat_tuning == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif lat_tuning == 'torque':
controlsState.lateralControlState.torqueState = lac_log
elif lat_tuning == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif lat_tuning == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
cloudlog.timestamp("Data sampled")
self.prof.checkpoint("Sample")
self.update_events(CS)
cloudlog.timestamp("Events updated")
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
CC, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, CC, lac_log)
self.prof.checkpoint("Sent")
self.update_button_timers(CS.buttonEvents)
self.CS_prev = CS
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
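# --- Illustration (not part of the original file) ---
# A minimal, self-contained sketch of the transition priority implemented in
# Controls.state_transition: USER_DISABLE/IMMEDIATE_DISABLE always win in any
# non-disabled state, ENABLED drops to SOFT_DISABLING on a soft-disable event,
# and SOFT_DISABLING falls through to DISABLED once its timer expires. The
# enum and event names below are stand-ins, not the cereal definitions.
from enum import Enum, auto
class SketchState(Enum):
  DISABLED = auto()
  ENABLED = auto()
  SOFT_DISABLING = auto()
SKETCH_SOFT_DISABLE_STEPS = 3  # stands in for int(SOFT_DISABLE_TIME / DT_CTRL)
def sketch_transition(state, timer, events):
  if state != SketchState.DISABLED:
    if 'user_disable' in events or 'immediate_disable' in events:
      return SketchState.DISABLED, 0  # user/immediate disable has priority
    if state == SketchState.ENABLED and 'soft_disable' in events:
      return SketchState.SOFT_DISABLING, SKETCH_SOFT_DISABLE_STEPS
    if state == SketchState.SOFT_DISABLING:
      if 'soft_disable' not in events:
        return SketchState.ENABLED, 0  # condition cleared: back to ENABLED
      if timer <= 1:
        return SketchState.DISABLED, 0  # timer expired: disengage
      return SketchState.SOFT_DISABLING, timer - 1
    return state, timer
  return (SketchState.ENABLED, 0) if 'enable' in events else (SketchState.DISABLED, 0)
# usage: repeated soft-disable events walk ENABLED down to DISABLED
_s, _t = sketch_transition(SketchState.ENABLED, 0, {'soft_disable'})
while _s == SketchState.SOFT_DISABLING:
  _s, _t = sketch_transition(_s, _t, {'soft_disable'})
assert _s == SketchState.DISABLED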
if __name__ == "__main__":
main()
| 42.227219
| 136
| 0.699428
|
94e19a64f8cc8de9b0aca6e898845d943788a829
| 582
|
py
|
Python
|
p049.py
|
arpit0891/Project-euler
|
ab36b33c578578595bb518508fa2fe5862f4a044
|
[
"MIT"
] | 1
|
2020-05-14T09:22:32.000Z
|
2020-05-14T09:22:32.000Z
|
p049.py
|
prve17/Project-Euler
|
1ff72404ca9ebe7de2eab83d43960d86bc487515
|
[
"MIT"
] | 1
|
2020-03-13T12:42:28.000Z
|
2020-05-13T13:26:32.000Z
|
p049.py
|
prve17/Project-Euler
|
1ff72404ca9ebe7de2eab83d43960d86bc487515
|
[
"MIT"
] | 3
|
2020-05-13T13:39:46.000Z
|
2020-06-26T10:44:53.000Z
|
import eulerlib
def compute():
LIMIT = 10000
isprime = eulerlib.list_primality(LIMIT - 1)
for base in range(1000, LIMIT):
if isprime[base]:
for step in range(1, LIMIT):
a = base + step
b = a + step
if a < LIMIT and isprime[a] and has_same_digits(a, base) \
and b < LIMIT and isprime[b] and has_same_digits(b, base) \
and (base != 1487 or a != 4817):
return str(base) + str(a) + str(b)
raise RuntimeError("Not found")
def has_same_digits(x, y):
return sorted(str(x)) == sorted(str(y))
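# --- Illustration (not part of the original solution) ---
# A quick check of the known sequence that compute() deliberately skips via
# (base != 1487 or a != 4817): 1487, 4817, 8147 differ by 3330, are digit
# permutations of one another, and are all prime. _is_prime is a throwaway
# trial-division helper, not part of eulerlib.
def _is_prime(n):
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))
def _check_known_sequence():
    a, b, c = 1487, 4817, 8147
    assert b - a == c - b == 3330
    assert has_same_digits(a, b) and has_same_digits(b, c)
    assert all(map(_is_prime, (a, b, c)))
_check_known_sequence()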
if __name__ == "__main__":
print(compute())
| 21.555556
| 66
| 0.627148
|
94108f9260a7b091dd003439fcb83a0b7f013560
| 24,049
|
py
|
Python
|
jp.atcoder/abc042/abc042_a/15373460.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc042/abc042_a/15373460.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc042/abc042_a/15373460.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import string
import sys
from bisect import bisect_left as bi_l
from bisect import bisect_right as bi_r
from collections import Counter, defaultdict, deque
from heapq import heappop, heappush
from itertools import combinations, product
import numpy as np
inf = float("inf")
MOD = 10**9 + 7
# MOD = 998244353
class NumberTheory:
def __init__(self, n=2 * 10**6, numpy=True):
self.n = n
self.np_flg = numpy
self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(
n
)
def sieve_of_eratosthenes(self, n):
if self.np_flg:
sieve = np.ones(n + 1, dtype=np.int64)
sieve[:2] = 0
for i in range(2, int(n**0.5) + 1):
if sieve[i]:
sieve[i * 2 :: i] = 0
prime_numbers = np.flatnonzero(sieve)
else:
sieve = [1] * (n + 1)
sieve[0] = sieve[1] = 0
for i in range(2, int(n**0.5) + 1):
if not sieve[i]:
continue
for j in range(i * 2, n + 1, i):
sieve[j] = 0
prime_numbers = [i for i in range(2, n + 1) if sieve[i]]
return sieve, prime_numbers
def prime_factorize(self, n):
res = dict()
if n < 2:
return res
border = int(n**0.5)
for p in self.prime_numbers:
if p > border:
break
while n % p == 0:
res[p] = res.get(p, 0) + 1
n //= p
if n == 1:
return res
res[n] = 1
return res
def prime_factorize_factorial(self, n):
res = dict()
for i in range(2, n + 1):
for p, c in self.prime_factorize(i).items():
res[p] = res.get(p, 0) + c
return res
    @staticmethod
    def gcd(a, b):
        # recurse through the class name; a bare gcd() here would be an undefined global
        return NumberTheory.gcd(b, a % b) if b else abs(a)
    @staticmethod
    def lcm(a, b):
        return abs(a // NumberTheory.gcd(a, b) * b)
@staticmethod
def find_divisors(n):
divisors = []
for i in range(1, int(n**0.5) + 1):
if n % i:
continue
divisors.append(i)
j = n // i
if j != i:
divisors.append(j)
return divisors
@staticmethod
def base_convert(n, b):
if not n:
return [0]
res = []
while n:
n, r = divmod(n, b)
if r < 0:
n += 1
r -= b
res.append(r)
return res
class UnionFind:
def __init__(self, n=10**6):
self.root = list(range(n))
self.height = [0] * n
self.size = [1] * n
def find_root(self, u):
if self.root[u] == u:
return u
self.root[u] = self.find_root(self.root[u])
return self.root[u]
def unite(self, u, v):
ru = self.find_root(u)
rv = self.find_root(v)
if ru == rv:
return
hu = self.height[ru]
hv = self.height[rv]
if hu >= hv:
self.root[rv] = ru
self.size[ru] += self.size[rv]
self.height[ru] = max(hu, hv + 1)
else:
self.root[ru] = rv
self.size[rv] += self.size[ru]
class Combinatorics:
def __init__(self, N=10**9, n=10**6, mod=10**9 + 7, numpy=True):
self.mod = mod
self.nCr = dict()
self.np_flg = numpy
self.make_mod_tables(N, n)
sys.setrecursionlimit(10**6)
def choose(self, n, r, mod=None): # no mod, or mod ≠ prime
if r > n or r < 0:
return 0
if r == 0:
return 1
if (n, r) in self.nCr:
return self.nCr[(n, r)]
if not mod:
self.nCr[(n, r)] = self.choose(n - 1, r) + self.choose(
n - 1, r - 1
)
else:
self.nCr[(n, r)] = (
self.choose(n - 1, r, mod) + self.choose(n - 1, r - 1, mod)
) % mod
return self.nCr[(n, r)]
    def cumprod(self, a):
        # cumulative product modulo self.mod, vectorized with numpy:
        # lay the array out in a ~sqrt(l) x sqrt(l) grid, take prefix
        # products along each row, then fold each row's last entry into
        # the rows below it
        p = self.mod
        l = len(a)
        sql = int(np.sqrt(l) + 1)
        a = np.resize(a, sql**2).reshape(sql, sql)
        for i in range(sql - 1):
            a[:, i + 1] *= a[:, i]
            a[:, i + 1] %= p
        for i in range(sql - 1):
            a[i + 1] *= a[i, -1]
            a[i + 1] %= p
        return np.ravel(a)[:l]
def make_mod_tables(self, N, n):
p = self.mod
if self.np_flg:
fac = np.arange(n + 1)
fac[0] = 1
fac = self.cumprod(fac)
ifac = np.arange(n + 1, 0, -1)
ifac[0] = pow(int(fac[-1]), p - 2, p)
ifac = self.cumprod(ifac)[n::-1]
n_choose = np.arange(N + 1, N - n, -1)
n_choose[0] = 1
n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p
else:
fac = [None] * (n + 1)
fac[0] = 1
for i in range(n):
fac[i + 1] = fac[i] * (i + 1) % p
ifac = [None] * (n + 1)
ifac[n] = pow(fac[n], p - 2, p)
for i in range(n, 0, -1):
ifac[i - 1] = ifac[i] * i % p
n_choose = [None] * (n + 1)
n_choose[0] = 1
for i in range(n):
n_choose[i + 1] = n_choose[i] * (N - i) % p
for i in range(n + 1):
n_choose[i] = n_choose[i] * ifac[i] % p
self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose
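# --- Illustration (not part of the original template) ---
# A small sketch of how the tables built by make_mod_tables are used:
# C(n, r) mod p = fac[n] * ifac[r] * ifac[n - r] mod p. numpy=False keeps
# the arithmetic in plain ints for the check.
def _demo_combinatorics():
    comb = Combinatorics(N=10**9, n=100, mod=10**9 + 7, numpy=False)
    p = comb.mod
    assert comb.fac[10] * comb.ifac[3] % p * comb.ifac[7] % p == 120  # C(10, 3)
if __name__ == '__main__':
    _demo_combinatorics()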
def z_algorithm(s):
    # Z-algorithm: a[i] is the length of the longest common prefix of s and
    # s[i:], computed in O(n) by reusing the previously found Z-box [l, r).
    n = len(s)
    a = [0] * n
    a[0] = n
    l = r = -1
    for i in range(1, n):
        if r >= i:
            # i lies inside the current Z-box: copy the mirrored value,
            # capped so it does not run past the box
            a[i] = min(a[i - l], r - i)
        while i + a[i] < n and s[i + a[i]] == s[a[i]]:
            a[i] += 1
        if i + a[i] >= r:
            l, r = i, i + a[i]
    return a
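# --- Illustration (not part of the original template) ---
# z_algorithm on "aabaab": a[i] is the longest common prefix of s and s[i:],
# e.g. a[3] == 3 since s[3:] == "aab" matches the first three characters.
# A common use is substring search on pattern + sentinel + text.
assert z_algorithm('aabaab') == [6, 1, 0, 3, 1, 0]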
class ABC001:
def A():
h1, h2 = map(int, sys.stdin.read().split())
print(h1 - h2)
def B():
pass
def C():
pass
def D():
pass
class ABC002:
def A():
x, y = map(int, sys.stdin.readline().split())
print(max(x, y))
def B():
vowels = set("aeiou")
s = sys.stdin.readline().rstrip()
t = ""
for c in s:
if c in vowels:
continue
t += c
print(t)
def C():
(*coords,) = map(int, sys.stdin.readline().split())
def triangle_area(x0, y0, x1, y1, x2, y2):
x1 -= x0
x2 -= x0
y1 -= y0
y2 -= y0
return abs(x1 * y2 - x2 * y1) / 2
print(triangle_area(*coords))
def D():
n, m = map(int, sys.stdin.readline().split())
edges = set()
for _ in range(m):
x, y = map(int, sys.stdin.readline().split())
x -= 1
y -= 1
edges.add((x, y))
cand = []
for i in range(1, 1 << n):
s = [j for j in range(n) if i >> j & 1]
for x, y in combinations(s, 2):
if (x, y) not in edges:
break
else:
cand.append(len(s))
print(max(cand))
class ABC003:
def A():
n = int(sys.stdin.readline().rstrip())
print((n + 1) * 5000)
def B():
atcoder = set("atcoder")
s, t = sys.stdin.read().split()
for i in range(len(s)):
if s[i] == t[i]:
continue
if s[i] == "@" and t[i] in atcoder:
continue
if t[i] == "@" and s[i] in atcoder:
continue
print("You will lose")
return
print("You can win")
def C():
n, k, *r = map(int, sys.stdin.read().split())
res = 0
for x in sorted(r)[-k:]:
res = (res + x) / 2
print(res)
def D():
pass
class ABC004:
def A():
print(int(sys.stdin.readline().rstrip()) * 2)
def B():
c = [sys.stdin.readline().rstrip() for _ in range(4)]
for l in c[::-1]:
print(l[::-1])
def C():
n = int(sys.stdin.readline().rstrip())
n %= 30
res = list(range(1, 7))
for i in range(n):
i %= 5
res[i], res[i + 1] = res[i + 1], res[i]
print("".join(map(str, res)))
def D():
pass
class ABC005:
def A():
x, y = map(int, sys.stdin.readline().split())
print(y // x)
def B():
n, *t = map(int, sys.stdin.read().split())
print(min(t))
def C():
t = int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
m = int(sys.stdin.readline().rstrip())
b = [int(x) for x in sys.stdin.readline().split()]
i = 0
for p in b:
if i == n:
print("no")
return
while p - a[i] > t:
i += 1
if i == n:
print("no")
return
if a[i] > p:
print("no")
return
i += 1
print("yes")
def D():
n = int(sys.stdin.readline().rstrip())
d = np.array(
[sys.stdin.readline().split() for _ in range(n)], np.int64
)
s = d.cumsum(axis=0).cumsum(axis=1)
s = np.pad(s, 1)
max_del = np.zeros((n + 1, n + 1), dtype=np.int64)
for y in range(1, n + 1):
for x in range(1, n + 1):
max_del[y, x] = np.amax(
s[y : n + 1, x : n + 1]
- s[0 : n - y + 1, x : n + 1]
- s[y : n + 1, 0 : n - x + 1]
+ s[0 : n - y + 1, 0 : n - x + 1]
)
res = np.arange(n**2 + 1)[:, None]
i = np.arange(1, n + 1)
res = max_del[i, np.minimum(res // i, n)].max(axis=1)
q = int(sys.stdin.readline().rstrip())
p = np.array(sys.stdin.read().split(), dtype=np.int64)
print(*res[p], sep="\n")
class ABC006:
def A():
n = sys.stdin.readline().rstrip()
if "3" in n:
print("YES")
elif int(n) % 3 == 0:
print("YES")
else:
print("NO")
def B():
mod = 10007
t = [0, 0, 1]
for _ in range(1001001):
t.append(t[-1] + t[-2] + t[-3])
t[-1] %= mod
n = int(sys.stdin.readline().rstrip())
print(t[n - 1])
def C():
n, m = map(int, sys.stdin.readline().split())
cnt = [0, 0, 0]
if m == 1:
cnt = [-1, -1, -1]
else:
if m & 1:
m -= 3
cnt[1] += 1
n -= 1
cnt[2] = m // 2 - n
cnt[0] = n - cnt[2]
if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:
print(-1, -1, -1)
else:
print(*cnt, sep=" ")
def D():
n, *c = map(int, sys.stdin.read().split())
lis = [inf] * n
for x in c:
lis[bi_l(lis, x)] = x
print(n - bi_l(lis, inf))
class ABC007:
def A():
n = int(sys.stdin.readline().rstrip())
print(n - 1)
def B():
s = sys.stdin.readline().rstrip()
if s == "a":
print(-1)
else:
print("a")
def C():
r, c = map(int, sys.stdin.readline().split())
sy, sx = map(int, sys.stdin.readline().split())
gy, gx = map(int, sys.stdin.readline().split())
sy -= 1
sx -= 1
gy -= 1
gx -= 1
maze = [sys.stdin.readline().rstrip() for _ in range(r)]
queue = deque([(sy, sx)])
dist = np.full((r, c), np.inf)
dist[sy, sx] = 0
while queue:
y, x = queue.popleft()
for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
i += y
j += x
if maze[i][j] == "#" or dist[i, j] != np.inf:
continue
dist[i, j] = dist[y, x] + 1
queue.append((i, j))
print(int(dist[gy, gx]))
def D():
pass
class ABC008:
def A():
s, t = map(int, sys.stdin.readline().split())
print(t - s + 1)
def B():
n, *s = sys.stdin.read().split()
res = defaultdict(int)
for name in s:
res[name] += 1
print(sorted(res.items(), key=lambda x: x[1])[-1][0])
def C():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
c = n - np.count_nonzero(a[:, None] % a, axis=1)
print(np.sum((c + 1) // 2 / c))
def D():
pass
class ABC009:
def A():
n = int(sys.stdin.readline().rstrip())
print((n + 1) // 2)
def B():
n, *a = map(int, sys.stdin.read().split())
print(sorted(set(a))[-2])
def C():
n, k = map(int, sys.stdin.readline().split())
s = list(sys.stdin.readline().rstrip())
cost = [1] * n
r = k
for i in range(n - 1):
q = []
for j in range(i + 1, n):
if s[j] < s[i] and cost[i] + cost[j] <= r:
heappush(q, (s[j], cost[i] + cost[j], -j))
if not q:
continue
_, c, j = heappop(q)
j = -j
s[i], s[j] = s[j], s[i]
r -= c
cost[i] = cost[j] = 0
print("".join(s))
def D():
pass
class ABC010:
def A():
print(sys.stdin.readline().rstrip() + "pp")
def B():
n, *a = map(int, sys.stdin.read().split())
tot = 0
for x in a:
c = 0
while x % 2 == 0 or x % 3 == 2:
x -= 1
c += 1
tot += c
print(tot)
def C():
sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(-1, 2).T
def dist(x1, y1, x2, y2):
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
ans = (
"YES"
if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()
else "NO"
)
print(ans)
def D():
pass
class ABC011:
def A():
n = int(sys.stdin.readline().rstrip())
print(n % 12 + 1)
def B():
s = sys.stdin.readline().rstrip()
print(s[0].upper() + s[1:].lower())
def C():
n, *ng = map(int, sys.stdin.read().split())
ng = set(ng)
if n in ng:
print("NO")
else:
r = 100
while n > 0:
if r == 0:
print("NO")
return
for i in range(3, 0, -1):
if (n - i) in ng:
continue
n -= i
r -= 1
break
else:
print("NO")
return
print("YES")
def D():
pass
class ABC041:
def A():
s, i = sys.stdin.read().split()
i = int(i)
print(s[i - 1])
def B():
MOD = 10**9 + 7
a, b, c = map(int, sys.stdin.readline().split())
ans = a * b % MOD * c % MOD
print(ans)
def C():
n, *a = map(int, sys.stdin.read().split())
for i, h in sorted(enumerate(a), key=lambda x: -x[1]):
print(i + 1)
def D():
n, m, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
edges = [0] * n
for x, y in xy:
x -= 1
y -= 1
edges[x] |= 1 << y
comb = [None] * (1 << n)
comb[0] = 1
def count(edges, bit):
if comb[bit] is not None:
return comb[bit]
comb[bit] = 0
for i in range(n):
if (bit >> i) & 1 and not edges[i]:
nxt_bit = bit & ~(1 << i)
nxt_edges = edges.copy()
for j in range(n):
nxt_edges[j] &= ~(1 << i)
cnt = count(nxt_edges, nxt_bit)
comb[bit] += cnt
return comb[bit]
print(count(edges, (1 << n) - 1))
class ABC042:
def A():
a = [int(x) for x in sys.stdin.readline().split()]
c = Counter(a)
print("YES" if c[5] == 2 and c[7] == 1 else "NO")
def B():
pass
def C():
pass
def D():
pass
class ABC170:
def A():
x = [int(x) for x in sys.stdin.readline().split()]
for i in range(5):
if x[i] != i + 1:
print(i + 1)
break
def B():
x, y = map(int, sys.stdin.readline().split())
print("Yes" if 2 * x <= y <= 4 * x and y % 2 == 0 else "No")
def C():
x, n, *p = map(int, sys.stdin.read().split())
a = list(set(range(102)) - set(p))
a = [(abs(y - x), y) for y in a]
print(sorted(a)[0][1])
def D():
n, *a = map(int, sys.stdin.read().split())
cand = set(a)
cnt = 0
for x, c in sorted(Counter(a).items()):
cnt += c == 1 and x in cand
cand -= set(range(x * 2, 10**6 + 1, x))
print(cnt)
def E():
n, q = map(int, sys.stdin.readline().split())
queue = []
num_kindergarten = 2 * 10**5
queue_kindergarten = [[] for _ in range(num_kindergarten)]
highest_kindergarten = [None] * num_kindergarten
where = [None] * n
rate = [None] * n
def entry(i, k):
where[i] = k
while queue_kindergarten[k]:
r, j = heappop(queue_kindergarten[k])
if where[j] != k or j == i:
continue
if rate[i] >= -r:
highest_kindergarten[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(queue_kindergarten[k], (r, j))
break
else:
highest_kindergarten[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(queue_kindergarten[k], (-rate[i], i))
def transfer(i, k):
now = where[i]
while queue_kindergarten[now]:
r, j = heappop(queue_kindergarten[now])
if where[j] != now or j == i:
continue
if highest_kindergarten[now] != -r:
highest_kindergarten[now] = -r
heappush(queue, (-r, now, j))
heappush(queue_kindergarten[now], (r, j))
break
else:
highest_kindergarten[now] = None
entry(i, k)
def inquire():
while True:
r, k, i = heappop(queue)
if where[i] != k or r != highest_kindergarten[k]:
continue
heappush(queue, (r, k, i))
return r
for i in range(n):
a, b = map(int, sys.stdin.readline().split())
rate[i] = a
entry(i, b - 1)
for _ in range(q):
c, d = map(int, sys.stdin.readline().split())
transfer(c - 1, d - 1)
print(inquire())
def F():
pass
class ABC171:
def A():
c = sys.stdin.readline().rstrip()
print("A" if c < "a" else "a")
def B():
n, k, *p = map(int, sys.stdin.read().split())
print(sum(sorted(p)[:k]))
def C():
n = int(sys.stdin.readline().rstrip())
n -= 1
l = 1
while True:
if n < pow(26, l):
break
n -= pow(26, l)
l += 1
res = "".join(
[chr(ord("a") + d % 26) for d in NumberTheory.base_convert(n, 26)][
::-1
]
)
res = "a" * (l - len(res)) + res
print(res)
def D():
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
s = sum(a)
cnt = Counter(a)
q = int(sys.stdin.readline().rstrip())
for _ in range(q):
b, c = map(int, sys.stdin.readline().split())
s += (c - b) * cnt[b]
print(s)
cnt[c] += cnt[b]
cnt[b] = 0
def E():
n, *a = map(int, sys.stdin.read().split())
s = 0
for x in a:
s ^= x
b = map(lambda x: x ^ s, a)
print(*b, sep=" ")
def F():
pass
class ABC172:
def A():
pass
def B():
pass
def C():
pass
def D():
pass
def E():
pass
def F():
pass
class ABC173:
def A():
n = int(sys.stdin.readline().rstrip())
charge = (n + 999) // 1000 * 1000 - n
print(charge)
def B():
n, *s = sys.stdin.read().split()
c = Counter(s)
for v in "AC, WA, TLE, RE".split(", "):
print(f"{v} x {c[v]}")
def C():
h, w, k = map(int, sys.stdin.readline().split())
c = [sys.stdin.readline().rstrip() for _ in range(h)]
tot = 0
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for y in range(h):
for x in range(w):
if i >> y & 1 or j >> x & 1:
continue
cnt += c[y][x] == "#"
tot += cnt == k
print(tot)
def D():
n, *a = map(int, sys.stdin.read().split())
a.sort(reverse=True)
res = (
a[0]
+ sum(a[1 : 1 + (n - 2) // 2]) * 2
+ a[1 + (n - 2) // 2] * (n & 1)
)
print(res)
def E():
MOD = 10**9 + 7
n, k, *a = map(int, sys.stdin.read().split())
minus = [x for x in a if x < 0]
plus = [x for x in a if x > 0]
if len(plus) + len(minus) // 2 * 2 >= k: # plus
(*minus,) = map(abs, minus)
minus.sort(reverse=True)
plus.sort(reverse=True)
cand = []
if len(minus) & 1:
minus = minus[:-1]
for i in range(0, len(minus) - 1, 2):
cand.append(minus[i] * minus[i + 1] % MOD)
if k & 1:
res = plus[0]
plus = plus[1:]
else:
res = 1
if len(plus) & 1:
plus = plus[:-1]
for i in range(0, len(plus) - 1, 2):
cand.append(plus[i] * plus[i + 1] % MOD)
cand.sort(reverse=True)
for x in cand[: k // 2]:
res *= x
res %= MOD
print(res)
elif 0 in a:
print(0)
else:
cand = sorted(map(abs, a))
res = 1
for i in range(k):
res *= cand[i]
res %= MOD
res = MOD - res
print(res)
pass
def F():
pass
if __name__ == "__main__":
ABC042.A()
| 27.082207
| 80
| 0.383592
|
61af03b487a04a8d1f1d0c1e56fe5752bc0a8d14
| 3,393
|
py
|
Python
|
mnist_keras.py
|
bharatsunny/installations
|
081c83c9c96678804935c52405afd197d8d0a9ea
|
[
"Apache-2.0"
] | 35
|
2017-03-15T08:17:40.000Z
|
2021-11-17T12:46:19.000Z
|
mnist_keras.py
|
bharatsunny/installations
|
081c83c9c96678804935c52405afd197d8d0a9ea
|
[
"Apache-2.0"
] | 4
|
2017-10-16T10:54:37.000Z
|
2018-12-06T08:10:18.000Z
|
mnist_keras.py
|
bharatsunny/installations
|
081c83c9c96678804935c52405afd197d8d0a9ea
|
[
"Apache-2.0"
] | 11
|
2017-06-26T12:36:40.000Z
|
2021-03-18T11:35:57.000Z
|
import tensorflow as tf
from keras.initializers import Constant
from keras.initializers import TruncatedNormal
from keras.layers import Reshape
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
from keras.objectives import categorical_crossentropy
from tensorflow.examples.tutorials.mnist import input_data
sess = tf.Session()
K.set_session(sess)
# this placeholder will contain our input digits, as flat vectors
img = tf.placeholder(tf.float32, shape=(None, 784), name="input_tensor")
labels = tf.placeholder(tf.float32, shape=(None, 10))
# Keras layers can be called on TensorFlow tensors:
x = Reshape((-1, 28, 28))(img)
x = Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
activation='relu', kernel_initializer=TruncatedNormal(stddev=0.1),
use_bias=True, bias_initializer=Constant(0.1), name="conv2d_1")(x)
x = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', name="max_pool_1")(x)
x = Conv2D(64, kernel_size=(5, 5), strides=(1, 1), padding='same',
activation='relu', kernel_initializer=TruncatedNormal(stddev=0.1),
use_bias=True, bias_initializer=Constant(0.1), name="conv2d_2")(x)
x = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', name="max_pool_2")(x)
x = Flatten(name='flatten')(x)
x = Dense(1024, activation='relu', kernel_initializer=TruncatedNormal(stddev=0.1),
use_bias=True, bias_initializer=Constant(0.1), name='fc1')(x)
x = Dropout(0.5, name='dropout')(x)
preds = Dense(10, kernel_initializer=TruncatedNormal(stddev=0.1),
              use_bias=True, bias_initializer=Constant(0.1), name='fc2')(x)  # output layer with 10 logits; softmax is applied inside the loss below
preds = tf.identity(preds, name="output_tensor")
# Training function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=preds))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Accuracy op so we can verify the model
correct_prediction = tf.equal(tf.argmax(preds,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Prepare the SavedModel builder.
builder = tf.saved_model.builder.SavedModelBuilder("./model_keras")
# Initialize all variables
sess.run(tf.global_variables_initializer())
# Load training data.
mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
# Run training loop
with sess.as_default():
for i in range(20000):
batch = mnist_data.train.next_batch(50)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={
img:batch[0], labels: batch[1], K.learning_phase(): 0})
print("step %d, training accuracy %g"%(i, train_accuracy))
train_step.run(feed_dict={img: batch[0],
labels: batch[1],
K.learning_phase(): 1})
# Save the model so we can use it in Java.
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
builder.save(True)
writer = tf.summary.FileWriter('./keras_board/1')
writer.add_graph(sess.graph)
# Print final accuracy.
with sess.as_default():
print("test accuracy %g" % accuracy.eval(feed_dict={
img: mnist_data.test.images,
labels: mnist_data.test.labels,
K.learning_phase(): 0}))
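# --- Illustration (not part of the original script) ---
# A hedged sketch of loading the SavedModel exported above in a fresh session
# and running the named tensors. "input_tensor:0" / "output_tensor:0" follow
# the name= arguments used in the graph; the Keras learning-phase placeholder
# name ("keras_learning_phase:0") is an assumption and may need adjusting.
def load_and_predict(export_dir="./model_keras"):
    import numpy as np
    with tf.Session(graph=tf.Graph()) as new_sess:
        tf.saved_model.loader.load(
            new_sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        g = new_sess.graph
        feed = {g.get_tensor_by_name("input_tensor:0"):
                np.zeros((1, 784), dtype=np.float32)}
        try:
            feed[g.get_tensor_by_name("keras_learning_phase:0")] = 0
        except KeyError:
            pass  # name differs between Keras versions; inspect g.get_operations()
        return new_sess.run(g.get_tensor_by_name("output_tensor:0"), feed)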
| 40.879518
| 120
| 0.724138
|
0cde91c56d35cccb423b17f8229a0008fdd9aa45
| 3,932
|
py
|
Python
|
test/LINK/VersionedLib-j2.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/LINK/VersionedLib-j2.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/LINK/VersionedLib-j2.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Ensure that SharedLibrary builder works with SHLIBVERSION and -j2.
This is regression test for:
http://article.gmane.org/gmane.comp.programming.tools.scons.user/27049
"""
import TestSCons
import os
import sys
import SCons.Platform
import SCons.Defaults
test = TestSCons.TestSCons()
test.write('foo.c', """
#if _WIN32
__declspec(dllexport)
#endif
int foo() { return 0; }
""")
test.write('main.c', """
#if _WIN32
__declspec(dllimport)
#endif
int foo();
int main(void) { return foo(); }
""")
test.write('SConstruct', """
env = Environment()
env.AppendUnique(LIBPATH = ['.'])
env.Program('main.c', LIBS = ['foo'])
env.SharedLibrary('foo', 'foo.c', SHLIBVERSION = '0.1.2')
""")
test.run(arguments = ['-j 2', '--tree=all'])
env = SCons.Defaults.DefaultEnvironment()
platform = SCons.Platform.platform_default()
tool_list = SCons.Platform.DefaultToolList(platform, env)
if platform == 'cygwin':
    # PATH is used to search for *.dll libraries (cygfoo-0-1-2.dll in our case)
path = os.environ.get('PATH','')
if path: path = path + os.pathsep
path = path + test.workpath('.')
os.environ['PATH'] = path
if os.name == 'posix':
os.environ['LD_LIBRARY_PATH'] = test.workpath('.')
if sys.platform.find('irix') != -1:
os.environ['LD_LIBRARYN32_PATH'] = test.workpath('.')
test.run(program = test.workpath('main'))
test.run(arguments = ['-c'])
platform = SCons.Platform.platform_default()
if 'gnulink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'libfoo.so',
'libfoo.so.0',
'libfoo.so.0.1.2',
'foo.os',
]
elif 'applelink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'libfoo.dylib',
'libfoo.0.1.2.dylib',
'foo.os',
]
elif 'cyglink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'cygfoo-0-1-2.dll',
'libfoo-0-1-2.dll.a',
'libfoo.dll.a',
'foo.os',
]
elif 'mslink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'foo.dll',
'foo.lib',
'foo.obj',
]
elif 'sunlink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'libfoo.so',
'libfoo.so.0',
'libfoo.so.0.1.2',
'so_foo.os',
]
else:
# All (?) the files we expect will get created in the current directory
files= [
'libfoo.so',
'foo.os']
for f in files:
    test.must_not_exist([f])
test.must_exist(['main.c'])
test.must_exist(['foo.c'])
test.must_exist(['SConstruct'])
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 26.748299
| 79
| 0.683367
|
d26605014e4189da8272f1086919ded2f6db9884
| 2,085
|
py
|
Python
|
stockroom_bot/tuck_arm.py
|
amjadmajid/rosbook
|
20d4ab94d910adc62c4aecb471ceac13b5cef5ad
|
[
"Apache-2.0"
] | 442
|
2015-12-11T02:59:16.000Z
|
2022-03-31T22:10:25.000Z
|
stockroom_bot/tuck_arm.py
|
amjadmajid/rosbook
|
20d4ab94d910adc62c4aecb471ceac13b5cef5ad
|
[
"Apache-2.0"
] | 41
|
2016-01-07T19:15:29.000Z
|
2021-12-03T01:52:58.000Z
|
stockroom_bot/tuck_arm.py
|
amjadmajid/rosbook
|
20d4ab94d910adc62c4aecb471ceac13b5cef5ad
|
[
"Apache-2.0"
] | 249
|
2015-11-27T10:22:33.000Z
|
2022-03-28T09:52:05.000Z
|
#!/usr/bin/env python
# modified from Fetch's prepare_simulated_robot.py
import rospy, actionlib
from control_msgs.msg import (FollowJointTrajectoryAction,
FollowJointTrajectoryGoal)
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
arm_joint_names = \
["shoulder_pan_joint", "shoulder_lift_joint", "upperarm_roll_joint",
"elbow_flex_joint",
"forearm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
arm_intermediate_positions = [1.32, 0, -1.4, 1.72, 0.0, 1.66, 0.0]
arm_joint_positions = [1.32, 1.40, -0.2, 1.72, 0.0, 1.66, 0.0]
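# These joint positions are used below as three trajectory waypoints: the zero
# pose at t = 1.0 s, the intermediate pose at 4.0 s, and the tucked pose at 7.5 s.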
if __name__ == "__main__":
rospy.init_node("tuck_arm")
arm_client = actionlib.SimpleActionClient("arm_controller/follow_joint_trajectory", FollowJointTrajectoryAction)
arm_client.wait_for_server()
trajectory = JointTrajectory()
trajectory.joint_names = arm_joint_names
trajectory.points.append(JointTrajectoryPoint())
trajectory.points[0].positions = [0.0] * len(arm_joint_positions)
trajectory.points[0].velocities = [0.0] * len(arm_joint_positions)
trajectory.points[0].accelerations = [0.0] * len(arm_joint_positions)
trajectory.points[0].time_from_start = rospy.Duration(1.0)
trajectory.points.append(JointTrajectoryPoint())
trajectory.points[1].positions = arm_intermediate_positions
trajectory.points[1].velocities = [0.0] * len(arm_joint_positions)
trajectory.points[1].accelerations = [0.0] * len(arm_joint_positions)
trajectory.points[1].time_from_start = rospy.Duration(4.0)
trajectory.points.append(JointTrajectoryPoint())
trajectory.points[2].positions = arm_joint_positions
trajectory.points[2].velocities = [0.0] * len(arm_joint_positions)
trajectory.points[2].accelerations = [0.0] * len(arm_joint_positions)
trajectory.points[2].time_from_start = rospy.Duration(7.5)
arm_goal = FollowJointTrajectoryGoal()
arm_goal.trajectory = trajectory
arm_goal.goal_time_tolerance = rospy.Duration(0.0)
arm_client.send_goal(arm_goal)
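    # Note: the trajectory's final waypoint is at 7.5 s, so this 6.0 s wait
    # may time out before the motion actually finishes.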
arm_client.wait_for_result(rospy.Duration(6.0))
| 46.333333
| 116
| 0.748201
|
b07448d9e1fe51fa156456fd86b06718f7232a76
| 8,362
|
py
|
Python
|
samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/petstore_api/model/parent.py
|
Celebrate-future/openapi-generator
|
100c078b90f0ce1408139c04bb83df2c2fb51875
|
[
"Apache-2.0"
] | null | null | null |
samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/petstore_api/model/parent.py
|
Celebrate-future/openapi-generator
|
100c078b90f0ce1408139c04bb83df2c2fb51875
|
[
"Apache-2.0"
] | null | null | null |
samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/petstore_api/model/parent.py
|
Celebrate-future/openapi-generator
|
100c078b90f0ce1408139c04bb83df2c2fb51875
|
[
"Apache-2.0"
] | null | null | null |
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from petstore_api.model.grandparent import Grandparent
from petstore_api.model.parent_all_of import ParentAllOf
globals()['Grandparent'] = Grandparent
globals()['ParentAllOf'] = ParentAllOf
class Parent(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'radio_waves': (bool,), # noqa: E501
'tele_vision': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'radio_waves': 'radioWaves', # noqa: E501
'tele_vision': 'teleVision', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Parent - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
            composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
radio_waves (bool): [optional] # noqa: E501
tele_vision (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
Grandparent,
ParentAllOf,
],
'oneOf': [
],
}
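# Illustrative usage (hypothetical values, not part of the generated module):
#   p = Parent(radio_waves=True, tele_vision=False)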
| 39.258216
| 174
| 0.593997
|
13bfa1dab39f1329e03fffe03ab9744fbd1e3682
| 5,408
|
py
|
Python
|
apel/parsers/lsf.py
|
sjones-hep-ph-liv-ac-uk/apel
|
1dcd3b809e445b0c2284017339638cfaab44892e
|
[
"Apache-2.0"
] | null | null | null |
apel/parsers/lsf.py
|
sjones-hep-ph-liv-ac-uk/apel
|
1dcd3b809e445b0c2284017339638cfaab44892e
|
[
"Apache-2.0"
] | null | null | null |
apel/parsers/lsf.py
|
sjones-hep-ph-liv-ac-uk/apel
|
1dcd3b809e445b0c2284017339638cfaab44892e
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright (C) 2012 STFC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Konrad Jopek, Will Rogers
'''
import logging
import re
from apel.db.records.event import EventRecord
from apel.parsers import Parser
log = logging.getLogger(__name__)
class LSFParser(Parser):
'''
LSFParser parses LSF accounting logs from all LSF versions.
The expression below describes elements which we are looking for.
Here is some explanation:
    In the accounting log file we have only two types of data: strings and numbers.
    Strings may contain quoted strings inside, marked like: ""this is a quoted string"".
    Parsing those lines incorrectly can destroy the layout of fields (we can get
    more or fewer fields than the documentation assumes).
    (\"([^"]|(""))*\") - matches everything between " and " except for a lone double
    quotation mark ("); doubled quotation marks ("") are treated as part of the field.
    ([-]?\d+(\.\d*)?) - matches integer and float numbers.
Example line from LSF accounting log:
"JOB_FINISH" "5.1" 1089407406 699195 283 33554482 1 1089290023 0 0 1089406862
"raortega" "8nm" "" "" "" "lxplus015" "prog/step3c" "" "/afs/cern.ch/user/r/raortega/log/bstep3c-362.txt"
"/afs/cern.ch/user/r/raortega/log/berr-step3c-362.txt" "1089290023.699195" 0 1 "tbed0079" 64 3.3 ""
"/afs/cern.ch/user/r/raortega/prog/step3c/startEachset.pl 362 7 8" 277.210000 17.280000 0 0 -1 0 0 927804
87722 0 0 0 -1 0 0 0 0 0 -1 "" "default" 0 1 "" "" 0 310424 339112 "" "" ""
    The line above was split for readability; to rejoin it, use ' ' as the joiner.
'''
EXPR = re.compile(r'''(
(\"([^"]|(""))*\")
|
([-]?\d+(\.\d*)?)
)''', re.VERBOSE)
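    # Illustrative example (not part of the original code): for the input
    #   '"JOB_FINISH" "5.1" 1089407406'
    # EXPR.findall() returns one tuple per field, whose first element is the
    # matched text: '"JOB_FINISH"', '"5.1"' and '1089407406'.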
def __init__(self, site, machine_name, mpi):
Parser.__init__(self, site, machine_name, mpi)
self._scale_hf = False
def set_scaling(self, scale_hf):
'''
Set to true if you want to scale CPU duration and wall duration
according to the 'HostFactor' value in the log file.
'''
if scale_hf:
log.info('Will scale durations according to host factor specified in log file.')
self._scale_hf = scale_hf
def parse(self, line):
        # Strip the surrounding quotes from quoted fields and unescape doubled
        # quotation marks inside them. A conditional expression is used instead
        # of the old <condition and expr1 or expr2> idiom, which silently
        # returns the wrong value when expr1 is falsy (e.g. an empty quoted
        # field "" would come through as '""' rather than '').
        items = [x[0][1:-1].replace('""', '"') if x[0].startswith('"') else x[0]
                 for x in self.EXPR.findall(line)]
if items[0] != 'JOB_FINISH':
return None
num_asked = int(items[22])
num_exec = int(items[23 + num_asked])
offset = num_asked + num_exec
# scale by host factor if option is chosen
if self._scale_hf:
host_factor = float(items[25 + num_asked + num_exec])
else:
host_factor = 1
if self._mpi:
# get unique values for the different hosts listed after num_exec
nnodes = len(set(items[24 + num_asked:24 + offset]))
ncores = num_exec
else:
nnodes = 0
ncores = 0
mapping = {'Site' : lambda x: self.site_name,
'JobName' : lambda x: x[3],
'LocalUserID' : lambda x: x[11],
'LocalUserGroup': lambda x: "",
'WallDuration' : lambda x: int(host_factor * (int(x[2]) - int(x[10]))),
'CpuDuration' : lambda x: int(round(host_factor * (float(x[28+offset]) + float(x[29+offset])))),
'StartTime' : lambda x: int(x[10]),
'StopTime' : lambda x: int(x[2]),
'Infrastructure': lambda x: "APEL-CREAM-LSF",
'Queue' : lambda x: x[12],
'MachineName' : lambda x: self.machine_name,
'MemoryReal' : lambda x: int(x[54+offset]) > 0 and int(x[54+offset]) or 0,
'MemoryVirtual' : lambda x: int(x[55+offset]) > 0 and int(x[55+offset]) or 0,
'Processors' : lambda x: ncores,
'NodeCount' : lambda x: nnodes}
data = {}
for key in mapping:
data[key] = mapping[key](items)
# Input checking
if data['CpuDuration'] < 0:
raise ValueError('Negative CpuDuration value')
if data['WallDuration'] < 0:
raise ValueError('Negative WallDuration value')
if data['StopTime'] < data['StartTime']:
raise ValueError('StopTime less than StartTime')
record = EventRecord()
record.set_all(data)
return record
| 38.906475
| 117
| 0.579697
|
9d024bffa3240e84fc7fa196204b7f1ce5082267
| 16,418
|
py
|
Python
|
pandas/tests/series/indexing/test_boolean.py
|
r00ta/pandas
|
33f91d8f9f2e84f2b5f3ac3f0481b691c977c427
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2019-04-16T21:03:23.000Z
|
2021-05-08T13:25:44.000Z
|
pandas/tests/series/indexing/test_boolean.py
|
chanson90/pandas
|
3f1e5940e3929577f094ea2708f94ee184e7a336
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/series/indexing/test_boolean.py
|
chanson90/pandas
|
3f1e5940e3929577f094ea2708f94ee184e7a336
|
[
"BSD-3-Clause"
] | 1
|
2019-01-01T01:19:30.000Z
|
2019-01-01T01:19:30.000Z
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
from pandas.compat import lrange, range
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import Index, Series, Timestamp, date_range, isna
from pandas.core.indexing import IndexingError
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
from pandas.tseries.offsets import BDay
def test_getitem_boolean(test_data):
s = test_data.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty():
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isna()]
assert s.index.name == 'index_name'
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
with pytest.raises(IndexingError):
s[Series([], dtype=bool)]
with pytest.raises(IndexingError):
s[Series([True], dtype=bool)]
def test_getitem_boolean_object(test_data):
# using column from DataFrame
s = test_data.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
pytest.raises(Exception, s.__getitem__, omask)
pytest.raises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(test_data):
ts = test_data.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
pytest.raises(Exception, ts.__getitem__, mask_shifted)
pytest.raises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
pytest.raises(Exception, ts.loc.__getitem__, mask_shifted)
pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1)
# ts.loc[mask_shifted]
# ts.loc[mask_shifted] = 2
def test_setitem_boolean(test_data):
mask = test_data.series > test_data.series.median()
# similar indexed series
result = test_data.series.copy()
result[mask] = test_data.series * 2
expected = test_data.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = test_data.series.copy()
result[mask] = (test_data.series * 2)[0:5]
expected = (test_data.series * 2)[0:5].reindex_like(test_data.series)
expected[-mask] = test_data.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_get_set_boolean_different_order(test_data):
ordered = test_data.series.sort_values()
# setting
copy = test_data.series.copy()
copy[ordered > 0] = 0
expected = test_data.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = test_data.series[ordered > 0]
exp = test_data.series[test_data.series > 0]
assert_series_equal(sel, exp)
def test_where_unsafe_int(sint_dtype):
s = Series(np.arange(10), dtype=sint_dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=sint_dtype)
assert_series_equal(s, expected)
def test_where_unsafe_float(float_dtype):
s = Series(np.arange(10), dtype=float_dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=float_dtype)
assert_series_equal(s, expected)
@pytest.mark.parametrize("dtype", [np.int64, np.float64])
def test_where_unsafe_upcast(dtype):
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + lrange(5, 10), dtype="float64")
s[mask] = values
assert_series_equal(s, expected)
@pytest.mark.parametrize("dtype", [
np.int8, np.int16, np.int32, np.float32
])
def test_where_unsafe_itemsize_fail(dtype):
    # These can't be done, because they would force a change to the input's
    # item size, which boolean setitem does not allow.
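    # For example, assigning 2.5 into an int8 Series would require widening
    # the Series' dtype in place, which this boolean setitem path rejects.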
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
pytest.raises(Exception, s.__setitem__, tuple(mask), values)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(lrange(6) + values, dtype="float64")
s[mask] = values
assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
with pytest.raises(ValueError):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_raise_on_error_deprecation():
# gh-14968
# deprecation of raise_on_error
s = Series(np.random.randn(5))
cond = s > 0
with tm.assert_produces_warning(FutureWarning):
s.where(cond, raise_on_error=True)
with tm.assert_produces_warning(FutureWarning):
s.mask(cond, raise_on_error=True)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
pytest.raises(ValueError, s.where, 1)
pytest.raises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
assert_series_equal(result, expected)
@pytest.mark.parametrize('cond', [
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")]
])
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where([True])
def test_where_ndframe_align():
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid():
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
with pytest.raises(ValueError):
s[0:3] = list(range(27))
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
    assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
with pytest.raises(ValueError):
s[0:4:2] = list(range(27))
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
with pytest.raises(ValueError):
s[:-1] = list(range(27))
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
with pytest.raises(ValueError):
s[[0, 1, 2]] = list(range(27))
s = Series(list('abc'))
with pytest.raises(ValueError):
s[[0, 1, 2]] = list(range(2))
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
@pytest.mark.parametrize('size', range(2, 6))
@pytest.mark.parametrize('mask', [
[True, False, False, False, False],
[True, False],
[False]
])
@pytest.mark.parametrize('item', [
2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min
])
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize('box', [
lambda x: np.array([x]),
lambda x: [x],
lambda x: (x,)
])
def test_broadcast(size, mask, item, box):
selection = np.resize(mask, size)
data = np.arange(size, dtype=float)
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
s = Series(data)
s[selection] = box(item)
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, box(item))
assert_series_equal(result, expected)
s = Series(data)
result = s.mask(selection, box(item))
assert_series_equal(result, expected)
def test_where_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups():
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_numeric_with_string():
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
def test_where_timedelta_coerce():
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='object')
assert_series_equal(rs, expected)
def test_where_datetime_conversion():
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='object')
assert_series_equal(rs, expected)
# GH 15701
timestamps = ['2016-12-31 12:00:04+00:00',
'2016-12-31 12:00:04.010000+00:00']
s = Series([pd.Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
assert_series_equal(rs, expected)
def test_where_dt_tz_values(tz_naive_fixture):
ser1 = pd.Series(pd.DatetimeIndex(['20150101', '20150102', '20150103'],
tz=tz_naive_fixture))
ser2 = pd.Series(pd.DatetimeIndex(['20160514', '20160515', '20160516'],
tz=tz_naive_fixture))
mask = pd.Series([True, True, False])
result = ser1.where(mask, ser2)
exp = pd.Series(pd.DatetimeIndex(['20150101', '20150102', '20160516'],
tz=tz_naive_fixture))
assert_series_equal(exp, result)
def test_mask():
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
pytest.raises(ValueError, s.mask, 1)
pytest.raises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
# see gh-21891
s = Series([1, 2])
res = s.mask([True, False])
exp = Series([np.nan, 2])
tm.assert_series_equal(res, exp)
def test_mask_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
| 26.395498
| 75
| 0.621818
|
d72ba7707b4cce1ceb1dbc9c06f199e72acd3213
| 3,466
|
py
|
Python
|
test/functional/mempool_limit.py
|
thothd/unit-e
|
44cd02af44592ebfa99f276e20775ed7d2182d02
|
[
"MIT"
] | 36
|
2019-04-17T18:58:51.000Z
|
2022-01-18T12:16:27.000Z
|
test/functional/mempool_limit.py
|
Danpetersen448/unit-e
|
4ca86fc55a41e0daeb4409de2719a5523b6007c6
|
[
"MIT"
] | 109
|
2019-04-17T17:19:45.000Z
|
2019-06-19T15:16:37.000Z
|
test/functional/mempool_limit.py
|
Danpetersen448/unit-e
|
4ca86fc55a41e0daeb4409de2719a5523b6007c6
|
[
"MIT"
] | 16
|
2019-04-17T17:35:42.000Z
|
2020-01-09T17:51:05.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from decimal import Decimal
from test_framework.test_framework import UnitETestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class MempoolLimitTest(UnitETestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0", "-stakesplitthreshold=5000000000"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.setup_stake_coins(*self.nodes)
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        self.log.info('Check that mempoolminfee is minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
        for i in range(3):
            txids.append(create_lots_of_big_transactions(self.nodes[0], txouts,
                                                         utxos[30*i:30*i+30], 30, (i+1)*base_fee))
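        # Each batch of 30 large transactions pays a progressively higher fee
        # (1x, 2x, 3x base_fee), so the low-fee tx above is evicted first once
        # the 5 MB mempool limit set via -maxmempool=5 is hit.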
self.log.info('The tx should be evicted by now')
assert txid not in self.nodes[0].getrawmempool()
txdata = self.nodes[0].gettransaction(txid)
        assert txdata['confirmations'] == 0  # confirmations should still be 0
        self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
self.log.info('Create a mempool tx that will not pass mempoolminfee')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
# specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex'])
if __name__ == '__main__':
MempoolLimitTest().main()
| 48.138889
| 166
| 0.681766
|
c220397c78b384479f61aca97158086b1681443c
| 7,448
|
py
|
Python
|
src/python/pants/backend/project_info/tasks/depmap.py
|
StephanErb/pants
|
a368267b6b4cf50138ba567f582409ed31bf5db9
|
[
"Apache-2.0"
] | 94
|
2015-01-15T21:24:20.000Z
|
2022-02-16T16:55:43.000Z
|
src/python/pants/backend/project_info/tasks/depmap.py
|
StephanErb/pants
|
a368267b6b4cf50138ba567f582409ed31bf5db9
|
[
"Apache-2.0"
] | 5
|
2020-07-18T01:04:43.000Z
|
2021-05-10T08:40:56.000Z
|
src/python/pants/backend/project_info/tasks/depmap.py
|
StephanErb/pants
|
a368267b6b4cf50138ba567f582409ed31bf5db9
|
[
"Apache-2.0"
] | 47
|
2015-02-25T02:20:07.000Z
|
2022-03-21T00:59:16.000Z
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from pants.base.exceptions import TaskError
from pants.java.jar.jar_dependency import JarDependency
from pants.task.console_task import ConsoleTask
class Depmap(ConsoleTask):
"""Depict the target's dependencies.
Generates either a textual dependency tree or a graphviz digraph dot file for the dependency
set of a target.
"""
class SourceRootTypes(object):
"""Defines SourceRoot Types Constants"""
SOURCE = 'SOURCE' # Source Target
TEST = 'TEST' # Test Target
SOURCE_GENERATED = 'SOURCE_GENERATED' # Code Gen Source Targets
EXCLUDED = 'EXCLUDED' # Excluded Target
RESOURCE = 'RESOURCE' # Resource belonging to Source Target
TEST_RESOURCE = 'TEST_RESOURCE' # Resource belonging to Test Target
@classmethod
def register_options(cls, register):
super(Depmap, cls).register_options(register)
register('--internal-only', type=bool,
help='Specifies that only internal dependencies should be included in the graph '
'output (no external jars).')
register('--external-only', type=bool,
help='Specifies that only external dependencies should be included in the graph '
'output (only external jars).')
register('--minimal', type=bool,
help='For a textual dependency tree, only prints a dependency the 1st '
'time it is encountered. This is a no-op for --graph.')
register('--graph', type=bool,
help='Specifies the internal dependency graph should be output in the dot digraph '
'format.')
register('--tree', type=bool,
help='For text output, show an ascii tree to help visually line up indentions.')
register('--show-types', type=bool,
help='Show types of objects in depmap --graph.')
register('--separator', default='-',
help='Specifies the separator to use between the org/name/rev components of a '
'dependency\'s fully qualified name.')
def __init__(self, *args, **kwargs):
super(Depmap, self).__init__(*args, **kwargs)
self.is_internal_only = self.get_options().internal_only
self.is_external_only = self.get_options().external_only
if self.is_internal_only and self.is_external_only:
raise TaskError('At most one of --internal-only or --external-only can be selected.')
self.is_minimal = self.get_options().minimal
self.is_graph = self.get_options().graph
self.should_tree = self.get_options().tree
self.show_types = self.get_options().show_types
self.separator = self.get_options().separator
self.target_aliases_map = None
def console_output(self, targets):
if len(self.context.target_roots) == 0:
raise TaskError("One or more target addresses are required.")
for target in self.context.target_roots:
out = self._output_digraph(target) if self.is_graph else self._output_dependency_tree(target)
for line in out:
yield line
def _dep_id(self, dependency):
"""Returns a tuple of dependency_id, is_internal_dep."""
params = dict(sep=self.separator)
if isinstance(dependency, JarDependency):
# TODO(kwilson): handle 'classifier' and 'type'.
params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
is_internal_dep = False
else:
params.update(org='internal', name=dependency.id)
is_internal_dep = True
return ('{org}{sep}{name}{sep}{rev}' if params.get('rev') else
'{org}{sep}{name}').format(**params), is_internal_dep
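  # Illustrative examples (hypothetical values): a JarDependency with
  # org='org.slf4j', name='slf4j-api', rev='1.7.5' yields
  # ('org.slf4j-slf4j-api-1.7.5', False) with the default '-' separator,
  # while an internal target yields ('internal-<target id>', True).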
def _enumerate_visible_deps(self, dep, predicate):
    # We present the dependencies out of classpath order: alphabetized internal
    # deps first, then alphabetized external deps, for ease of scanning the output.
dependencies = sorted(x for x in getattr(dep, 'dependencies', []))
if not self.is_internal_only:
dependencies.extend(sorted((x for x in getattr(dep, 'jar_dependencies', [])),
key=lambda x: (x.org, x.name, x.rev, x.classifier)))
for inner_dep in dependencies:
dep_id, internal = self._dep_id(inner_dep)
if predicate(internal):
yield inner_dep
def output_candidate(self, internal):
return ((not self.is_internal_only and not self.is_external_only)
or (self.is_internal_only and internal)
or (self.is_external_only and not internal))
def _output_dependency_tree(self, target):
"""Plain-text depmap output handler."""
def make_line(dep, indent, is_dupe=False):
indent_join, indent_chars = ('--', ' |') if self.should_tree else ('', ' ')
dupe_char = '*' if is_dupe else ''
return ''.join((indent * indent_chars, indent_join, dupe_char, dep))
def output_deps(dep, indent, outputted, stack):
dep_id, internal = self._dep_id(dep)
if self.is_minimal and dep_id in outputted:
return
if self.output_candidate(internal):
yield make_line(dep_id,
0 if self.is_external_only else indent,
is_dupe=dep_id in outputted)
outputted.add(dep_id)
for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate):
for item in output_deps(sub_dep, indent + 1, outputted, stack + [(dep_id, indent)]):
yield item
for item in output_deps(target, 0, set(), []):
yield item
def _output_digraph(self, target):
"""Graphviz format depmap output handler."""
color_by_type = {}
def maybe_add_type(dep, dep_id):
"""Add a class type to a dependency id if --show-types is passed."""
return dep_id if not self.show_types else '\\n'.join((dep_id, dep.__class__.__name__))
def make_node(dep, dep_id, internal):
line_fmt = ' "{id}" [style=filled, fillcolor={color}{internal}];'
int_shape = ', shape=ellipse' if not internal else ''
dep_class = dep.__class__.__name__
if dep_class not in color_by_type:
color_by_type[dep_class] = len(color_by_type.keys()) + 1
return line_fmt.format(id=dep_id, internal=int_shape, color=color_by_type[dep_class])
def make_edge(from_dep_id, to_dep_id, internal):
style = ' [style=dashed]' if not internal else ''
return ' "{}" -> "{}"{};'.format(from_dep_id, to_dep_id, style)
def output_deps(dep, parent, parent_id, outputted):
dep_id, internal = self._dep_id(dep)
if dep_id not in outputted:
yield make_node(dep, maybe_add_type(dep, dep_id), internal)
outputted.add(dep_id)
for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate):
for item in output_deps(sub_dep, dep, dep_id, outputted):
yield item
if parent:
edge_id = (parent_id, dep_id)
if edge_id not in outputted:
yield make_edge(maybe_add_type(parent, parent_id), maybe_add_type(dep, dep_id), internal)
outputted.add(edge_id)
yield 'digraph "{}" {{'.format(target.id)
yield ' node [shape=rectangle, colorscheme=set312;];'
yield ' rankdir=LR;'
for line in output_deps(target, parent=None, parent_id=None, outputted=set()):
yield line
yield '}'
| 41.149171
| 99
| 0.673469
|
06e7ffd437d2c524688a9d379430198139f8a8fd
| 1,468
|
py
|
Python
|
test/test_inline_response40025.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
test/test_inline_response40025.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
test/test_inline_response40025.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.create_coins_transaction_from_address_for_whole_amount_e400 import CreateCoinsTransactionFromAddressForWholeAmountE400
globals()['CreateCoinsTransactionFromAddressForWholeAmountE400'] = CreateCoinsTransactionFromAddressForWholeAmountE400
from cryptoapis.model.inline_response40025 import InlineResponse40025
class TestInlineResponse40025(unittest.TestCase):
"""InlineResponse40025 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse40025(self):
"""Test InlineResponse40025"""
# FIXME: construct object with mandatory attributes with example values
# model = InlineResponse40025() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 37.641026
| 484
| 0.779292
|
1fcb28ada48f87dc469b2253b23eedf906a30674
| 10,120
|
gyp
|
Python
|
src/third_party/cacheinvalidation/cacheinvalidation.gyp
|
jxjnjjn/chromium
|
435c1d02fd1b99001dc9e1e831632c894523580d
|
[
"Apache-2.0"
] | 9
|
2018-09-21T05:36:12.000Z
|
2021-11-15T15:14:36.000Z
|
third_party/cacheinvalidation/cacheinvalidation.gyp
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
third_party/cacheinvalidation/cacheinvalidation.gyp
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2018-11-28T14:54:13.000Z
|
2020-07-02T07:36:07.000Z
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
# This library should build cleanly with the extra warnings turned on
# for Chromium.
'chromium_code': 1,
},
'targets': [
# The C++ files generated from the cache invalidation protocol buffers.
{
'target_name': 'cacheinvalidation_proto_cpp',
'type': 'static_library',
'variables': {
# The relative path of the cacheinvalidation proto files from this
# gyp-file.
# TODO(akalin): Add a RULE_INPUT_DIR predefined variable to gyp so
# we don't need this variable.
'proto_dir_relpath': 'google/cacheinvalidation',
# Where files generated from proto files are put.
'proto_in_dir': 'src/<(proto_dir_relpath)',
'proto_out_dir': '<(proto_dir_relpath)',
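        # Illustrative (the exact rule lives in build/protoc.gypi, assumed
        # here): e.g. <(proto_in_dir)/types.proto is compiled to
        # types.pb.h/.cc under the generated-output directory rooted at
        # <(proto_out_dir).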
},
'sources': [
'<(proto_in_dir)/client.proto',
'<(proto_in_dir)/client_gateway.proto',
'<(proto_in_dir)/client_protocol.proto',
'<(proto_in_dir)/client_test_internal.proto',
'<(proto_in_dir)/types.proto',
],
'includes': [ '../../build/protoc.gypi' ],
'direct_dependent_settings': {
'include_dirs': [
'<(proto_out_dir)',
],
},
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
# The main cache invalidation library. External clients should depend
# only on this.
{
'target_name': 'cacheinvalidation',
'type': 'static_library',
'sources': [
'overrides/google/cacheinvalidation/deps/callback.h',
'overrides/google/cacheinvalidation/deps/gmock.h',
'overrides/google/cacheinvalidation/deps/googletest.h',
'overrides/google/cacheinvalidation/deps/logging.h',
'overrides/google/cacheinvalidation/deps/mutex.h',
'overrides/google/cacheinvalidation/deps/random.h',
'overrides/google/cacheinvalidation/deps/sha1-digest-function.h',
'overrides/google/cacheinvalidation/deps/scoped_ptr.h',
'overrides/google/cacheinvalidation/deps/stl-namespace.h',
'overrides/google/cacheinvalidation/deps/string_util.h',
'overrides/google/cacheinvalidation/deps/time.h',
'src/google/cacheinvalidation/deps/digest-function.h',
'src/google/cacheinvalidation/impl/basic-system-resources.cc',
'src/google/cacheinvalidation/impl/basic-system-resources.h',
'src/google/cacheinvalidation/impl/checking-invalidation-listener.cc',
'src/google/cacheinvalidation/impl/checking-invalidation-listener.h',
'src/google/cacheinvalidation/impl/client-protocol-namespace-fix.h',
'src/google/cacheinvalidation/impl/constants.cc',
'src/google/cacheinvalidation/impl/constants.h',
'src/google/cacheinvalidation/impl/digest-store.h',
'src/google/cacheinvalidation/impl/exponential-backoff-delay-generator.cc',
'src/google/cacheinvalidation/impl/exponential-backoff-delay-generator.h',
'src/google/cacheinvalidation/impl/invalidation-client-core.cc',
'src/google/cacheinvalidation/impl/invalidation-client-core.h',
'src/google/cacheinvalidation/impl/invalidation-client-factory.cc',
'src/google/cacheinvalidation/impl/invalidation-client-impl.cc',
'src/google/cacheinvalidation/impl/invalidation-client-impl.h',
'src/google/cacheinvalidation/impl/invalidation-client-util.h',
'src/google/cacheinvalidation/impl/log-macro.h',
'src/google/cacheinvalidation/impl/object-id-digest-utils.cc',
'src/google/cacheinvalidation/impl/object-id-digest-utils.h',
'src/google/cacheinvalidation/impl/persistence-utils.cc',
'src/google/cacheinvalidation/impl/persistence-utils.h',
'src/google/cacheinvalidation/impl/proto-converter.cc',
'src/google/cacheinvalidation/impl/proto-converter.h',
'src/google/cacheinvalidation/impl/proto-helpers.h',
'src/google/cacheinvalidation/impl/proto-helpers.cc',
'src/google/cacheinvalidation/impl/protocol-handler.cc',
'src/google/cacheinvalidation/impl/protocol-handler.h',
'src/google/cacheinvalidation/impl/recurring-task.cc',
'src/google/cacheinvalidation/impl/recurring-task.h',
'src/google/cacheinvalidation/impl/registration-manager.cc',
'src/google/cacheinvalidation/impl/registration-manager.h',
'src/google/cacheinvalidation/impl/run-state.h',
'src/google/cacheinvalidation/impl/safe-storage.cc',
'src/google/cacheinvalidation/impl/safe-storage.h',
'src/google/cacheinvalidation/impl/simple-registration-store.cc',
'src/google/cacheinvalidation/impl/simple-registration-store.h',
'src/google/cacheinvalidation/impl/smearer.h',
'src/google/cacheinvalidation/impl/statistics.cc',
'src/google/cacheinvalidation/impl/statistics.h',
'src/google/cacheinvalidation/impl/throttle.cc',
'src/google/cacheinvalidation/impl/throttle.h',
'src/google/cacheinvalidation/impl/ticl-message-validator.cc',
'src/google/cacheinvalidation/impl/ticl-message-validator.h',
'src/google/cacheinvalidation/include/invalidation-client.h',
'src/google/cacheinvalidation/include/invalidation-client-factory.h',
'src/google/cacheinvalidation/include/invalidation-listener.h',
'src/google/cacheinvalidation/include/system-resources.h',
'src/google/cacheinvalidation/include/types.h',
],
'include_dirs': [
'./overrides',
'./src',
],
'dependencies': [
'../../base/base.gyp:base',
'cacheinvalidation_proto_cpp',
],
'direct_dependent_settings': {
'include_dirs': [
'./overrides',
'./src',
],
},
# We avoid including header files from
# cacheinvalidation_proto_cpp in our public header files so we
# don't need to export its settings.
'export_dependent_settings': [
'../../base/base.gyp:base',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
# Unittests for the cache invalidation library.
# TODO(ghc): Write native tests and include them here.
{
'target_name': 'cacheinvalidation_unittests',
'type': 'executable',
'sources': [
'src/google/cacheinvalidation/test/deterministic-scheduler.cc',
'src/google/cacheinvalidation/test/deterministic-scheduler.h',
'src/google/cacheinvalidation/test/test-logger.cc',
'src/google/cacheinvalidation/test/test-logger.h',
'src/google/cacheinvalidation/test/test-utils.cc',
'src/google/cacheinvalidation/test/test-utils.h',
'src/google/cacheinvalidation/impl/invalidation-client-impl_test.cc',
'src/google/cacheinvalidation/impl/protocol-handler_test.cc',
'src/google/cacheinvalidation/impl/recurring-task_test.cc',
'src/google/cacheinvalidation/impl/throttle_test.cc',
],
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:run_all_unittests',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'cacheinvalidation',
'cacheinvalidation_proto_cpp',
],
},
],
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'cacheinvalidation_unittests_run',
'type': 'none',
'dependencies': [
'cacheinvalidation_unittests',
],
'includes': [
'../../build/isolate.gypi',
'cacheinvalidation_unittests.isolate',
],
'sources': [
'cacheinvalidation_unittests.isolate',
],
},
],
}],
['OS == "android"', {
'targets': [
{
'target_name': 'cacheinvalidation_proto_java',
'type': 'none',
'variables': {
'proto_in_dir': '../../third_party/cacheinvalidation/src/proto',
},
'sources': [
'<(proto_in_dir)/android_channel.proto',
'<(proto_in_dir)/android_listener.proto',
'<(proto_in_dir)/android_service.proto',
'<(proto_in_dir)/android_state.proto',
'<(proto_in_dir)/channel.proto',
'<(proto_in_dir)/channel_common.proto',
'<(proto_in_dir)/client.proto',
'<(proto_in_dir)/client_protocol.proto',
'<(proto_in_dir)/java_client.proto',
'<(proto_in_dir)/types.proto',
],
'includes': [ '../../build/protoc_java.gypi' ],
},
{
'target_name': 'cacheinvalidation_javalib',
'type': 'none',
'dependencies': [
'../../third_party/android_tools/android_tools.gyp:android_gcm',
'../../third_party/guava/guava.gyp:guava_javalib',
'cacheinvalidation_aidl_javalib',
'cacheinvalidation_proto_java',
],
'variables': {
'java_in_dir': '../../build/android/empty',
'additional_src_dirs': [ 'src/java/' ],
},
'includes': [ '../../build/java.gypi' ],
},
{
'target_name': 'cacheinvalidation_aidl_javalib',
'type': 'none',
'variables': {
# TODO(shashishekhar): aidl_interface_file should be made optional.
'aidl_interface_file':'<(android_sdk)/framework.aidl'
},
'sources': [
'src/java/com/google/ipc/invalidation/external/client/android/service/InvalidationService.aidl',
'src/java/com/google/ipc/invalidation/external/client/android/service/ListenerService.aidl',
'src/java/com/google/ipc/invalidation/testing/android/InvalidationTest.aidl',
],
'includes': [ '../../build/java_aidl.gypi' ],
},
],
}],
],
}
| 43.433476
| 108
| 0.6333
|
7f14a6506cbdb82a4981304a2e0fcbd1004d3515
| 440
|
py
|
Python
|
vlan.py
|
ossih/cisco-interfaces
|
69222fca9a3b24c7cfe15e19bdb51dd78a367f8e
|
[
"MIT"
] | null | null | null |
vlan.py
|
ossih/cisco-interfaces
|
69222fca9a3b24c7cfe15e19bdb51dd78a367f8e
|
[
"MIT"
] | null | null | null |
vlan.py
|
ossih/cisco-interfaces
|
69222fca9a3b24c7cfe15e19bdb51dd78a367f8e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import yaml
import argparse
## This is old and deprecated
## Use of if.py is recommended instead
parser = argparse.ArgumentParser('vlan.py')
parser.add_argument('-c', '--config', required=True)
args = parser.parse_args()
with open(args.config, 'r') as f:
    config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
for name, vconf in config['vlans'].items():
vid = vconf['vid']
print('vlan %i' % vid)
print(' name %s' % name)
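# Example output (hypothetical config {vlans: {mgmt: {vid: 10}}}):
#   vlan 10
#    name mgmt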
| 18.333333
| 52
| 0.652273
|
2016e98128404063494c3400010fa1b442c27464
| 1,218
|
py
|
Python
|
socialite/apps/base/oauth/backends.py
|
dgouldin/django-socialite
|
4de4060f5d8ab89ae9dd4032e66e526873351ba4
|
[
"MIT"
] | 1
|
2015-01-30T19:05:26.000Z
|
2015-01-30T19:05:26.000Z
|
socialite/apps/base/oauth/backends.py
|
dgouldin/django-socialite
|
4de4060f5d8ab89ae9dd4032e66e526873351ba4
|
[
"MIT"
] | null | null | null |
socialite/apps/base/oauth/backends.py
|
dgouldin/django-socialite
|
4de4060f5d8ab89ae9dd4032e66e526873351ba4
|
[
"MIT"
] | 2
|
2016-06-26T13:49:31.000Z
|
2021-08-13T01:00:15.000Z
|
from django.contrib.auth.models import User
class BaseOauthBackend:
def validate_service_type(self, base_url):
        raise NotImplementedError
    def get_existing_user(self, access_token, impersonate=None):
        raise NotImplementedError
    def register_user(self, access_token, impersonate=None):
        """
        FIXME: Would be good to document that if you want a UserProfile created
        for OAuth-created users, you'll want to hook the post_save signal for User.
        """
        raise NotImplementedError
def authenticate(self, client=None, access_token=None, impersonate=None):
if client is None or access_token is None or not self.validate_service_type(getattr(client, 'base_url', None)):
return None
user = self.get_existing_user(access_token, impersonate=impersonate)
if user:
return user
import logging
logging.error("FIXME: need to put service prefix on user names to make it clearer which service a user was created for.")
user = self.register_user(access_token, impersonate=impersonate)
return user
def get_user(self, id):
try:
return User.objects.get(pk=id)
except User.DoesNotExist:
return None
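# Typical wiring (illustrative, not part of the original module): a concrete
# subclass implements the three abstract methods above and is listed in
# settings.AUTHENTICATION_BACKENDS, so that
# django.contrib.auth.authenticate(client=..., access_token=...) reaches it.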
| 36.909091
| 129
| 0.673235
|
02054b5a46cc89263231a69ac2f6fa5b67cb1382
| 6,391
|
py
|
Python
|
tools/train_net.py
|
qdmy/Adelaidet-Quantization
|
e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b
|
[
"Apache-2.0"
] | null | null | null |
tools/train_net.py
|
qdmy/Adelaidet-Quantization
|
e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b
|
[
"Apache-2.0"
] | null | null | null |
tools/train_net.py
|
qdmy/Adelaidet-Quantization
|
e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Detection Training Script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
import sys
from collections import OrderedDict
import torch
import detectron2_ofa.utils.comm as comm
from detectron2_ofa.checkpoint import DetectionCheckpointer
from detectron2_ofa.config import get_cfg
from detectron2_ofa.data import MetadataCatalog
from detectron2_ofa.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2_ofa.evaluation import (
CityscapesEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2_ofa.modeling import GeneralizedRCNNWithTTA
import third_party
class Trainer(DefaultTrainer):
"""
    We use the "DefaultTrainer", which contains a number of pre-defined pieces of logic for
    the standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesEvaluator(dataset_name)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
if getattr(cfg.MODEL, 'fp16', False):
try:
from apex import amp
apex_enable = True
except ImportError:
apex_enable = False
        if not (torch.backends.cudnn.enabled and apex_enable):
            setattr(cfg.MODEL, 'fp16', False)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop or subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(output_dir=cfg.OUTPUT_DIR, resume=True)
if cfg.TEST.AUG.ENABLED:
print('*'*10 + "run test_with_TTA")
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
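# --- Invocation sketch (config path and weights path are placeholders; the flags
# come from detectron2's default_argument_parser) ---
# Single-machine training on 8 GPUs:
#   python tools/train_net.py --config-file configs/my_config.yaml --num-gpus 8
# Evaluation only, overriding the weights through the trailing opts:
#   python tools/train_net.py --config-file configs/my_config.yaml --eval-only \
#       MODEL.WEIGHTS /path/to/model_checkpoint.pth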
| 36.3125
| 103
| 0.662964
|
e93c587b1a99351ab8cdce9137e6e8aef61d223f
| 7,820
|
py
|
Python
|
alameda_api/v1alpha1/datahub/common/rawdata_pb2.py
|
containers-ai/api
|
ec0fafc7bfd17c16cff1d1737dbceacac5d09fd8
|
[
"Apache-2.0"
] | 1
|
2020-05-18T02:34:29.000Z
|
2020-05-18T02:34:29.000Z
|
alameda_api/v1alpha1/datahub/common/rawdata_pb2.py
|
containers-ai/api
|
ec0fafc7bfd17c16cff1d1737dbceacac5d09fd8
|
[
"Apache-2.0"
] | 9
|
2018-11-01T09:08:51.000Z
|
2019-01-12T07:09:06.000Z
|
alameda_api/v1alpha1/datahub/common/rawdata_pb2.py
|
containers-ai/api
|
ec0fafc7bfd17c16cff1d1737dbceacac5d09fd8
|
[
"Apache-2.0"
] | 12
|
2018-10-30T02:46:56.000Z
|
2021-04-13T07:55:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alameda_api/v1alpha1/datahub/common/rawdata.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='alameda_api/v1alpha1/datahub/common/rawdata.proto',
package='containersai.alameda.v1alpha1.datahub.common',
syntax='proto3',
serialized_options=b'Z@github.com/containers-ai/api/alameda_api/v1alpha1/datahub/common',
serialized_pb=b'\n1alameda_api/v1alpha1/datahub/common/rawdata.proto\x12,containersai.alameda.v1alpha1.datahub.common\x1a\x1fgoogle/protobuf/timestamp.proto\"]\n\tWriteData\x12\x0f\n\x07\x63olumns\x18\x01 \x03(\t\x12?\n\x04rows\x18\x02 \x03(\x0b\x32\x31.containersai.alameda.v1alpha1.datahub.common.Row\"O\n\x08ReadData\x12\x43\n\x06groups\x18\x01 \x03(\x0b\x32\x33.containersai.alameda.v1alpha1.datahub.common.Group\"?\n\x03Row\x12(\n\x04time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06values\x18\x02 \x03(\t\"Y\n\x05Group\x12\x0f\n\x07\x63olumns\x18\x01 \x03(\t\x12?\n\x04rows\x18\x02 \x03(\x0b\x32\x31.containersai.alameda.v1alpha1.datahub.common.RowBBZ@github.com/containers-ai/api/alameda_api/v1alpha1/datahub/commonb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_WRITEDATA = _descriptor.Descriptor(
name='WriteData',
full_name='containersai.alameda.v1alpha1.datahub.common.WriteData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='columns', full_name='containersai.alameda.v1alpha1.datahub.common.WriteData.columns', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rows', full_name='containersai.alameda.v1alpha1.datahub.common.WriteData.rows', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=225,
)
_READDATA = _descriptor.Descriptor(
name='ReadData',
full_name='containersai.alameda.v1alpha1.datahub.common.ReadData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='groups', full_name='containersai.alameda.v1alpha1.datahub.common.ReadData.groups', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=227,
serialized_end=306,
)
_ROW = _descriptor.Descriptor(
name='Row',
full_name='containersai.alameda.v1alpha1.datahub.common.Row',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time', full_name='containersai.alameda.v1alpha1.datahub.common.Row.time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values', full_name='containersai.alameda.v1alpha1.datahub.common.Row.values', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=308,
serialized_end=371,
)
_GROUP = _descriptor.Descriptor(
name='Group',
full_name='containersai.alameda.v1alpha1.datahub.common.Group',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='columns', full_name='containersai.alameda.v1alpha1.datahub.common.Group.columns', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rows', full_name='containersai.alameda.v1alpha1.datahub.common.Group.rows', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=373,
serialized_end=462,
)
_WRITEDATA.fields_by_name['rows'].message_type = _ROW
_READDATA.fields_by_name['groups'].message_type = _GROUP
_ROW.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUP.fields_by_name['rows'].message_type = _ROW
DESCRIPTOR.message_types_by_name['WriteData'] = _WRITEDATA
DESCRIPTOR.message_types_by_name['ReadData'] = _READDATA
DESCRIPTOR.message_types_by_name['Row'] = _ROW
DESCRIPTOR.message_types_by_name['Group'] = _GROUP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WriteData = _reflection.GeneratedProtocolMessageType('WriteData', (_message.Message,), {
'DESCRIPTOR' : _WRITEDATA,
'__module__' : 'alameda_api.v1alpha1.datahub.common.rawdata_pb2'
# @@protoc_insertion_point(class_scope:containersai.alameda.v1alpha1.datahub.common.WriteData)
})
_sym_db.RegisterMessage(WriteData)
ReadData = _reflection.GeneratedProtocolMessageType('ReadData', (_message.Message,), {
'DESCRIPTOR' : _READDATA,
'__module__' : 'alameda_api.v1alpha1.datahub.common.rawdata_pb2'
# @@protoc_insertion_point(class_scope:containersai.alameda.v1alpha1.datahub.common.ReadData)
})
_sym_db.RegisterMessage(ReadData)
Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), {
'DESCRIPTOR' : _ROW,
'__module__' : 'alameda_api.v1alpha1.datahub.common.rawdata_pb2'
# @@protoc_insertion_point(class_scope:containersai.alameda.v1alpha1.datahub.common.Row)
})
_sym_db.RegisterMessage(Row)
Group = _reflection.GeneratedProtocolMessageType('Group', (_message.Message,), {
'DESCRIPTOR' : _GROUP,
'__module__' : 'alameda_api.v1alpha1.datahub.common.rawdata_pb2'
# @@protoc_insertion_point(class_scope:containersai.alameda.v1alpha1.datahub.common.Group)
})
_sym_db.RegisterMessage(Group)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
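# --- Usage sketch for the generated messages above (illustrative comments only,
# since generated files must not be edited; assumes the package is importable as
# alameda_api.v1alpha1.datahub.common.rawdata_pb2) ---
# from alameda_api.v1alpha1.datahub.common import rawdata_pb2
# row = rawdata_pb2.Row()
# row.time.GetCurrentTime()           # Timestamp well-known type: set to "now"
# row.values.extend(["cpu", "0.42"])
# data = rawdata_pb2.WriteData(columns=["metric", "value"], rows=[row])
# payload = data.SerializeToString()  # wire-format bytes
# assert rawdata_pb2.WriteData.FromString(payload) == data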
| 36.542056
| 757
| 0.757801
|
8961ecebf2ca4b4e071b2aa1e8778032bdcb8a52
| 576
|
py
|
Python
|
AST/block.py
|
CorentinGoet/miniC-Compiler
|
8631b1ce47e9de1c3a3255d7c0a941242ad48292
|
[
"MIT"
] | null | null | null |
AST/block.py
|
CorentinGoet/miniC-Compiler
|
8631b1ce47e9de1c3a3255d7c0a941242ad48292
|
[
"MIT"
] | null | null | null |
AST/block.py
|
CorentinGoet/miniC-Compiler
|
8631b1ce47e9de1c3a3255d7c0a941242ad48292
|
[
"MIT"
] | null | null | null |
"""
@author Corentin Goetghebeur (github.com/CorentinGoet)
"""
from AST.node import Node
class Block(Node):
"""
Class representation for the Block node.
The Block node has the following syntax:
Block = {Statements}
"""
def __init__(self, statements: list = None):
"""
Constructor for the Block node.
:param statements: list of Statements nodes
"""
self.statements = statements
def accept(self, visitor):
visitor.visitBlock(self)
def get_statements(self):
return self.statements
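# --- Illustrative usage; PrintVisitor is hypothetical, not part of miniC-Compiler ---
class PrintVisitor:
    def visitBlock(self, block):
        # A visitor only needs a visitBlock method for Block.accept() to dispatch to.
        for statement in block.get_statements() or []:
            print(statement)
# Block(["x = 1;", "return x;"]).accept(PrintVisitor()) prints both statements in order.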
| 18.580645
| 54
| 0.631944
|
5269bef073ba4782bf6ca10afdb8907d76f10027
| 7,820
|
py
|
Python
|
test/functional/wallet_abandonconflict.py
|
afghany/castletmp
|
9d0daed2a6abaf7d93f9308f5c602db6eeb42c8b
|
[
"MIT"
] | null | null | null |
test/functional/wallet_abandonconflict.py
|
afghany/castletmp
|
9d0daed2a6abaf7d93f9308f5c602db6eeb42c8b
|
[
"MIT"
] | null | null | null |
test/functional/wallet_abandonconflict.py
|
afghany/castletmp
|
9d0daed2a6abaf7d93f9308f5c602db6eeb42c8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import CastleTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
Decimal,
disconnect_nodes,
sync_blocks,
sync_mempools
)
class AbandonConflictTest(CastleTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-minrelaytxfee=0.00001"],[]]
def run_test(self):
self.nodes[0].generate(5)
sync_blocks(self.nodes)
self.nodes[1].generate(110)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == 10)
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == 10)
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == 10)
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = 14.99998
outputs[self.nodes[1].getnewaddress()] = 5
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = 24.9996
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
inputs = []
inputs.append({"txid":txABC2, "vout":0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = 24.999
signed3 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, Decimal(round(balance - Decimal("30") + Decimal(24.999), 8)))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.999"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
        assert(txABC2 not in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
        # But if it's received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
        # Send child tx again so it's unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = 9.9999
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
print(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| 45.465116
| 137
| 0.652685
|
c32c9f67f8b5a8560e955e4f576ad60ac25b9672
| 2,394
|
py
|
Python
|
tools/install_venv.py
|
openstack/zaqar-ui
|
bae3359ef23c0649081bea5b30afb6a6468d6fc2
|
[
"Apache-2.0"
] | 16
|
2016-01-17T08:23:47.000Z
|
2019-08-17T05:18:33.000Z
|
tools/install_venv.py
|
openstack/zaqar-ui
|
bae3359ef23c0649081bea5b30afb6a6468d6fc2
|
[
"Apache-2.0"
] | null | null | null |
tools/install_venv.py
|
openstack/zaqar-ui
|
bae3359ef23c0649081bea5b30afb6a6468d6fc2
|
[
"Apache-2.0"
] | 4
|
2016-02-05T22:04:28.000Z
|
2019-08-17T05:18:38.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv
def print_help(venv, root):
help = """
OpenStack development environment setup is complete.
OpenStack development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the OpenStack virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print(help % (venv, root))
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'OpenStack'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
| 32.794521
| 79
| 0.70802
|
7ae1952112057d9eeb14da65cc27b1ac758862b1
| 673
|
py
|
Python
|
Dataset/emnist/__init__.py
|
dtczhl/FedTuning
|
7b6274a573dfaf847cb3edae8301d8e9e866397c
|
[
"MIT"
] | null | null | null |
Dataset/emnist/__init__.py
|
dtczhl/FedTuning
|
7b6274a573dfaf847cb3edae8301d8e9e866397c
|
[
"MIT"
] | null | null | null |
Dataset/emnist/__init__.py
|
dtczhl/FedTuning
|
7b6274a573dfaf847cb3edae8301d8e9e866397c
|
[
"MIT"
] | 1
|
2022-02-14T03:21:06.000Z
|
2022-02-14T03:21:06.000Z
|
# number of classes
EMNIST_N_CLASS = 62
# number of input channel
EMNIST_N_INPUT_FEATURE = 1
# input sizes.
EMNIST_INPUT_RESIZE = (28, 28)
# top-1 accuracy
EMNIST_N_TOP_CLASS = 1
# learning rate and momentum
EMNIST_LEARNING_RATE = 0.01
EMNIST_MOMENTUM = 0.9
# train mean and std (TODO: recompute these if the dataset changes)
EMNIST_TRAIN_MEAN = 0.17685325354307277
EMNIST_TRAIN_STD = 0.3283675627372856
# test mean and std
EMNIST_TEST_MEAN = 0.17641518033197115
EMNIST_TEST_STD = 0.3279303292332604
# for dataloader: batch_size and n_worker
EMNIST_DATASET_TRAIN_BATCH_SIZE = 10
EMNIST_DATASET_TRAIN_N_WORKER = 5
# for testing
EMNIST_DATASET_TEST_BATCH_SIZE = 1000
EMNIST_DATASET_TEST_N_WORKER = 10
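# --- Illustrative use of the constants above (comments only; assumes torchvision) ---
# A typical training transform would resize, tensorize, and normalize with the
# statistics defined in this module:
# import torchvision.transforms as T
# EMNIST_TRAIN_TRANSFORM = T.Compose([
#     T.Resize(EMNIST_INPUT_RESIZE),
#     T.ToTensor(),
#     T.Normalize((EMNIST_TRAIN_MEAN,), (EMNIST_TRAIN_STD,)),
# ])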
| 21.03125
| 41
| 0.809807
|
41b871d66b9560af0ec8599c1aab5d128d02423c
| 234
|
py
|
Python
|
Chapter 1/gauss.py
|
indrag49/Computational-Stat-Mech
|
0877f54a0245fce815f03478f4fb219fd6314951
|
[
"MIT"
] | 19
|
2018-06-29T12:22:47.000Z
|
2022-03-10T03:18:18.000Z
|
Chapter 1/gauss.py
|
indrag49/Computational-Stat-Mech
|
0877f54a0245fce815f03478f4fb219fd6314951
|
[
"MIT"
] | null | null | null |
Chapter 1/gauss.py
|
indrag49/Computational-Stat-Mech
|
0877f54a0245fce815f03478f4fb219fd6314951
|
[
"MIT"
] | 7
|
2018-11-30T01:56:36.000Z
|
2021-12-23T15:29:56.000Z
|
import random
import math as m
def gauss(sigma):
    # Polar Box-Muller transform: returns a pair of independent
    # N(0, sigma^2) samples from two uniform draws.
    phi = random.uniform(0, 2*m.pi)
    # 1 - uniform[0, 1) lies in (0, 1], so the log argument can never be zero
    Upsilon = -m.log(1.0 - random.uniform(0, 1))
    r = sigma*m.sqrt(2*Upsilon)
    x = r*m.cos(phi)
    y = r*m.sin(phi)
    return [x, y]
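# --- Usage sketch: empirical sanity check of gauss(); not part of the original file ---
if __name__ == "__main__":
    sigma = 2.0
    samples = [v for _ in range(50000) for v in gauss(sigma)]
    mean = sum(samples) / len(samples)
    std = (sum((s - mean) ** 2 for s in samples) / len(samples)) ** 0.5
    print("mean ~ 0:", round(mean, 3), "| std ~ sigma:", round(std, 3))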
| 23.4
| 44
| 0.555556
|
f8d1ec2f536a50d4897bd92b5d3e6bf4c9f8e7da
| 708
|
py
|
Python
|
setup.py
|
Reclaim-The-Night-Leeds/PyParliment
|
b351505964099c8bcbd80855d21f470c5b9b3a14
|
[
"MIT"
] | null | null | null |
setup.py
|
Reclaim-The-Night-Leeds/PyParliment
|
b351505964099c8bcbd80855d21f470c5b9b3a14
|
[
"MIT"
] | 1
|
2021-04-25T21:46:12.000Z
|
2021-04-25T21:46:12.000Z
|
setup.py
|
Reclaim-The-Night-Leeds/PyParliment
|
b351505964099c8bcbd80855d21f470c5b9b3a14
|
[
"MIT"
] | null | null | null |
"""
Setup file for PyParliment.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 4.0.1.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
from setuptools import setup
if __name__ == "__main__":
try:
setup(use_scm_version={"version_scheme": "no-guess-dev"})
except: # noqa
print(
"\n\nAn error occurred while building the project, "
"please ensure you have the most updated version of setuptools, "
"setuptools_scm and wheel with:\n"
" pip install -U setuptools setuptools_scm wheel\n\n"
)
raise
| 32.181818
| 77
| 0.641243
|
67779fa3efd80fe4038cb069f37e8ede8a385b23
| 3,114
|
py
|
Python
|
test/python/test_sparse_logreg.py
|
tbjohns/BlitzML
|
0523743e1ae3614bfe3f16aa226d7a27fab2d623
|
[
"BSD-3-Clause"
] | 6
|
2015-06-16T05:17:17.000Z
|
2018-08-02T05:50:01.000Z
|
test/python/test_sparse_logreg.py
|
tbjohns/BlitzML
|
0523743e1ae3614bfe3f16aa226d7a27fab2d623
|
[
"BSD-3-Clause"
] | 2
|
2018-05-13T13:53:58.000Z
|
2019-06-11T14:53:26.000Z
|
test/python/test_sparse_logreg.py
|
tbjohns/BlitzML
|
0523743e1ae3614bfe3f16aa226d7a27fab2d623
|
[
"BSD-3-Clause"
] | 3
|
2018-08-02T05:50:03.000Z
|
2021-02-21T04:44:15.000Z
|
import unittest
import blitzml
import numpy as np
from scipy import sparse as sp
from common import captured_output
from common import matrix_vector_product
from common import normalize_labels
def is_solution(sol, A, b, lam, tol=1e-3):
Aomega = sol.bias + matrix_vector_product(A, sol.weights)
exp_bAomega = np.exp(b * Aomega)
grads = matrix_vector_product(A.T, -b / (1 + exp_bAomega))
max_grads = np.max(abs(grads))
if max_grads > lam * (1 + tol):
return False
pos_grads_diff = grads[sol.weights > 0] + lam
if len(pos_grads_diff) and max(abs(pos_grads_diff)) > lam * tol:
return False
neg_grads_diff = grads[sol.weights < 0] - lam
if len(neg_grads_diff) and max(abs(neg_grads_diff)) > lam * tol:
return False
return True
class TestSparseLogRegInitialConditions(unittest.TestCase):
def test_sparse_logreg_bad_initial_conditions(self):
n = 7
d = 3
A = np.arange(n * d).reshape(n, d)
b = normalize_labels(np.arange(n), True)
prob = blitzml.SparseLogisticRegressionProblem(A, b)
lammax = prob.compute_max_l1_penalty()
weights0 = -1 * np.arange(d)
lam = 0.02 * lammax
sol = prob.solve(lam, initial_weights=weights0, stopping_tolerance=1e-6)
self.assertEqual(is_solution(sol, A, b, lam), True)
def test_sparse_logreg_good_initial_conditions(self):
n = 9
d = 21
np.random.seed(0)
A = np.random.randn(n, d)
b = normalize_labels(np.random.randn(n), True)
prob = blitzml.SparseLogisticRegressionProblem(A, b)
lammax = prob.compute_max_l1_penalty()
lam = 0.03 * lammax
sol0 = prob.solve(lam, stopping_tolerance=1e-4, max_time=1.0)
sol = prob.solve(lam, initial_weights=sol0.weights, max_time=-1.0)
self.assertEqual(is_solution(sol, A, b, lam), True)
class TestSparseLogRegBadLabels(unittest.TestCase):
def test_sparse_logreg_non_pm1_labels(self):
b = np.array([-1., 0., 1.])
A = np.zeros((3, 3))
with captured_output() as out:
prob = blitzml.SparseLogisticRegressionProblem(A, b)
message = out[0]
self.assertIn("Warning", message)
def test_sparse_logreg_bad_label_too_large(self):
b = np.array([-1., 0., 2.])
A = np.zeros((3, 3))
with captured_output() as out:
prob = blitzml.SparseLogisticRegressionProblem(A, b)
message = out[0]
self.assertIn("Warning", message)
def test_sparse_logreg_bad_label_too_small(self):
b = np.array([-1., 0., -2.])
A = np.zeros((3, 3))
with captured_output() as out:
prob = blitzml.SparseLogisticRegressionProblem(A, b)
message = out[0]
self.assertIn("Warning", message)
def test_sparse_logreg_dimension_mismatch(self):
b = np.array([-1., 0., -2.])
A = np.zeros((2, 3))
def make_prob():
prob = blitzml.SparseLogisticRegressionProblem(A, b)
self.assertRaises(ValueError, make_prob)
def test_sparse_logreg_all_positive_labels_warning(self):
b = np.array([0., 1.0, 0.5])
A = np.zeros((3, 3))
with captured_output() as out:
prob = blitzml.SparseLogisticRegressionProblem(A, b)
message = out[0]
self.assertIn("Warning", message)
| 32.778947
| 76
| 0.68754
|
cfaa0039059687f51df3cf855ff613ec13938c46
| 1,686
|
py
|
Python
|
texto_realmente_falso.py
|
manfredengler95/generacion_texto_falso
|
fc9e83dfbe6ca1aee73fd96188b1a499986038a1
|
[
"MIT"
] | null | null | null |
texto_realmente_falso.py
|
manfredengler95/generacion_texto_falso
|
fc9e83dfbe6ca1aee73fd96188b1a499986038a1
|
[
"MIT"
] | null | null | null |
texto_realmente_falso.py
|
manfredengler95/generacion_texto_falso
|
fc9e83dfbe6ca1aee73fd96188b1a499986038a1
|
[
"MIT"
] | null | null | null |
import random
# read the input file
texto = open('entrada.txt', 'r')
lista = []
diccionario = {}
anterior = ""
# print(texto.read(5))
for linea in texto:
    linea = linea.split(" ")
    lista = lista + linea
# this builds a list of the words of the text, in order
# print(lista)
#-------------------------------------------------------------------------------
# read word by word
for palabra in lista:
    # if the previous word already has a group
    if anterior in diccionario:  # dict.has_key() no longer exists in Python 3
        # (true) append the following word
        diccionario[anterior].append(palabra)
        anterior = palabra
    else:
        # (false) create the group
        diccionario[anterior] = [palabra]
        anterior = palabra
# print(str(diccionario).replace("],", "]\n"))
# go on to read the next word
texto.close()
# end of part one
#--------------------------------------------------------------------------------
# generate the new file
resultado = open(".git\\salida.txt", "a")  # escaped backslash: "\s" is not a valid escape sequence
# print("holi " + resultado.read())
# find the initial word
palabra = ""
for i in range(0, 10, 1):
    # if there is a following word
    if len(diccionario[palabra]) > 0:
        # (true) pick one, write it, and delete it
        suerte = random.randint(0, len(diccionario[palabra])-1)
        resultado.write(diccionario[palabra][suerte] + " ")
        anterior = diccionario[palabra][suerte]
        del diccionario[palabra][suerte]
        palabra = anterior
    else:
        # (false) write a period and a line break
        resultado.write(".\n")
        del diccionario[palabra]
        # pick a remaining word at random
        suerte2 = random.randint(0, len(diccionario)-1)  # randint is inclusive on both ends
        palabra = list(diccionario.keys())[suerte2]
# process finished
resultado.close()
| 31.222222
| 81
| 0.597272
|
cfdc2acb608d517c9af0fdf48da5bffa9a7c202e
| 7,709
|
py
|
Python
|
cogdl/utils/spmm_utils.py
|
fishmingyu/cogdl
|
f6f33c666feb874f13eb43a8adc5db7c918778ec
|
[
"MIT"
] | null | null | null |
cogdl/utils/spmm_utils.py
|
fishmingyu/cogdl
|
f6f33c666feb874f13eb43a8adc5db7c918778ec
|
[
"MIT"
] | null | null | null |
cogdl/utils/spmm_utils.py
|
fishmingyu/cogdl
|
f6f33c666feb874f13eb43a8adc5db7c918778ec
|
[
"MIT"
] | null | null | null |
import torch
CONFIGS = {
"fast_spmm": None,
"csrmhspmm": None,
"csr_edge_softmax": None,
"fused_gat_func": None,
"fast_spmm_cpu": None,
"spmm_flag": False,
"mh_spmm_flag": False,
"fused_gat_flag": False,
"spmm_cpu_flag": False,
}
def check_fused_gat():
return CONFIGS["fused_gat_func"] is not None
def initialize_spmm():
if CONFIGS["spmm_flag"]:
return
CONFIGS["spmm_flag"] = True
if torch.cuda.is_available():
from cogdl.operators.spmm import csrspmm
CONFIGS["fast_spmm"] = csrspmm
# if csrspmm is None:
# print("Failed to load fast version of SpMM, use torch.scatter_add instead.")
def initialize_spmm_cpu():
if CONFIGS["spmm_cpu_flag"]:
return
CONFIGS["spmm_cpu_flag"] = True
from cogdl.operators.spmm import spmm_cpu
CONFIGS["fast_spmm_cpu"] = spmm_cpu
def spmm_scatter(row, col, values, b):
r"""
Args:
(row, col): Tensor, shape=(2, E)
values : Tensor, shape=(E,)
b : Tensor, shape=(N, d)
"""
output = b.index_select(0, col) * values.unsqueeze(-1)
output = torch.zeros_like(b).scatter_add_(0, row.unsqueeze(-1).expand_as(output), output)
return output
def spmm_cpu(graph, x, fast_spmm_cpu=None):
if fast_spmm_cpu is None:
initialize_spmm_cpu()
fast_spmm_cpu = CONFIGS["fast_spmm_cpu"]
if fast_spmm_cpu is not None and str(x.device) == "cpu":
if graph.out_norm is not None:
x = graph.out_norm * x
row_ptr, col_indices = graph.row_indptr, graph.col_indices
csr_data = graph.raw_edge_weight
x = fast_spmm_cpu(row_ptr.int(), col_indices.int(), csr_data, x)
if graph.in_norm is not None:
x = graph.in_norm * x
else:
row, col = graph.edge_index
x = spmm_scatter(row, col, graph.edge_weight, x)
return x
class SpMM_CPU(torch.nn.Module):
def __init__(self):
super().__init__()
initialize_spmm_cpu()
self.fast_spmm_cpu = CONFIGS["fast_spmm_cpu"]
def forward(self, graph, x):
return spmm_cpu(graph, x, self.fast_spmm_cpu)
def spmm(graph, x, actnn=False, fast_spmm=None):
if fast_spmm is None:
initialize_spmm()
fast_spmm = CONFIGS["fast_spmm"]
if fast_spmm is not None and str(x.device) != "cpu":
if graph.out_norm is not None:
x = graph.out_norm * x
row_ptr, col_indices = graph.row_indptr, graph.col_indices
csr_data = graph.raw_edge_weight
x = fast_spmm(row_ptr.int(), col_indices.int(), x, csr_data, graph.is_symmetric(), actnn=actnn)
if graph.in_norm is not None:
x = graph.in_norm * x
else:
row, col = graph.edge_index
x = spmm_scatter(row, col, graph.edge_weight, x)
return x
class SpMM(torch.nn.Module):
def __init__(self, actnn=False):
super().__init__()
initialize_spmm()
self.actnn = actnn
self.fast_spmm = CONFIGS["fast_spmm"]
def forward(self, graph, x):
return spmm(graph, x, self.actnn, self.fast_spmm)
def initialize_edge_softmax():
if CONFIGS["mh_spmm_flag"]:
return
CONFIGS["mh_spmm_flag"] = True
if torch.cuda.is_available():
from cogdl.operators.edge_softmax import csr_edge_softmax
from cogdl.operators.mhspmm import csrmhspmm
CONFIGS["csrmhspmm"] = csrmhspmm
CONFIGS["csr_edge_softmax"] = csr_edge_softmax
def edge_softmax_val(graph, edge_val):
"""
Args:
graph: cogdl.Graph
edge_val: torch.Tensor, shape=(E, 1)
Returns:
Softmax values of edge values for nodes
"""
edge_val_max = edge_val.max().item()
while edge_val_max > 10:
edge_val -= edge_val / 2
edge_val_max = edge_val.max().item()
with graph.local_graph():
edge_val = torch.exp(edge_val)
graph.edge_weight = edge_val
x = torch.ones(graph.num_nodes, 1).to(edge_val.device)
node_sum = spmm(graph, x).squeeze()
row = graph.edge_index[0]
softmax_values = edge_val / node_sum[row]
return softmax_values
def edge_softmax(graph, edge_val, csr_edge_softmax=None):
if csr_edge_softmax is None:
initialize_edge_softmax()
csr_edge_softmax = CONFIGS["csr_edge_softmax"]
if csr_edge_softmax is not None and edge_val.device.type != "cpu":
if len(edge_val.shape) == 1:
edge_val = edge_val.view(-1, 1)
val = csr_edge_softmax(graph.row_indptr.int(), edge_val)
val = val.view(-1)
else:
val = csr_edge_softmax(graph.row_indptr.int(), edge_val)
return val
else:
val = []
for i in range(edge_val.shape[1]):
val.append(edge_softmax_val(graph, edge_val[:, i]))
return torch.stack(val).t()
class EdgeSoftmax(torch.nn.Module):
def __init__(self):
super().__init__()
initialize_edge_softmax()
self.csr_edge_softmax = CONFIGS["csr_edge_softmax"]
def forward(self, graph, edge_val):
return edge_softmax(graph, edge_val, self.csr_edge_softmax)
def mh_spmm(graph, attention, h, csrmhspmm=None, fast_spmm=None):
if csrmhspmm is None:
initialize_edge_softmax()
csrmhspmm = CONFIGS["csrmhspmm"]
nhead = h.shape[1]
if csrmhspmm is not None and h.device.type != "cpu":
if nhead > 1:
h_prime = csrmhspmm(graph.row_indptr.int(), graph.col_indices.int(), h, attention)
out = h_prime.view(h_prime.shape[0], -1)
else:
edge_weight = attention.view(-1)
with graph.local_graph():
graph.edge_weight = edge_weight
out = spmm(graph, h.squeeze(1), fast_spmm=fast_spmm)
else:
with graph.local_graph():
h_prime = []
h = h.permute(1, 0, 2).contiguous()
for i in range(nhead):
edge_weight = attention[:, i]
graph.edge_weight = edge_weight.contiguous()
hidden = h[i]
assert not torch.isnan(hidden).any()
h_prime.append(spmm(graph, hidden, fast_spmm=fast_spmm))
out = torch.cat(h_prime, dim=1)
return out
class MultiHeadSpMM(torch.nn.Module):
def __init__(self):
super().__init__()
initialize_spmm()
initialize_edge_softmax()
self.spmm = CONFIGS["fast_spmm"]
self.csrmhspmm = CONFIGS["csrmhspmm"]
def forward(self, graph, attention, h):
return mh_spmm(graph, attention, h, csrmhspmm=self.csrmhspmm, fast_spmm=self.spmm)
def initialize_fused_gat():
if CONFIGS["fused_gat_flag"]:
return
CONFIGS["fused_gat_flag"] = True
if torch.cuda.is_available():
from cogdl.operators.fused_gat import fused_gat_func
CONFIGS["fused_gat_func"] = fused_gat_func
def fused_gat_op(attn_row, attn_col, graph, negative_slope, in_feat, fused_gat_func=None):
if fused_gat_func is None:
initialize_fused_gat()
fused_gat_func = CONFIGS["fused_gat_func"]
return fused_gat_func(
attn_row,
attn_col,
graph.row_indptr.int(),
graph.col_indices.int(),
graph.row_indptr.int(),
graph.col_indices.int(),
negative_slope,
in_feat,
)
class FusedGATOp(torch.nn.Module):
def __init__(self):
super().__init__()
initialize_fused_gat()
self.fused_gat_func = CONFIGS["fused_gat_func"]
    def forward(self, attn_row, attn_col, graph, negative_slope, in_feat):
        return fused_gat_op(attn_row, attn_col, graph, negative_slope, in_feat, fused_gat_func=self.fused_gat_func)
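# --- Minimal check of spmm_scatter (pure torch; illustrative, not part of the module) ---
# For edges {(0, 1), (1, 0)} with unit weights, spmm_scatter computes A @ b,
# which for this adjacency simply swaps the two rows of b.
if __name__ == "__main__":
    row = torch.tensor([0, 1])
    col = torch.tensor([1, 0])
    values = torch.ones(2)
    b = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    print(spmm_scatter(row, col, values, b))  # tensor([[3., 4.], [1., 2.]])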
| 30.350394
| 113
| 0.629005
|
075c8e7e9c0d6c445eb075114d43bbb6c75f0d18
| 969
|
py
|
Python
|
lesson1_step5.py
|
scorpion-kit/stepik-QA-Selenium_Python
|
94b65d814c511ca364635d966a0f4971192be4fc
|
[
"MIT"
] | null | null | null |
lesson1_step5.py
|
scorpion-kit/stepik-QA-Selenium_Python
|
94b65d814c511ca364635d966a0f4971192be4fc
|
[
"MIT"
] | null | null | null |
lesson1_step5.py
|
scorpion-kit/stepik-QA-Selenium_Python
|
94b65d814c511ca364635d966a0f4971192be4fc
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
import time
import math
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
try:
link = "http://suninjuly.github.io/math.html"
browser = webdriver.Chrome()
browser.get(link)
    # Your code: read the value of x from the page
x_element = browser.find_element_by_css_selector("span#input_value.nowrap")
x = x_element.text
    print(x)
y = calc(x)
    # Enter the answer
    answer = browser.find_element_by_css_selector("input#answer")  # "input#answer" is a CSS selector; a tag-name lookup would fail
answer.send_keys(y)
    # Tick the checkbox and the radio button
chbox = browser.find_element_by_id("robotCheckbox")
chbox.click()
radiob = browser.find_element_by_id("robotsRule")
radiob.click()
    # Click the submit button
button = browser.find_element_by_css_selector("button.btn")
button.click()
print("Vse ok")
finally:
    # wait so the script's result can be inspected visually
time.sleep(10)
    # close the browser after all the steps
browser.quit()
| 23.634146
| 79
| 0.692466
|
a88c4d3eeceb4e0a23ac78ae92f06430ba2edacd
| 5,537
|
py
|
Python
|
tests/test_users.py
|
brynjolf23/whoKnows
|
6f480f24705fd3d1ec233aab0d842164ac53fe34
|
[
"Unlicense"
] | null | null | null |
tests/test_users.py
|
brynjolf23/whoKnows
|
6f480f24705fd3d1ec233aab0d842164ac53fe34
|
[
"Unlicense"
] | null | null | null |
tests/test_users.py
|
brynjolf23/whoKnows
|
6f480f24705fd3d1ec233aab0d842164ac53fe34
|
[
"Unlicense"
] | 1
|
2020-09-06T19:01:13.000Z
|
2020-09-06T19:01:13.000Z
|
import os
import unittest
from flask_bcrypt import Bcrypt
from project import app, db
from project._config import BASE_DIR
from project.models import User, Follower
# Assumption: create_user() below needs a password hasher and none was imported;
# the project may already expose its own `bcrypt` instance instead.
bcrypt = Bcrypt(app)
TEST_DB = 'test.db'
class MainTest(unittest.TestCase):
# setup function
def setUp(self):
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
app.config['DEBUG'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(BASE_DIR, TEST_DB)
self.app = app.test_client()
db.create_all()
        self.assertEqual(app.debug, False)
# teardown function
def tearDown(self):
db.session.remove()
db.drop_all()
    # helper functions
def login(self, name, password):
return self.app.post('/', data=dict(
name=name, password=password), follow_redirects=True)
def register(self, name, email, password, confirm):
return self.app.post('register/', data=dict(
name=name, email=email, password=password, confirm=confirm
), follow_redirects=True)
def logout(self):
return self.app.get('logout/', follow_redirects=True)
def create_user(self, name, email, password):
new_user = User(
name=name,
email=email,
password=bcrypt.generate_password_hash(password)
)
db.session.add(new_user)
db.session.commit()
# tests
def test_user_can_register(self):
new_user = User('foobar', 'foobar@example.com', 'barfoo', 'barfoo')
db.session.add(new_user)
db.session.commit()
test = db.session.query(User).all()
for t in test:
t.name
assert t.name == 'foobar'
def test_users_cannot_login_unless_registered(self):
response = self.login('foobar', 'barfoo')
self.assertIn(b'Invalid username or password', response.data)
def test_users_can_login(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
response = self.login('foobar', 'barfoo')
self.assertIn(b'Welcome', response.data)
def test_logged_in_users_cannot_visit_register_page(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
self.login('foobar', 'barfoo')
response = self.app.get('register/', follow_redirects=True)
self.assertNotIn(b'Already registered?', response.data)
def test_invalid_form_data(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
response = self.login('alert("alert box!")', 'barfoo')
self.assertIn(b'Invalid username or password', response.data)
def test_user_registeration(self):
self.app.get('register', follow_redirects=True)
response = self.register(
'foobar', 'foobar@example.com','barfoo', 'barfoo'
)
self.assertIn(b'Thanks for registering. Plese login.', response.data)
def test_duplicate_user_registeration_throws_error(self):
self.app.get('register/', follow_redirects=True)
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
self.app.get('register/', follow_redirects=True)
response = self.register(
'foobar', 'foobar@example.com','barfoo', 'barfoo'
)
self.assertIn(
b'That username and/or email already exists.', response.data
)
def test_logged_in_users_can_logout(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
self.login('foobar', 'barfoo')
response = self.logout()
self.assertIn(b'You have been logged out', response.data)
def test_not_logged_in_users_cannot_logout(self):
response = self.logout()
self.assertNotIn(b'You have been logged out', response.data)
def test_users_page_shows_all_the_users(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
self.register('barfoo', 'barfoo@example.com','foobar', 'foobar')
self.login('barfoo', 'foobar')
response = self.app.get('users/', follow_redirects=True)
self.assertIn(b'foobar', response.data)
self.assertIn(b'barfoo', response.data)
    def test_is_following_functionality(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
self.register('barfoo', 'barfoo@example.com','foobar', 'foobar')
self.login('foobar', 'barfoo')
self.app.get('tweets/follow/2/', follow_redirects=True)
user = db.session.query(User).first()
self.assertTrue(user.is_following(1, 2))
self.assertFalse(user.is_following(1, 1))
    def test_string_representation_of_the_user_object(self):
db.session.add(
User(
'foobar',
'foobar@example.com',
'barfoo'
)
)
db.session.commit()
users = db.session.query(User).all()
for user in users:
self.assertEqual(str(user), '<User {}>'.format(user.name))
def test_string_representation_of_the_follower_object(self):
self.register('foobar', 'foobar@example.com','barfoo', 'barfoo')
self.register('barfoo', 'barfoo@example.com','foobar', 'foobar')
self.login('foobar', 'barfoo')
self.app.get('tweets/follow/2/', follow_redirects=True)
follower = db.session.query(Follower).first()
self.assertEqual(str(follower), '<User {0} follows {1}>'.format('1', '2'))
if __name__ == "__main__":
unittest.main()
| 36.427632
| 82
| 0.629763
|
b78ec4e223ddabda11fbb4a3e0a1449c2fc37a87
| 38,028
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int18e/34.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int18e/34.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int18e/34.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 4082
passenger_arriving = (
(3, 5, 7, 8, 5, 0, 10, 12, 16, 5, 3, 0), # 0
(7, 9, 3, 8, 6, 0, 7, 8, 6, 4, 2, 0), # 1
(7, 14, 7, 5, 4, 0, 8, 11, 9, 6, 1, 0), # 2
(3, 4, 14, 6, 1, 0, 13, 9, 8, 5, 3, 0), # 3
(7, 8, 10, 3, 4, 0, 10, 14, 6, 5, 3, 0), # 4
(3, 20, 12, 4, 4, 0, 7, 14, 7, 5, 3, 0), # 5
(8, 12, 9, 6, 5, 0, 10, 9, 7, 5, 5, 0), # 6
(8, 7, 9, 6, 0, 0, 10, 13, 7, 2, 3, 0), # 7
(1, 11, 11, 3, 1, 0, 7, 14, 10, 12, 3, 0), # 8
(7, 13, 9, 6, 2, 0, 8, 11, 5, 4, 5, 0), # 9
(7, 10, 7, 7, 2, 0, 7, 15, 10, 2, 5, 0), # 10
(7, 12, 7, 6, 6, 0, 12, 11, 6, 5, 3, 0), # 11
(3, 6, 10, 5, 3, 0, 5, 10, 6, 4, 4, 0), # 12
(3, 14, 7, 4, 3, 0, 9, 13, 10, 8, 1, 0), # 13
(4, 16, 9, 4, 6, 0, 8, 6, 5, 6, 3, 0), # 14
(7, 14, 9, 4, 2, 0, 7, 12, 4, 5, 1, 0), # 15
(3, 11, 6, 8, 6, 0, 6, 11, 6, 6, 2, 0), # 16
(7, 10, 15, 1, 2, 0, 7, 10, 4, 6, 2, 0), # 17
(3, 4, 10, 4, 2, 0, 8, 10, 5, 7, 4, 0), # 18
(4, 15, 13, 2, 0, 0, 8, 8, 4, 6, 0, 0), # 19
(6, 14, 13, 3, 5, 0, 4, 7, 9, 5, 3, 0), # 20
(7, 13, 10, 4, 1, 0, 9, 14, 8, 8, 2, 0), # 21
(8, 14, 7, 2, 1, 0, 14, 11, 7, 9, 5, 0), # 22
(7, 12, 7, 6, 4, 0, 9, 10, 4, 4, 4, 0), # 23
(3, 6, 9, 4, 1, 0, 11, 10, 4, 10, 3, 0), # 24
(3, 16, 13, 5, 3, 0, 12, 9, 7, 11, 5, 0), # 25
(11, 14, 10, 7, 3, 0, 6, 20, 9, 8, 2, 0), # 26
(4, 13, 11, 3, 0, 0, 5, 12, 6, 8, 2, 0), # 27
(8, 14, 10, 4, 3, 0, 10, 10, 7, 8, 2, 0), # 28
(7, 7, 1, 7, 1, 0, 6, 10, 6, 11, 1, 0), # 29
(5, 11, 6, 8, 3, 0, 6, 13, 7, 9, 4, 0), # 30
(8, 18, 3, 1, 2, 0, 13, 17, 9, 4, 2, 0), # 31
(4, 14, 7, 5, 7, 0, 9, 7, 5, 6, 2, 0), # 32
(6, 12, 11, 1, 4, 0, 9, 9, 8, 5, 7, 0), # 33
(4, 15, 8, 7, 4, 0, 7, 9, 13, 6, 3, 0), # 34
(3, 9, 10, 5, 3, 0, 8, 12, 5, 9, 2, 0), # 35
(4, 9, 13, 8, 3, 0, 10, 13, 9, 7, 2, 0), # 36
(2, 10, 7, 9, 2, 0, 10, 13, 9, 7, 0, 0), # 37
(8, 6, 13, 4, 3, 0, 13, 14, 11, 4, 1, 0), # 38
(5, 8, 8, 4, 4, 0, 4, 12, 8, 3, 3, 0), # 39
(8, 8, 7, 5, 2, 0, 6, 5, 7, 5, 3, 0), # 40
(6, 10, 5, 6, 4, 0, 9, 3, 8, 9, 6, 0), # 41
(4, 8, 10, 10, 0, 0, 10, 11, 5, 5, 0, 0), # 42
(4, 13, 15, 5, 3, 0, 5, 16, 5, 4, 2, 0), # 43
(7, 8, 9, 5, 0, 0, 7, 11, 6, 5, 3, 0), # 44
(8, 13, 10, 6, 0, 0, 8, 7, 3, 4, 7, 0), # 45
(5, 20, 9, 3, 2, 0, 8, 13, 5, 6, 3, 0), # 46
(7, 12, 18, 8, 5, 0, 8, 5, 13, 6, 6, 0), # 47
(6, 11, 3, 4, 1, 0, 9, 10, 9, 5, 9, 0), # 48
(9, 10, 8, 5, 2, 0, 6, 9, 8, 8, 2, 0), # 49
(5, 17, 8, 4, 1, 0, 11, 9, 7, 10, 2, 0), # 50
(4, 10, 11, 3, 1, 0, 7, 12, 4, 1, 4, 0), # 51
(6, 14, 8, 7, 2, 0, 4, 8, 5, 2, 4, 0), # 52
(11, 10, 8, 6, 0, 0, 8, 13, 9, 7, 2, 0), # 53
(8, 17, 9, 4, 3, 0, 6, 11, 7, 14, 5, 0), # 54
(3, 8, 10, 2, 2, 0, 9, 10, 8, 4, 2, 0), # 55
(3, 9, 11, 4, 2, 0, 6, 10, 5, 4, 3, 0), # 56
(7, 13, 10, 4, 3, 0, 11, 14, 7, 3, 3, 0), # 57
(2, 14, 13, 5, 1, 0, 10, 10, 9, 5, 2, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(4.769372805092186, 12.233629261363635, 14.389624839331619, 11.405298913043477, 12.857451923076923, 8.562228260869567), # 0
(4.81413961808604, 12.369674877683082, 14.46734796754499, 11.46881589673913, 12.953819711538461, 8.559309850543478), # 1
(4.8583952589991215, 12.503702525252525, 14.54322622107969, 11.530934782608696, 13.048153846153847, 8.556302173913043), # 2
(4.902102161984196, 12.635567578125, 14.617204169344474, 11.591602581521737, 13.14036778846154, 8.553205638586958), # 3
(4.94522276119403, 12.765125410353535, 14.689226381748071, 11.650766304347826, 13.230375, 8.550020652173911), # 4
(4.987719490781387, 12.892231395991162, 14.759237427699228, 11.708372961956522, 13.318088942307691, 8.546747622282608), # 5
(5.029554784899035, 13.01674090909091, 14.827181876606687, 11.764369565217393, 13.403423076923078, 8.54338695652174), # 6
(5.0706910776997365, 13.138509323705808, 14.893004297879177, 11.818703125, 13.486290865384618, 8.5399390625), # 7
(5.1110908033362605, 13.257392013888888, 14.956649260925452, 11.871320652173912, 13.56660576923077, 8.536404347826087), # 8
(5.1507163959613695, 13.373244353693181, 15.018061335154243, 11.922169157608696, 13.644281249999999, 8.532783220108696), # 9
(5.1895302897278315, 13.485921717171717, 15.077185089974291, 11.971195652173915, 13.719230769230771, 8.529076086956522), # 10
(5.227494918788412, 13.595279478377526, 15.133965094794343, 12.018347146739131, 13.791367788461539, 8.525283355978262), # 11
(5.2645727172958745, 13.701173011363636, 15.188345919023137, 12.063570652173912, 13.860605769230768, 8.521405434782608), # 12
(5.3007261194029835, 13.803457690183082, 15.240272132069407, 12.106813179347826, 13.926858173076925, 8.51744273097826), # 13
(5.335917559262511, 13.90198888888889, 15.289688303341899, 12.148021739130433, 13.99003846153846, 8.513395652173912), # 14
(5.370109471027217, 13.996621981534089, 15.336539002249355, 12.187143342391304, 14.050060096153846, 8.509264605978261), # 15
(5.403264288849868, 14.087212342171718, 15.380768798200515, 12.224124999999999, 14.10683653846154, 8.50505), # 16
(5.4353444468832315, 14.173615344854797, 15.422322260604112, 12.258913722826087, 14.16028125, 8.500752241847827), # 17
(5.46631237928007, 14.255686363636363, 15.461143958868895, 12.291456521739132, 14.210307692307696, 8.496371739130435), # 18
(5.496130520193152, 14.333280772569443, 15.4971784624036, 12.321700407608695, 14.256829326923079, 8.491908899456522), # 19
(5.524761303775241, 14.40625394570707, 15.530370340616965, 12.349592391304348, 14.299759615384616, 8.487364130434782), # 20
(5.552167164179106, 14.47446125710227, 15.56066416291774, 12.375079483695652, 14.339012019230768, 8.482737839673913), # 21
(5.578310535557506, 14.537758080808082, 15.588004498714653, 12.398108695652175, 14.374499999999998, 8.47803043478261), # 22
(5.603153852063214, 14.595999790877526, 15.612335917416454, 12.418627038043478, 14.40613701923077, 8.473242323369567), # 23
(5.62665954784899, 14.649041761363636, 15.633602988431875, 12.43658152173913, 14.433836538461538, 8.468373913043479), # 24
(5.648790057067603, 14.696739366319445, 15.651750281169667, 12.451919157608696, 14.457512019230768, 8.463425611413044), # 25
(5.669507813871817, 14.738947979797977, 15.66672236503856, 12.464586956521739, 14.477076923076922, 8.458397826086957), # 26
(5.688775252414398, 14.77552297585227, 15.6784638094473, 12.474531929347828, 14.492444711538463, 8.453290964673915), # 27
(5.7065548068481124, 14.806319728535353, 15.68691918380463, 12.481701086956523, 14.503528846153845, 8.448105434782608), # 28
(5.722808911325724, 14.831193611900254, 15.69203305751928, 12.486041440217392, 14.510242788461538, 8.44284164402174), # 29
(5.7375, 14.85, 15.69375, 12.4875, 14.512500000000001, 8.4375), # 30
(5.751246651214834, 14.865621839488634, 15.692462907608693, 12.487236580882353, 14.511678590425532, 8.430077267616193), # 31
(5.7646965153452685, 14.881037215909092, 15.68863804347826, 12.486451470588234, 14.509231914893617, 8.418644565217393), # 32
(5.777855634590792, 14.896244211647728, 15.682330027173915, 12.485152389705883, 14.50518630319149, 8.403313830584706), # 33
(5.790730051150895, 14.91124090909091, 15.67359347826087, 12.483347058823531, 14.499568085106382, 8.38419700149925), # 34
(5.803325807225064, 14.926025390624996, 15.662483016304348, 12.481043198529411, 14.492403590425532, 8.361406015742128), # 35
(5.815648945012788, 14.940595738636366, 15.649053260869564, 12.478248529411767, 14.48371914893617, 8.335052811094453), # 36
(5.8277055067135555, 14.954950035511365, 15.63335883152174, 12.474970772058823, 14.47354109042553, 8.305249325337332), # 37
(5.839501534526853, 14.969086363636364, 15.615454347826088, 12.471217647058824, 14.461895744680852, 8.272107496251873), # 38
(5.851043070652174, 14.983002805397728, 15.595394429347825, 12.466996875000001, 14.44880944148936, 8.23573926161919), # 39
(5.862336157289003, 14.99669744318182, 15.573233695652176, 12.462316176470589, 14.434308510638296, 8.196256559220389), # 40
(5.873386836636828, 15.010168359374997, 15.549026766304348, 12.457183272058824, 14.418419281914893, 8.153771326836583), # 41
(5.88420115089514, 15.023413636363639, 15.522828260869566, 12.451605882352942, 14.401168085106384, 8.108395502248875), # 42
(5.894785142263428, 15.03643135653409, 15.494692798913043, 12.445591727941178, 14.38258125, 8.060241023238381), # 43
(5.905144852941176, 15.049219602272727, 15.464675, 12.439148529411764, 14.36268510638298, 8.009419827586207), # 44
(5.915286325127877, 15.061776455965909, 15.432829483695656, 12.43228400735294, 14.341505984042554, 7.956043853073464), # 45
(5.925215601023019, 15.074100000000003, 15.39921086956522, 12.425005882352941, 14.319070212765958, 7.90022503748126), # 46
(5.934938722826087, 15.086188316761364, 15.363873777173913, 12.417321874999999, 14.295404122340427, 7.842075318590705), # 47
(5.944461732736574, 15.098039488636365, 15.326872826086957, 12.409239705882353, 14.27053404255319, 7.7817066341829095), # 48
(5.953790672953963, 15.10965159801136, 15.288262635869566, 12.400767095588236, 14.24448630319149, 7.71923092203898), # 49
(5.96293158567775, 15.121022727272724, 15.248097826086958, 12.391911764705883, 14.217287234042553, 7.65476011994003), # 50
(5.971890513107417, 15.132150958806818, 15.206433016304347, 12.38268143382353, 14.188963164893616, 7.588406165667167), # 51
(5.980673497442456, 15.143034375, 15.163322826086954, 12.373083823529411, 14.159540425531915, 7.5202809970015), # 52
(5.989286580882353, 15.153671058238638, 15.118821875, 12.363126654411765, 14.129045345744682, 7.450496551724138), # 53
(5.9977358056266, 15.164059090909088, 15.072984782608694, 12.352817647058824, 14.09750425531915, 7.379164767616192), # 54
(6.00602721387468, 15.174196555397728, 15.02586616847826, 12.342164522058825, 14.064943484042553, 7.306397582458771), # 55
(6.014166847826087, 15.184081534090907, 14.977520652173913, 12.331175, 14.031389361702129, 7.232306934032984), # 56
(6.022160749680308, 15.193712109375003, 14.92800285326087, 12.319856801470587, 13.996868218085105, 7.15700476011994), # 57
(6.030014961636829, 15.203086363636363, 14.877367391304347, 12.308217647058825, 13.961406382978723, 7.0806029985007495), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(3, 5, 7, 8, 5, 0, 10, 12, 16, 5, 3, 0), # 0
(10, 14, 10, 16, 11, 0, 17, 20, 22, 9, 5, 0), # 1
(17, 28, 17, 21, 15, 0, 25, 31, 31, 15, 6, 0), # 2
(20, 32, 31, 27, 16, 0, 38, 40, 39, 20, 9, 0), # 3
(27, 40, 41, 30, 20, 0, 48, 54, 45, 25, 12, 0), # 4
(30, 60, 53, 34, 24, 0, 55, 68, 52, 30, 15, 0), # 5
(38, 72, 62, 40, 29, 0, 65, 77, 59, 35, 20, 0), # 6
(46, 79, 71, 46, 29, 0, 75, 90, 66, 37, 23, 0), # 7
(47, 90, 82, 49, 30, 0, 82, 104, 76, 49, 26, 0), # 8
(54, 103, 91, 55, 32, 0, 90, 115, 81, 53, 31, 0), # 9
(61, 113, 98, 62, 34, 0, 97, 130, 91, 55, 36, 0), # 10
(68, 125, 105, 68, 40, 0, 109, 141, 97, 60, 39, 0), # 11
(71, 131, 115, 73, 43, 0, 114, 151, 103, 64, 43, 0), # 12
(74, 145, 122, 77, 46, 0, 123, 164, 113, 72, 44, 0), # 13
(78, 161, 131, 81, 52, 0, 131, 170, 118, 78, 47, 0), # 14
(85, 175, 140, 85, 54, 0, 138, 182, 122, 83, 48, 0), # 15
(88, 186, 146, 93, 60, 0, 144, 193, 128, 89, 50, 0), # 16
(95, 196, 161, 94, 62, 0, 151, 203, 132, 95, 52, 0), # 17
(98, 200, 171, 98, 64, 0, 159, 213, 137, 102, 56, 0), # 18
(102, 215, 184, 100, 64, 0, 167, 221, 141, 108, 56, 0), # 19
(108, 229, 197, 103, 69, 0, 171, 228, 150, 113, 59, 0), # 20
(115, 242, 207, 107, 70, 0, 180, 242, 158, 121, 61, 0), # 21
(123, 256, 214, 109, 71, 0, 194, 253, 165, 130, 66, 0), # 22
(130, 268, 221, 115, 75, 0, 203, 263, 169, 134, 70, 0), # 23
(133, 274, 230, 119, 76, 0, 214, 273, 173, 144, 73, 0), # 24
(136, 290, 243, 124, 79, 0, 226, 282, 180, 155, 78, 0), # 25
(147, 304, 253, 131, 82, 0, 232, 302, 189, 163, 80, 0), # 26
(151, 317, 264, 134, 82, 0, 237, 314, 195, 171, 82, 0), # 27
(159, 331, 274, 138, 85, 0, 247, 324, 202, 179, 84, 0), # 28
(166, 338, 275, 145, 86, 0, 253, 334, 208, 190, 85, 0), # 29
(171, 349, 281, 153, 89, 0, 259, 347, 215, 199, 89, 0), # 30
(179, 367, 284, 154, 91, 0, 272, 364, 224, 203, 91, 0), # 31
(183, 381, 291, 159, 98, 0, 281, 371, 229, 209, 93, 0), # 32
(189, 393, 302, 160, 102, 0, 290, 380, 237, 214, 100, 0), # 33
(193, 408, 310, 167, 106, 0, 297, 389, 250, 220, 103, 0), # 34
(196, 417, 320, 172, 109, 0, 305, 401, 255, 229, 105, 0), # 35
(200, 426, 333, 180, 112, 0, 315, 414, 264, 236, 107, 0), # 36
(202, 436, 340, 189, 114, 0, 325, 427, 273, 243, 107, 0), # 37
(210, 442, 353, 193, 117, 0, 338, 441, 284, 247, 108, 0), # 38
(215, 450, 361, 197, 121, 0, 342, 453, 292, 250, 111, 0), # 39
(223, 458, 368, 202, 123, 0, 348, 458, 299, 255, 114, 0), # 40
(229, 468, 373, 208, 127, 0, 357, 461, 307, 264, 120, 0), # 41
(233, 476, 383, 218, 127, 0, 367, 472, 312, 269, 120, 0), # 42
(237, 489, 398, 223, 130, 0, 372, 488, 317, 273, 122, 0), # 43
(244, 497, 407, 228, 130, 0, 379, 499, 323, 278, 125, 0), # 44
(252, 510, 417, 234, 130, 0, 387, 506, 326, 282, 132, 0), # 45
(257, 530, 426, 237, 132, 0, 395, 519, 331, 288, 135, 0), # 46
(264, 542, 444, 245, 137, 0, 403, 524, 344, 294, 141, 0), # 47
(270, 553, 447, 249, 138, 0, 412, 534, 353, 299, 150, 0), # 48
(279, 563, 455, 254, 140, 0, 418, 543, 361, 307, 152, 0), # 49
(284, 580, 463, 258, 141, 0, 429, 552, 368, 317, 154, 0), # 50
(288, 590, 474, 261, 142, 0, 436, 564, 372, 318, 158, 0), # 51
(294, 604, 482, 268, 144, 0, 440, 572, 377, 320, 162, 0), # 52
(305, 614, 490, 274, 144, 0, 448, 585, 386, 327, 164, 0), # 53
(313, 631, 499, 278, 147, 0, 454, 596, 393, 341, 169, 0), # 54
(316, 639, 509, 280, 149, 0, 463, 606, 401, 345, 171, 0), # 55
(319, 648, 520, 284, 151, 0, 469, 616, 406, 349, 174, 0), # 56
(326, 661, 530, 288, 154, 0, 480, 630, 413, 352, 177, 0), # 57
(328, 675, 543, 293, 155, 0, 490, 640, 422, 357, 179, 0), # 58
(328, 675, 543, 293, 155, 0, 490, 640, 422, 357, 179, 0), # 59
)
passenger_arriving_rate = (
(4.769372805092186, 9.786903409090908, 8.63377490359897, 4.56211956521739, 2.5714903846153843, 0.0, 8.562228260869567, 10.285961538461537, 6.843179347826086, 5.755849935732647, 2.446725852272727, 0.0), # 0
(4.81413961808604, 9.895739902146465, 8.680408780526994, 4.587526358695651, 2.5907639423076922, 0.0, 8.559309850543478, 10.363055769230769, 6.881289538043478, 5.786939187017995, 2.4739349755366162, 0.0), # 1
(4.8583952589991215, 10.00296202020202, 8.725935732647814, 4.612373913043478, 2.609630769230769, 0.0, 8.556302173913043, 10.438523076923076, 6.918560869565217, 5.817290488431875, 2.500740505050505, 0.0), # 2
(4.902102161984196, 10.1084540625, 8.770322501606683, 4.636641032608694, 2.628073557692308, 0.0, 8.553205638586958, 10.512294230769232, 6.954961548913042, 5.846881667737789, 2.527113515625, 0.0), # 3
(4.94522276119403, 10.212100328282828, 8.813535829048842, 4.66030652173913, 2.6460749999999997, 0.0, 8.550020652173911, 10.584299999999999, 6.990459782608696, 5.875690552699228, 2.553025082070707, 0.0), # 4
(4.987719490781387, 10.313785116792928, 8.855542456619537, 4.6833491847826085, 2.663617788461538, 0.0, 8.546747622282608, 10.654471153846153, 7.025023777173913, 5.90369497107969, 2.578446279198232, 0.0), # 5
(5.029554784899035, 10.413392727272727, 8.896309125964011, 4.705747826086957, 2.680684615384615, 0.0, 8.54338695652174, 10.72273846153846, 7.058621739130436, 5.930872750642674, 2.603348181818182, 0.0), # 6
(5.0706910776997365, 10.510807458964646, 8.935802578727506, 4.72748125, 2.697258173076923, 0.0, 8.5399390625, 10.789032692307693, 7.0912218750000005, 5.95720171915167, 2.6277018647411614, 0.0), # 7
(5.1110908033362605, 10.60591361111111, 8.97398955655527, 4.7485282608695645, 2.7133211538461537, 0.0, 8.536404347826087, 10.853284615384615, 7.122792391304347, 5.982659704370181, 2.6514784027777774, 0.0), # 8
(5.1507163959613695, 10.698595482954543, 9.010836801092546, 4.768867663043478, 2.7288562499999993, 0.0, 8.532783220108696, 10.915424999999997, 7.153301494565217, 6.007224534061697, 2.6746488707386358, 0.0), # 9
(5.1895302897278315, 10.788737373737373, 9.046311053984574, 4.7884782608695655, 2.743846153846154, 0.0, 8.529076086956522, 10.975384615384616, 7.182717391304348, 6.030874035989716, 2.697184343434343, 0.0), # 10
(5.227494918788412, 10.87622358270202, 9.080379056876605, 4.807338858695652, 2.7582735576923074, 0.0, 8.525283355978262, 11.03309423076923, 7.2110082880434785, 6.053586037917737, 2.719055895675505, 0.0), # 11
(5.2645727172958745, 10.960938409090907, 9.113007551413881, 4.825428260869565, 2.7721211538461534, 0.0, 8.521405434782608, 11.088484615384614, 7.238142391304347, 6.0753383676092545, 2.740234602272727, 0.0), # 12
(5.3007261194029835, 11.042766152146465, 9.144163279241644, 4.8427252717391305, 2.7853716346153847, 0.0, 8.51744273097826, 11.141486538461539, 7.264087907608696, 6.096108852827762, 2.760691538036616, 0.0), # 13
(5.335917559262511, 11.121591111111112, 9.173812982005138, 4.859208695652173, 2.7980076923076918, 0.0, 8.513395652173912, 11.192030769230767, 7.288813043478259, 6.115875321336759, 2.780397777777778, 0.0), # 14
(5.370109471027217, 11.19729758522727, 9.201923401349612, 4.874857336956521, 2.810012019230769, 0.0, 8.509264605978261, 11.240048076923076, 7.312286005434782, 6.134615600899742, 2.7993243963068175, 0.0), # 15
(5.403264288849868, 11.269769873737372, 9.228461278920308, 4.88965, 2.8213673076923076, 0.0, 8.50505, 11.28546923076923, 7.334474999999999, 6.152307519280206, 2.817442468434343, 0.0), # 16
(5.4353444468832315, 11.338892275883836, 9.253393356362468, 4.903565489130434, 2.83205625, 0.0, 8.500752241847827, 11.328225, 7.3553482336956515, 6.168928904241644, 2.834723068970959, 0.0), # 17
(5.46631237928007, 11.40454909090909, 9.276686375321336, 4.916582608695652, 2.842061538461539, 0.0, 8.496371739130435, 11.368246153846156, 7.374873913043479, 6.184457583547558, 2.8511372727272724, 0.0), # 18
(5.496130520193152, 11.466624618055553, 9.298307077442159, 4.928680163043477, 2.8513658653846155, 0.0, 8.491908899456522, 11.405463461538462, 7.393020244565217, 6.198871384961439, 2.866656154513888, 0.0), # 19
(5.524761303775241, 11.525003156565655, 9.318222204370178, 4.939836956521739, 2.859951923076923, 0.0, 8.487364130434782, 11.439807692307692, 7.409755434782609, 6.212148136246785, 2.8812507891414136, 0.0), # 20
(5.552167164179106, 11.579569005681815, 9.336398497750643, 4.95003179347826, 2.8678024038461536, 0.0, 8.482737839673913, 11.471209615384614, 7.425047690217391, 6.224265665167096, 2.894892251420454, 0.0), # 21
(5.578310535557506, 11.630206464646465, 9.352802699228791, 4.95924347826087, 2.8748999999999993, 0.0, 8.47803043478261, 11.499599999999997, 7.438865217391305, 6.235201799485861, 2.907551616161616, 0.0), # 22
(5.603153852063214, 11.67679983270202, 9.367401550449872, 4.967450815217391, 2.8812274038461534, 0.0, 8.473242323369567, 11.524909615384614, 7.451176222826087, 6.244934366966581, 2.919199958175505, 0.0), # 23
(5.62665954784899, 11.719233409090908, 9.380161793059125, 4.974632608695652, 2.8867673076923075, 0.0, 8.468373913043479, 11.54706923076923, 7.461948913043478, 6.25344119537275, 2.929808352272727, 0.0), # 24
(5.648790057067603, 11.757391493055556, 9.391050168701799, 4.980767663043478, 2.8915024038461534, 0.0, 8.463425611413044, 11.566009615384614, 7.471151494565217, 6.260700112467866, 2.939347873263889, 0.0), # 25
(5.669507813871817, 11.79115838383838, 9.400033419023135, 4.985834782608695, 2.8954153846153843, 0.0, 8.458397826086957, 11.581661538461537, 7.478752173913043, 6.266688946015424, 2.947789595959595, 0.0), # 26
(5.688775252414398, 11.820418380681815, 9.40707828566838, 4.989812771739131, 2.8984889423076923, 0.0, 8.453290964673915, 11.593955769230769, 7.484719157608696, 6.271385523778919, 2.9551045951704538, 0.0), # 27
(5.7065548068481124, 11.84505578282828, 9.412151510282778, 4.992680434782609, 2.9007057692307687, 0.0, 8.448105434782608, 11.602823076923075, 7.489020652173913, 6.274767673521851, 2.96126394570707, 0.0), # 28
(5.722808911325724, 11.864954889520202, 9.415219834511568, 4.994416576086956, 2.902048557692307, 0.0, 8.44284164402174, 11.608194230769229, 7.491624864130435, 6.276813223007712, 2.9662387223800506, 0.0), # 29
(5.7375, 11.879999999999999, 9.41625, 4.995, 2.9025, 0.0, 8.4375, 11.61, 7.4925, 6.277499999999999, 2.9699999999999998, 0.0), # 30
(5.751246651214834, 11.892497471590906, 9.415477744565216, 4.994894632352941, 2.9023357180851064, 0.0, 8.430077267616193, 11.609342872340426, 7.492341948529411, 6.276985163043476, 2.9731243678977264, 0.0), # 31
(5.7646965153452685, 11.904829772727274, 9.413182826086956, 4.994580588235293, 2.901846382978723, 0.0, 8.418644565217393, 11.607385531914892, 7.49187088235294, 6.275455217391303, 2.9762074431818184, 0.0), # 32
(5.777855634590792, 11.916995369318181, 9.40939801630435, 4.994060955882353, 2.9010372606382977, 0.0, 8.403313830584706, 11.60414904255319, 7.491091433823529, 6.272932010869566, 2.9792488423295453, 0.0), # 33
(5.790730051150895, 11.928992727272727, 9.40415608695652, 4.993338823529412, 2.899913617021276, 0.0, 8.38419700149925, 11.599654468085104, 7.490008235294118, 6.269437391304347, 2.9822481818181816, 0.0), # 34
(5.803325807225064, 11.940820312499996, 9.39748980978261, 4.9924172794117645, 2.898480718085106, 0.0, 8.361406015742128, 11.593922872340425, 7.488625919117647, 6.264993206521739, 2.985205078124999, 0.0), # 35
(5.815648945012788, 11.952476590909091, 9.389431956521738, 4.9912994117647065, 2.896743829787234, 0.0, 8.335052811094453, 11.586975319148936, 7.486949117647059, 6.259621304347825, 2.988119147727273, 0.0), # 36
(5.8277055067135555, 11.96396002840909, 9.380015298913044, 4.989988308823529, 2.8947082180851056, 0.0, 8.305249325337332, 11.578832872340422, 7.484982463235293, 6.253343532608695, 2.9909900071022726, 0.0), # 37
(5.839501534526853, 11.97526909090909, 9.369272608695653, 4.988487058823529, 2.89237914893617, 0.0, 8.272107496251873, 11.56951659574468, 7.4827305882352935, 6.246181739130434, 2.9938172727272727, 0.0), # 38
(5.851043070652174, 11.986402244318182, 9.357236657608695, 4.98679875, 2.8897618882978717, 0.0, 8.23573926161919, 11.559047553191487, 7.480198125, 6.23815777173913, 2.9966005610795454, 0.0), # 39
(5.862336157289003, 11.997357954545455, 9.343940217391305, 4.984926470588235, 2.886861702127659, 0.0, 8.196256559220389, 11.547446808510635, 7.477389705882353, 6.22929347826087, 2.999339488636364, 0.0), # 40
(5.873386836636828, 12.008134687499997, 9.329416059782607, 4.982873308823529, 2.8836838563829783, 0.0, 8.153771326836583, 11.534735425531913, 7.474309963235294, 6.219610706521738, 3.002033671874999, 0.0), # 41
(5.88420115089514, 12.01873090909091, 9.31369695652174, 4.980642352941176, 2.880233617021277, 0.0, 8.108395502248875, 11.520934468085107, 7.4709635294117644, 6.209131304347826, 3.0046827272727277, 0.0), # 42
(5.894785142263428, 12.02914508522727, 9.296815679347825, 4.978236691176471, 2.8765162499999994, 0.0, 8.060241023238381, 11.506064999999998, 7.467355036764706, 6.1978771195652165, 3.0072862713068176, 0.0), # 43
(5.905144852941176, 12.03937568181818, 9.278805, 4.975659411764705, 2.8725370212765955, 0.0, 8.009419827586207, 11.490148085106382, 7.4634891176470575, 6.1858699999999995, 3.009843920454545, 0.0), # 44
(5.915286325127877, 12.049421164772726, 9.259697690217394, 4.972913602941176, 2.8683011968085106, 0.0, 7.956043853073464, 11.473204787234042, 7.459370404411764, 6.1731317934782615, 3.0123552911931815, 0.0), # 45
(5.925215601023019, 12.059280000000001, 9.239526521739132, 4.970002352941176, 2.8638140425531913, 0.0, 7.90022503748126, 11.455256170212765, 7.455003529411765, 6.159684347826087, 3.0148200000000003, 0.0), # 46
(5.934938722826087, 12.06895065340909, 9.218324266304347, 4.966928749999999, 2.859080824468085, 0.0, 7.842075318590705, 11.43632329787234, 7.450393124999999, 6.145549510869564, 3.0172376633522724, 0.0), # 47
(5.944461732736574, 12.07843159090909, 9.196123695652174, 4.9636958823529405, 2.854106808510638, 0.0, 7.7817066341829095, 11.416427234042551, 7.445543823529412, 6.130749130434782, 3.0196078977272727, 0.0), # 48
(5.953790672953963, 12.087721278409088, 9.17295758152174, 4.960306838235294, 2.8488972606382976, 0.0, 7.71923092203898, 11.39558904255319, 7.4404602573529415, 6.115305054347826, 3.021930319602272, 0.0), # 49
(5.96293158567775, 12.096818181818177, 9.148858695652175, 4.956764705882353, 2.8434574468085105, 0.0, 7.65476011994003, 11.373829787234042, 7.43514705882353, 6.099239130434783, 3.0242045454545443, 0.0), # 50
(5.971890513107417, 12.105720767045453, 9.123859809782608, 4.953072573529411, 2.837792632978723, 0.0, 7.588406165667167, 11.351170531914892, 7.429608860294118, 6.082573206521738, 3.026430191761363, 0.0), # 51
(5.980673497442456, 12.114427499999998, 9.097993695652173, 4.949233529411764, 2.8319080851063827, 0.0, 7.5202809970015, 11.32763234042553, 7.4238502941176465, 6.065329130434781, 3.0286068749999995, 0.0), # 52
(5.989286580882353, 12.122936846590909, 9.071293125, 4.945250661764706, 2.8258090691489364, 0.0, 7.450496551724138, 11.303236276595745, 7.417875992647058, 6.04752875, 3.030734211647727, 0.0), # 53
(5.9977358056266, 12.13124727272727, 9.043790869565216, 4.941127058823529, 2.8195008510638297, 0.0, 7.379164767616192, 11.278003404255319, 7.411690588235294, 6.0291939130434775, 3.0328118181818176, 0.0), # 54
(6.00602721387468, 12.139357244318182, 9.015519701086955, 4.93686580882353, 2.8129886968085103, 0.0, 7.306397582458771, 11.251954787234041, 7.405298713235295, 6.010346467391304, 3.0348393110795455, 0.0), # 55
(6.014166847826087, 12.147265227272724, 8.986512391304348, 4.9324699999999995, 2.8062778723404254, 0.0, 7.232306934032984, 11.225111489361701, 7.398705, 5.991008260869565, 3.036816306818181, 0.0), # 56
(6.022160749680308, 12.154969687500001, 8.95680171195652, 4.927942720588234, 2.7993736436170207, 0.0, 7.15700476011994, 11.197494574468083, 7.391914080882352, 5.9712011413043475, 3.0387424218750003, 0.0), # 57
(6.030014961636829, 12.16246909090909, 8.926420434782608, 4.923287058823529, 2.792281276595744, 0.0, 7.0806029985007495, 11.169125106382976, 7.384930588235295, 5.950946956521738, 3.0406172727272724, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
33, # 1
)
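# Usage sketch (added for clarity; this block is an assumption based on the
# numpy docs linked above, not part of the original module): the top-level
# entropy seeds a root SeedSequence, and child_seed_index selects which of
# its spawned children seed the independent per-stream generators.
import numpy as np

_root = np.random.SeedSequence(entropy)
_children = _root.spawn(max(child_seed_index) + 1)
_rngs = [np.random.default_rng(_children[i]) for i in child_seed_index]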
| 113.516418
| 213
| 0.730094
|
07c9dc2895d4801b00a7112b49629eb812fe9039
| 1,620
|
py
|
Python
|
player.py
|
Cleymax/WorldScanner
|
f7221f9d0e94c212b3d7731f690bb8b6ac73a1d4
|
[
"MIT"
] | 2
|
2020-02-26T12:22:24.000Z
|
2020-02-27T15:59:27.000Z
|
player.py
|
Cleymax/WorldScanner
|
f7221f9d0e94c212b3d7731f690bb8b6ac73a1d4
|
[
"MIT"
] | null | null | null |
player.py
|
Cleymax/WorldScanner
|
f7221f9d0e94c212b3d7731f690bb8b6ac73a1d4
|
[
"MIT"
] | null | null | null |
from os import listdir
from nbt.nbt import NBTFile
from Item import Item
def is_scan_item(args, _item):
    """Return True if the item's material id matches any id passed on the command line."""
    for _id in args.id:
        for idd in _id.split(','):
            if str(idd) == str(_item.material):
                return True
    return False
class Player:
def __init__(self, args):
self.args = args
def scan(self):
for file in listdir(self.args.world + '/playerdata/'):
nbt = NBTFile(self.args.world + '/playerdata/' + file, 'rb')
inv_items = nbt['Inventory']
ec_items = nbt['EnderItems']
if self.args.inventory and len(inv_items) != 0:
for item in inv_items:
_item = Item(item['id'].value, item['Count'].value, item['Damage'].value)
if is_scan_item(self.args, _item) and _item.count >= self.args.count:
                        print('Found {0}*{1} in {2}\'s inventory!'.format(
_item.count,
_item.material,
file.replace('.dat', '')
))
if self.args.enderchest and len(ec_items) != 0:
for item in ec_items:
_item = Item(item['id'].value, item['Count'].value, item['Damage'].value)
                    if is_scan_item(self.args, _item) and _item.count >= self.args.count:  # >= to match the inventory check above
                        print('Found {0}*{1} in {2}\'s enderchest!'.format(
_item.count,
_item.material,
file.replace('.dat', '')
))
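# Example invocation (a sketch, not part of the original file): the argument
# object mirrors the attributes accessed above -- world, id, count, inventory,
# enderchest -- which would normally come from argparse. The item ids 264,265
# are hypothetical values.
if __name__ == '__main__':
    from argparse import Namespace

    args = Namespace(world='world', id=['264,265'], count=1,
                     inventory=True, enderchest=True)
    Player(args).scan()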
| 35.217391
| 93
| 0.480247
|
5b6be29ea620c581ab96d137d157ac57d00c569b
| 9,273
|
py
|
Python
|
cigar_filter/pysam_cigar_filter.py
|
NDHall/pysam_tools
|
4de81108bc403db9258820fa4e8d63585f2e689d
|
[
"MIT"
] | null | null | null |
cigar_filter/pysam_cigar_filter.py
|
NDHall/pysam_tools
|
4de81108bc403db9258820fa4e8d63585f2e689d
|
[
"MIT"
] | null | null | null |
cigar_filter/pysam_cigar_filter.py
|
NDHall/pysam_tools
|
4de81108bc403db9258820fa4e8d63585f2e689d
|
[
"MIT"
] | 1
|
2021-05-27T11:49:29.000Z
|
2021-05-27T11:49:29.000Z
|
import pysam
"""
This script is for filtering out soft-clipped reads that are speciously mapped.
It is of particular concern especially if no attempt has been made to filter
reads during mapping for duplicate regions such as are found between chloroplast
and mitochondrial regions. Even if filtering is applied, this filter will remove
soft clipped reads that are suspcious.
"""
import logging
import getopt
import sys
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
logging.basicConfig(level=logging.WARNING)
logging.debug("\tLogging level is set to debug\n")
def usage():
message="""
    pysam_cigar_filter.py -s or --sam_file <sam or bam file>
                          -l or --length <minimum acceptable match length, int, default=20>
                          -o or --output_prefix <output prefix>
                          -h or --help [prints this help message]
"""
print (message)
return message
def messages(case,variable):
    if case == "Reference_Trouble":
        message="""
        There are repeated reference names in the sam header.
        This must be fixed. The repeated name is : %s
        """ %( variable )
        return message  # returned so the caller's assert can display it
elif case == "mapped_reads":
total_read_counter , mapped_read_counter = variable
message="""
Total of %i reads processed
Total of %i reads excluded because of soft clipping
Total of %i reads included.
""" %( total_read_counter, (total_read_counter - mapped_read_counter), mapped_read_counter)
print (message)
def main_argv_parse(argv):
logging.debug("inside main_argv_parse sys.argv: %s" %(argv[1:]))
argv_dict={"output_prefix":'' ,"sam_file":'',"length":20};
if len(argv[1:]) == 1 and ( argv[1] == '-h' or argv[1]== '--help'):
usage()
exit(0)
try:
        opts, args = getopt.gnu_getopt(argv[1:],"hs:l:o:",["help","output_prefix=","sam_file=","length="])
logging.debug("opts: %s\nargs: %s "%(opts, args))
except getopt.error:
logging.critical("getopt.error argv: %s" %(" ".join(argv)))
usage()
sys.exit(2)
for opt , arg in opts:
if opt in ("-o","--output_prefix"):
argv_dict["output_prefix"]=arg
elif opt in ("-s","--sam_file"):
argv_dict["sam_file"]=arg
elif opt in ("-l","--length"):
argv_dict["length"]=int(arg)
elif opt in ("-h","--help"):
usage()
exit(0)
else :
print ("\n Option not recognized %s\n\n" %(opt))
usage()
# assert False, "unhandled option"
sys.exit(1)
return argv_dict
def alignment_file(sam):
ref_len_dict={}
# max_ref_pos={"dummy_name":["current_max_pos",["list_of_reads..."]]}
# min_ref_pos={"dummy_name":["current_max_pos",["list_of_reads..."]]}
max_ref_pos={}
min_ref_pos={}
if sam.split(".")[-1] =="sam" :
sam_file=pysam.AlignmentFile(sam, 'r')
elif sam.split(".")[-1] =="bam" :
sam_file=pysam.AlignmentFile(sam, 'rb' )
else:
print("\n\n\tNo sam or bam extension detected for %s\n\n" %(sam))
usage()
exit(1)
for SQ in sam_file.header["SQ"] :
#SQ["SN"] is the reference name in the header dict.
#SQ["LN"] is the length.
#Make a dictionary of the expected last position for reads to map to.
assert (SQ["SN"] not in ref_len_dict) , messages("Reference_Trouble", SQ["SN"])
ref_len_dict[SQ["SN"]] = SQ["LN"]
max_ref_pos[SQ["SN"]] = [20,[]]
min_ref_pos[SQ["SN"]]=[1000,[]]
#since max_ref_pos is just a set of names we can use it for
# for both max_ref_pos and min_ref_pos dictionaries.
return [sam_file, ref_len_dict, max_ref_pos, min_ref_pos ]
def cigar_read_filter(read, length,ref_len_dict , max_ref_pos, min_ref_pos, cutoff=5):
"""
bam tuple ids used by pysam.
right now we are using tupples
with ID 4 or 0
cigartuples returns list of tuples with
Formatted (ID_Field, Len)
M BAM_CMATCH 0
I BAM_CINS 1
D BAM_CDEL 2
N BAM_CREF_SKIP 3
S BAM_CSOFT_CLIP 4
H BAM_CHARD_CLIP 5
P BAM_CPAD 6
= BAM_CEQUAL 7
X BAM_CDIFF 8
B BAM_CBACK 9
    check that the read is greater than or equal to the
    minimum allowable length.
"""
ret_read=False
if length <= read.reference_length :
soft_clip=4
match=0
cigar=read.cigartuples
start=cigar[0]
end=cigar[-1]
        # Soft clipping by definition occurs at either end.
        # If it occurs at both ends, the read is excluded as a poor-quality mapping.
        # Soft clipping is only allowed on the first or last mapped base pair.
if (start[0] == soft_clip and end[0]== soft_clip ):
pass
        # If soft clipping is at the 3' end, allow it only for reads that end at the current maximum reference position.
elif end[0] == soft_clip :
if end[1] < cutoff:
ret_read=True
elif max_ref_pos[read.reference_name][0]< read.reference_end:
max_ref_pos[read.reference_name]=[read.reference_end,[read]]
elif max_ref_pos[read.reference_name][0]== read.reference_end:
max_ref_pos[read.reference_name][1].append(read)
else:
pass
        # If the read is soft clipped at the 3' end but ends before the
        # current maximum, there is nothing to do with it.
        # If reads do not start mapping at the first base of the contig, we
        # want to catch the reads that map earliest and allow their soft clipping.
elif start[0]== soft_clip :
if start[1] < cutoff:
ret_read=True
elif read.reference_name in min_ref_pos:
if read.reference_start == 0:
ret_read=True
del min_ref_pos[read.reference_name]
logging.debug("Absolute Minimum Found for %s == 0" %(read.reference_name))
elif read.reference_start < min_ref_pos[read.reference_name][0]:
logging.debug("New Minimum found for %s == new %i old %i " %(read.reference_name, read.reference_start, min_ref_pos[read.reference_name][0]))
min_ref_pos[read.reference_name]=[read.reference_start,[read]]
elif read.reference_start == min_ref_pos[read.reference_name][0] :
# print(read)
min_ref_pos[read.reference_name][1].append(read)
elif read.reference_name not in min_ref_pos and read.reference_start == 0 :
ret_read=True
else:
ret_read=True
if ret_read is True:
return read
else :
return None
def out_from_read_dict(out, read_dict, mapped_read_counter):
for contigs in read_dict:
for reads in read_dict[contigs][1]:
mapped_read_counter+=1
out.write(reads)
return mapped_read_counter
def soft_clip_filter(sam_file, out, length, ref_len_dict, max_ref_pos, min_ref_pos ):
"""
This function iterates through the sam file and returns reads that are soft clipped on either end of
the alignment. It also allows for one end to be soft-clipped up to 5 bp, but not both ends. This is
to allow for the case where not all of an adapter was removed. If a global alignment is forced in
this situation, then it introduces artificial noise into the alignment resulting in poorly called
bases.
"""
mapped_read_counter=0
total_read_counter=0
for read in sam_file:
total_read_counter+=1
out_read =cigar_read_filter(read, length, ref_len_dict, max_ref_pos, min_ref_pos )
        if out_read is not None:
logging.debug( "read passed")
# logging.debug(read.cigartuples, read.reference_start, read.reference_end)
out.write(out_read)
mapped_read_counter+=1
mapped_read_counter = out_from_read_dict(out, min_ref_pos, mapped_read_counter)
mapped_read_counter = out_from_read_dict(out, max_ref_pos,mapped_read_counter)
messages("mapped_reads",[total_read_counter,mapped_read_counter])
if __name__ == "__main__" :
argv=main_argv_parse(sys.argv)
logging.debug("argv :")
logging.debug(argv)
sam_file , ref_len_dict, max_ref_pos, min_ref_pos = alignment_file(argv["sam_file"])
logging.debug("argv[\"length\"]")
logging.debug(argv["length"])
out_bam=pysam.AlignmentFile(str(argv["output_prefix"])+".bam", "wb", header=sam_file.header)
soft_clip_filter( sam_file, out_bam ,argv["length"], ref_len_dict, max_ref_pos, min_ref_pos)
out_bam.close()
sam_file.close()
pysam.sort("-o", "sorted_"+str(argv["output_prefix"])+".bam" , str(argv["output_prefix"])+".bam" )
| 34.091912
| 161
| 0.601531
|
4514a92b48e5ec6554b39a1805fb177e41645d73
| 1,159
|
py
|
Python
|
examples/temperature-offset.py
|
hboshnak/bme680-python
|
8fb321b1ea52a0b2b9c191faedf198e090900f4e
|
[
"MIT"
] | 169
|
2018-08-16T12:25:54.000Z
|
2022-03-08T13:11:58.000Z
|
examples/temperature-offset.py
|
hboshnak/bme680-python
|
8fb321b1ea52a0b2b9c191faedf198e090900f4e
|
[
"MIT"
] | 34
|
2018-10-23T16:35:58.000Z
|
2022-02-11T11:46:31.000Z
|
examples/temperature-offset.py
|
hboshnak/bme680-python
|
8fb321b1ea52a0b2b9c191faedf198e090900f4e
|
[
"MIT"
] | 71
|
2018-08-30T13:38:59.000Z
|
2022-01-23T20:22:10.000Z
|
#!/usr/bin/env python
import bme680
print("""temperature-offset.py - Displays temperature, pressure, and humidity with different offsets.
Press Ctrl+C to exit!
""")
try:
sensor = bme680.BME680(bme680.I2C_ADDR_PRIMARY)
except (RuntimeError, IOError):
sensor = bme680.BME680(bme680.I2C_ADDR_SECONDARY)
# These oversampling settings can be tweaked to
# change the balance between accuracy and noise in
# the data.
sensor.set_humidity_oversample(bme680.OS_2X)
sensor.set_pressure_oversample(bme680.OS_4X)
sensor.set_temperature_oversample(bme680.OS_8X)
sensor.set_filter(bme680.FILTER_SIZE_3)
def display_data(offset=0):
sensor.set_temp_offset(offset)
sensor.get_sensor_data()
output = '{0:.2f} C, {1:.2f} hPa, {2:.3f} %RH'.format(
sensor.data.temperature,
sensor.data.pressure,
sensor.data.humidity)
print(output)
print('')
print('Initial readings')
display_data()
print('SET offset 4 degrees celsius')
display_data(4)
print('SET offset -1.87 degrees celsius')
display_data(-1.87)
print('SET offset -100 degrees celsius')
display_data(-100)
print('SET offset 0 degrees celsius')
display_data(0)
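# A short polling sketch (added for illustration; not part of the original
# example): get_sensor_data() returns True once a fresh reading is available,
# so long-running applications call it in a loop.
import time

print('Polling 5 readings with the offset reset to 0')
sensor.set_temp_offset(0)
for _ in range(5):
    if sensor.get_sensor_data():
        print('{0:.2f} C'.format(sensor.data.temperature))
    time.sleep(1.0)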
| 22.72549
| 101
| 0.740293
|
9068f748d4d8ec6358a80a1ad0589f08edadbec0
| 13,555
|
py
|
Python
|
advanced_filters/forms.py
|
9999years/django-advanced-filters
|
4f1575a67e751330606a9a3ee4f33eda5214b44e
|
[
"MIT"
] | null | null | null |
advanced_filters/forms.py
|
9999years/django-advanced-filters
|
4f1575a67e751330606a9a3ee4f33eda5214b44e
|
[
"MIT"
] | null | null | null |
advanced_filters/forms.py
|
9999years/django-advanced-filters
|
4f1575a67e751330606a9a3ee4f33eda5214b44e
|
[
"MIT"
] | null | null | null |
from datetime import datetime as dt
from pprint import pformat
import logging
import operator
from django import forms
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.utils import get_fields_from_path
from django.db.models import Q, FieldDoesNotExist
from django.db.models.fields import DateField
from django.forms.formsets import formset_factory, BaseFormSet
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves import range, reduce
from django.utils.text import capfirst
import django
from .models import AdvancedFilter
from .form_helpers import CleanWhiteSpacesMixin, VaryingTypeCharField
# django < 1.9 support
USE_VENDOR_DIR = django.VERSION >= (1, 9)
logger = logging.getLogger('advanced_filters.forms')
# select2 location can be modified via settings
SELECT2_JS = getattr(settings, 'SELECT2_JS', 'select2/select2.min.js')
SELECT2_CSS = getattr(settings, 'SELECT2_CSS', 'select2/select2.min.css')
def date_to_string(timestamp):
if timestamp:
return dt.fromtimestamp(timestamp).strftime('%Y-%m-%d')
else:
return ""
class AdvancedFilterQueryForm(CleanWhiteSpacesMixin, forms.Form):
""" Build the query from field, operator and value """
OPERATORS = (
("iexact", _("Equals")),
("icontains", _("Contains")),
("iregex", _("One of")),
("range", _("DateTime Range")),
("isnull", _("Is NULL")),
("istrue", _("Is TRUE")),
("isfalse", _("Is FALSE")),
("lt", _("Less Than")),
("gt", _("Greater Than")),
("lte", _("Less Than or Equal To")),
("gte", _("Greater Than or Equal To")),
)
FIELD_CHOICES = (
("_OR", _("Or (mark an or between blocks)")),
)
field = forms.ChoiceField(required=True, widget=forms.Select(
attrs={'class': 'query-field'}), label=_('Field'))
operator = forms.ChoiceField(
label=_('Operator'),
required=True, choices=OPERATORS, initial="iexact",
widget=forms.Select(attrs={'class': 'query-operator'}))
value = VaryingTypeCharField(required=True, widget=forms.TextInput(
attrs={'class': 'query-value'}), label=_('Value'))
value_from = forms.DateTimeField(widget=forms.HiddenInput(
attrs={'class': 'query-dt-from'}), required=False)
value_to = forms.DateTimeField(widget=forms.HiddenInput(
attrs={'class': 'query-dt-to'}), required=False)
negate = forms.BooleanField(initial=False, required=False, label=_('Negate'))
def _build_field_choices(self, fields):
"""
Iterate over passed model fields tuple and update initial choices.
"""
return tuple(sorted(
[(fquery, capfirst(fname)) for fquery, fname in fields.items()],
key=lambda f: f[1].lower())
) + self.FIELD_CHOICES
def _build_query_dict(self, formdata=None):
"""
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
"""
if self.is_valid() and formdata is None:
formdata = self.cleaned_data
key = "{field}__{operator}".format(**formdata)
if formdata['operator'] == "isnull":
return {key: None}
elif formdata['operator'] == "istrue":
return {formdata['field']: True}
elif formdata['operator'] == "isfalse":
return {formdata['field']: False}
return {key: formdata['value']}
@staticmethod
def _parse_query_dict(query_data, model):
"""
Take a list of query field dict and return data for form initialization
"""
operator = 'iexact'
if query_data['field'] == '_OR':
query_data['operator'] = operator
return query_data
parts = query_data['field'].split('__')
if len(parts) < 2:
field = parts[0]
else:
if parts[-1] in dict(AdvancedFilterQueryForm.OPERATORS).keys():
field = '__'.join(parts[:-1])
operator = parts[-1]
else:
field = query_data['field']
query_data['field'] = field
mfield = get_fields_from_path(model, query_data['field'])
if not mfield:
raise Exception('Field path "%s" could not be followed to a field'
' in model %s', query_data['field'], model)
else:
mfield = mfield[-1] # get the field object
if query_data['value'] is None:
query_data['operator'] = "isnull"
elif query_data['value'] is True:
query_data['operator'] = "istrue"
elif query_data['value'] is False:
query_data['operator'] = "isfalse"
else:
if isinstance(mfield, DateField):
# this is a date/datetime field
query_data['operator'] = "range" # default
else:
query_data['operator'] = operator # default
if isinstance(query_data.get('value'),
list) and query_data['operator'] == 'range':
date_from = date_to_string(query_data.get('value_from'))
date_to = date_to_string(query_data.get('value_to'))
query_data['value'] = ','.join([date_from, date_to])
return query_data
def set_range_value(self, data):
"""
Validates date range by parsing into 2 datetime objects and
validating them both.
"""
dtfrom = data.pop('value_from')
dtto = data.pop('value_to')
if dtfrom is dtto is None:
self.errors['value'] = ['Date range requires values']
raise forms.ValidationError([])
data['value'] = (dtfrom, dtto)
def clean(self):
cleaned_data = super(AdvancedFilterQueryForm, self).clean()
if cleaned_data.get('operator') == "range":
if ('value_from' in cleaned_data and
'value_to' in cleaned_data):
self.set_range_value(cleaned_data)
return cleaned_data
def make_query(self, *args, **kwargs):
""" Returns a Q object from the submitted form """
query = Q() # initial is an empty query
query_dict = self._build_query_dict(self.cleaned_data)
if 'negate' in self.cleaned_data and self.cleaned_data['negate']:
query = query & ~Q(**query_dict)
else:
query = query & Q(**query_dict)
return query
    def __init__(self, model_fields=None, *args, **kwargs):
        super(AdvancedFilterQueryForm, self).__init__(*args, **kwargs)
        self.FIELD_CHOICES = self._build_field_choices(model_fields or {})
self.fields['field'].choices = self.FIELD_CHOICES
if not self.fields['field'].initial:
self.fields['field'].initial = self.FIELD_CHOICES[0]
class AdvancedFilterFormSet(BaseFormSet):
""" """
fields = ()
extra_kwargs = {}
def __init__(self, *args, **kwargs):
self.model_fields = kwargs.pop('model_fields', {})
super(AdvancedFilterFormSet, self).__init__(*args, **kwargs)
if self.forms:
form = self.forms[0]
self.fields = form.visible_fields()
def get_form_kwargs(self, index):
kwargs = super(AdvancedFilterFormSet, self).get_form_kwargs(index)
kwargs['model_fields'] = self.model_fields
return kwargs
@cached_property
def forms(self):
# override the original property to include `model_fields` argument
forms = [self._construct_form(i, model_fields=self.model_fields)
for i in range(self.total_form_count())]
forms.append(self.empty_form) # add initial empty form
return forms
AFQFormSet = formset_factory(
AdvancedFilterQueryForm, formset=AdvancedFilterFormSet,
extra=1, can_delete=True)
AFQFormSetNoExtra = formset_factory(
AdvancedFilterQueryForm, formset=AdvancedFilterFormSet,
extra=0, can_delete=True)
class AdvancedFilterForm(CleanWhiteSpacesMixin, forms.ModelForm):
""" Form to save/edit advanced filter forms """
class Meta:
model = AdvancedFilter
fields = ('title',)
class Media:
required_js = [
'admin/js/%sjquery.min.js' % ('vendor/jquery/' if USE_VENDOR_DIR else ''),
'advanced-filters/jquery_adder.js',
'orig_inlines%s.js' % ('' if settings.DEBUG else '.min'),
'magnific-popup/jquery.magnific-popup.js',
'advanced-filters/advanced-filters.js',
]
js = required_js + [SELECT2_JS]
css = {'screen': [
SELECT2_CSS,
'advanced-filters/advanced-filters.css',
'magnific-popup/magnific-popup.css'
]}
def get_fields_from_model(self, model, fields):
"""
Iterate over given <field> names (in "orm query" notation) and find
the actual field given the initial <model>.
If <field> is a tuple of the format ('field_name', 'Verbose name'),
overwrite the field's verbose name with the given name for display
purposes.
"""
model_fields = {}
for field in fields:
if isinstance(field, tuple) and len(field) == 2:
field, verbose_name = field[0], field[1]
else:
try:
model_field = get_fields_from_path(model, field)[-1]
verbose_name = model_field.verbose_name
except (FieldDoesNotExist, IndexError, TypeError) as e:
logger.warn("AdvancedFilterForm: skip invalid field "
"- %s", e)
continue
model_fields[field] = verbose_name
return model_fields
def __init__(self, *args, **kwargs):
model_admin = kwargs.pop('model_admin', None)
instance = kwargs.get('instance')
extra_form = kwargs.pop('extra_form', False)
# TODO: allow all fields to be determined by model
filter_fields = kwargs.pop('filter_fields', None)
if model_admin:
self._model = model_admin.model
elif instance and instance.model:
# get existing instance model
self._model = apps.get_model(*instance.model.split('.'))
try:
model_admin = admin.site._registry[self._model]
except KeyError:
logger.debug('No ModelAdmin registered for %s', self._model)
else:
raise Exception('Adding new AdvancedFilter from admin is '
'not supported')
self._filter_fields = filter_fields or getattr(
model_admin, 'advanced_filter_fields', ())
super(AdvancedFilterForm, self).__init__(*args, **kwargs)
# populate existing or empty forms formset
data = None
if len(args):
data = args[0]
elif kwargs.get('data'):
data = kwargs.get('data')
self.initialize_form(instance, self._model, data, extra_form)
def clean(self):
cleaned_data = super(AdvancedFilterForm, self).clean()
if not self.fields_formset.is_valid():
logger.debug(
"Errors validating advanced query filters: %s",
pformat([(f.errors, f.non_field_errors())
for f in self.fields_formset.forms]))
raise forms.ValidationError("Error validating filter forms")
cleaned_data['model'] = "%s.%s" % (self._model._meta.app_label,
self._model._meta.object_name)
return cleaned_data
@property
def _non_deleted_forms(self):
forms = []
for form in self.fields_formset.forms:
if form in self.fields_formset.deleted_forms:
continue # skip deleted forms when generating query
forms.append(form)
return forms
def generate_query(self):
""" Reduces multiple queries into a single usable query """
query = Q()
ORed = []
for form in self._non_deleted_forms:
if not hasattr(form, 'cleaned_data'):
continue
if form.cleaned_data['field'] == "_OR":
ORed.append(query)
query = Q()
else:
query = query & form.make_query()
if ORed:
if query: # add last query for OR if any
ORed.append(query)
query = reduce(operator.or_, ORed)
return query
def initialize_form(self, instance, model, data=None, extra=None):
""" Takes a "finalized" query and generate it's form data """
model_fields = self.get_fields_from_model(model, self._filter_fields)
forms = []
if instance:
for field_data in instance.list_fields():
forms.append(
AdvancedFilterQueryForm._parse_query_dict(
field_data, model))
formset = AFQFormSetNoExtra if not extra else AFQFormSet
self.fields_formset = formset(
data=data,
initial=forms or None,
model_fields=model_fields
)
def save(self, commit=True):
self.instance.query = self.generate_query()
self.instance.model = self.cleaned_data.get('model')
return super(AdvancedFilterForm, self).save(commit)
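# Illustration of generate_query()'s semantics (a sketch, not part of the
# original module): rows are ANDed together, and a row whose field is "_OR"
# closes the current block so that the blocks combine with OR.
def _example_generate_query():
    # Rows: first_name iexact 'john', then an "_OR" row, then
    # last_name iexact 'doe' -- this reduces to (first_name OR last_name),
    # exactly what reduce(operator.or_, ORed) produces above.
    blocks = [Q(first_name__iexact='john'), Q(last_name__iexact='doe')]
    return reduce(operator.or_, blocks)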
| 37.444751
| 86
| 0.599041
|
dac1b1e23b606a03e325d810f9e5ce8f78754597
| 377
|
py
|
Python
|
main/stocks/migrations/0004_auto_20170623_2116.py
|
Hawk94/coin_tracker
|
082909e17308a8dd460225c1b035751d12a27106
|
[
"MIT"
] | null | null | null |
main/stocks/migrations/0004_auto_20170623_2116.py
|
Hawk94/coin_tracker
|
082909e17308a8dd460225c1b035751d12a27106
|
[
"MIT"
] | null | null | null |
main/stocks/migrations/0004_auto_20170623_2116.py
|
Hawk94/coin_tracker
|
082909e17308a8dd460225c1b035751d12a27106
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-23 21:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stocks', '0003_sandp'),
]
operations = [
migrations.RenameModel(
old_name='SandP',
new_name='SPX',
),
]
| 18.85
| 48
| 0.596817
|
8d11fb7a697800c04561a6a944444d1aa8801930
| 5,730
|
py
|
Python
|
src/abaqus/Optimization/ShapeDemoldControl.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Optimization/ShapeDemoldControl.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Optimization/ShapeDemoldControl.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from .GeometricRestriction import GeometricRestriction
from ..Region.Region import Region
class ShapeDemoldControl(GeometricRestriction):
"""The ShapeDemoldControl object defines a shape demold control geometric restriction.
The ShapeDemoldControl object is derived from the GeometricRestriction object.
Notes
-----
This object can be accessed by:
.. code-block:: python
import optimization
mdb.models[name].optimizationTasks[name].geometricRestrictions[name]
"""
def __init__(self, name: str, pullDirection: tuple, region: Region,
collisionCheckRegion: SymbolicConstant = DEMOLD_REGION, csys: int = None,
drawAngle: float = 0, mainPointDetermination: SymbolicConstant = MAXIMUM,
presumeFeasibleRegionAtStart: Boolean = ON, tolerance1: float = 0,
tolerance2: float = 0, tolerance3: float = 0, undercutTolerance: float = 0):
"""This method creates a ShapeDemoldControl object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].optimizationTasks[name].ShapeDemoldControl
Parameters
----------
name
A String specifying the geometric restriction repository key.
pullDirection
A VertexArray object of length 2 specifying the demold pull direction. Instead of
through a ConstrainedSketchVertex, each point might be specified through a tuple of coordinates.
region
A Region object specifying the region to which the geometric restriction is applied.
When used with a TopologyTask, there is no default value. When used with a ShapeTask,
the default value is MODEL.
collisionCheckRegion
The SymbolicConstant DEMOLD_REGION or a Region object specifying the collision check
region. If the value is DEMOLD_REGION, then the value of *region* is used as both the
demold region and the collision check region. The default value is DEMOLD_REGION.
csys
None or a DatumCsys object specifying the local coordinate system of the
*pullDirection*. If *csys*=None, the global coordinate system is used. When this member
is queried, it returns an Int indicating the identifier of the DatumCsys. The default
value is None.
drawAngle
A Float specifying the draw angle. The default value is 0.0.
mainPointDetermination
A SymbolicConstant specifying the rule for assigning point priority. Possible values are
MAXIMUM and MINIMUM. The default value is MAXIMUM.
presumeFeasibleRegionAtStart
A Boolean specifying whether to ignore the geometric restriction in the first design
cycle. The default value is ON.
tolerance1
A Float specifying the geometric tolerance in the 1-direction. The default value is
0.01.
tolerance2
A Float specifying the geometric tolerance in the 2-direction. The default value is
0.01.
tolerance3
A Float specifying the geometric tolerance in the 3-direction. The default value is
0.01.
undercutTolerance
A Float specifying the undercut tolerance. The default value is 0.0.
Returns
-------
A ShapeDemoldControl object.
"""
super().__init__()
pass
def setValues(self, collisionCheckRegion: SymbolicConstant = DEMOLD_REGION, csys: int = None,
drawAngle: float = 0, mainPointDetermination: SymbolicConstant = MAXIMUM,
presumeFeasibleRegionAtStart: Boolean = ON, tolerance1: float = 0,
tolerance2: float = 0, tolerance3: float = 0, undercutTolerance: float = 0):
"""This method modifies the ShapeDemoldControl object.
Parameters
----------
collisionCheckRegion
The SymbolicConstant DEMOLD_REGION or a Region object specifying the collision check
region. If the value is DEMOLD_REGION, then the value of *region* is used as both the
demold region and the collision check region. The default value is DEMOLD_REGION.
csys
None or a DatumCsys object specifying the local coordinate system of the
*pullDirection*. If *csys*=None, the global coordinate system is used. When this member
is queried, it returns an Int indicating the identifier of the DatumCsys. The default
value is None.
drawAngle
A Float specifying the draw angle. The default value is 0.0.
mainPointDetermination
A SymbolicConstant specifying the rule for assigning point priority. Possible values are
MAXIMUM and MINIMUM. The default value is MAXIMUM.
presumeFeasibleRegionAtStart
A Boolean specifying whether to ignore the geometric restriction in the first design
cycle. The default value is ON.
tolerance1
A Float specifying the geometric tolerance in the 1-direction. The default value is
0.01.
tolerance2
A Float specifying the geometric tolerance in the 2-direction. The default value is
0.01.
tolerance3
A Float specifying the geometric tolerance in the 3-direction. The default value is
0.01.
undercutTolerance
A Float specifying the undercut tolerance. The default value is 0.0.
"""
pass
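# Usage sketch based solely on the access pattern documented above; it must be
# run inside Abaqus/CAE, and the model, task, and coordinate values here are
# hypothetical:
#
#     task = mdb.models['Model-1'].optimizationTasks['ShapeTask-1']
#     task.ShapeDemoldControl(name='DemoldControl-1',
#                             pullDirection=((0.0, 0.0, 0.0), (0.0, 0.0, 1.0)),
#                             region=MODEL)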
| 47.355372
| 108
| 0.652356
|
4b97e4369cb6a477782dae8a0f2ee694f149d375
| 5,663
|
py
|
Python
|
python/mxnet/gluon/data/dataset.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 22
|
2019-02-20T12:42:20.000Z
|
2021-12-25T06:09:46.000Z
|
python/mxnet/gluon/data/dataset.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 4
|
2019-04-01T07:36:04.000Z
|
2022-03-24T03:11:26.000Z
|
python/mxnet/gluon/data/dataset.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 7
|
2019-03-20T16:04:37.000Z
|
2021-04-28T18:40:11.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset container."""
__all__ = ['Dataset', 'SimpleDataset', 'ArrayDataset',
'RecordFileDataset']
import os
from ... import recordio, ndarray
class Dataset(object):
"""Abstract dataset class. All datasets should have this interface.
Subclasses need to override `__getitem__`, which returns the i-th
    element, and `__len__`, which returns the total number of elements.
.. note:: An mxnet or numpy array can be directly used as a dataset.
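    Examples
    --------
    Minimal sketch (not from the upstream docstring) of a custom dataset:

    >>> class SquaresDataset(Dataset):
    ...     def __getitem__(self, idx):
    ...         return idx * idx
    ...     def __len__(self):
    ...         return 10
    >>> SquaresDataset()[3]
    9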
"""
def __getitem__(self, idx):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def transform(self, fn, lazy=True):
"""Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
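        Examples
        --------
        Illustrative sketch (not from the upstream docstring); doubles each
        sample lazily:

        >>> ds = SimpleDataset([1, 2, 3]).transform(lambda x: x * 2)
        >>> ds[1]
        4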
"""
trans = _LazyTransformDataset(self, fn)
if lazy:
return trans
        return SimpleDataset(list(trans))
def transform_first(self, fn, lazy=True):
"""Returns a new dataset with the first element of each sample
transformed by the transformer function `fn`.
This is useful, for example, when you only want to transform data
while keeping label as is.
Parameters
----------
fn : callable
            A transformer function that takes the first element of a sample
as input and returns the transformed element.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
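        Examples
        --------
        Illustrative sketch (not from the upstream docstring); scales the data
        element while leaving the label untouched:

        >>> ds = SimpleDataset([(1, 'a'), (2, 'b')]).transform_first(lambda x: x * 10)
        >>> ds[0]
        (10, 'a')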
"""
def base_fn(x, *args):
if args:
return (fn(x),) + args
return fn(x)
return self.transform(base_fn, lazy)
class SimpleDataset(Dataset):
"""Simple Dataset wrapper for lists and arrays.
Parameters
----------
data : dataset-like object
Any object that implements `len()` and `[]`.
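    Examples
    --------
    Minimal sketch (not from the upstream docstring):

    >>> ds = SimpleDataset([10, 20, 30])
    >>> len(ds)
    3
    >>> ds[-1]
    30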
"""
def __init__(self, data):
self._data = data
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
return self._data[idx]
class _LazyTransformDataset(Dataset):
"""Lazily transformed dataset."""
def __init__(self, data, fn):
self._data = data
self._fn = fn
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
item = self._data[idx]
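        # Multi-element samples arrive as tuples; unpack them so the
        # transformer function receives each element as a separate argument.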
if isinstance(item, tuple):
return self._fn(*item)
return self._fn(item)
class ArrayDataset(Dataset):
"""A dataset that combines multiple dataset-like objects, e.g.
Datasets, lists, arrays, etc.
The i-th sample is defined as `(x1[i], x2[i], ...)`.
Parameters
----------
*args : one or more dataset-like objects
The data arrays.
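    Examples
    --------
    Illustrative sketch (not from the upstream docstring) pairing data with
    labels:

    >>> ds = ArrayDataset([1, 2, 3], ['a', 'b', 'c'])
    >>> ds[1]
    (2, 'b')
    >>> len(ds)
    3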
"""
def __init__(self, *args):
        assert len(args) > 0, "Needs at least 1 array"
self._length = len(args[0])
self._data = []
for i, data in enumerate(args):
assert len(data) == self._length, \
"All arrays must have the same length; array[0] has length %d " \
"while array[%d] has %d." % (self._length, i+1, len(data))
if isinstance(data, ndarray.NDArray) and len(data.shape) == 1:
data = data.asnumpy()
self._data.append(data)
def __getitem__(self, idx):
if len(self._data) == 1:
return self._data[0][idx]
else:
return tuple(data[idx] for data in self._data)
def __len__(self):
return self._length
class RecordFileDataset(Dataset):
"""A dataset wrapping over a RecordIO (.rec) file.
    Each sample is a string representing the raw content of a record.
Parameters
----------
filename : str
Path to rec file.
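    Examples
    --------
    Hedged sketch: './data.rec' and its companion index './data.idx' are
    hypothetical paths, e.g. produced by MXNet's im2rec tool:

    >>> ds = RecordFileDataset('./data.rec')  # doctest: +SKIP
    >>> raw_record = ds[0]                    # doctest: +SKIP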
"""
def __init__(self, filename):
idx_file = os.path.splitext(filename)[0] + '.idx'
self._record = recordio.MXIndexedRecordIO(idx_file, filename, 'r')
def __getitem__(self, idx):
return self._record.read_idx(self._record.keys[idx])
def __len__(self):
return len(self._record.keys)
avg_line_length: 30.777174
max_line_length: 81
alphanum_fraction: 0.61787